From da0d4f9034e2b567ad0c88a894579ff0bb2321df Mon Sep 17 00:00:00 2001 From: morvencao Date: Tue, 16 Apr 2024 07:23:23 +0000 Subject: [PATCH 1/6] support cloudevents driver for addon manager. Signed-off-by: morvencao --- .github/workflows/cloudevents-integration.yml | 32 + .github/workflows/go-presubmit.yml | 31 + Makefile | 18 + build/Dockerfile.example | 1 + cmd/example/helloworld_cloudevents/main.go | 145 + .../helloworld-cloudevents/kustomization.yaml | 28 + .../resources/addon_deployment_config.yaml | 6 + .../resources/cluster_role.yaml | 50 + .../resources/cluster_role_binding.yaml | 12 + ...ld_cloudevents_clustermanagementaddon.yaml | 21 + .../helloworld_cloudevents_controller.yaml | 43 + .../resources/managed_clusterset_binding.yaml | 6 + .../resources/placement.yaml | 10 + .../resources/service_account.yaml | 4 + .../resources/work-driver-config.yaml | 11 + examples/deploy/mqtt/mqtt-broker.yaml | 68 + examples/deploy/ocm-cloudevents/install.sh | 87 + examples/helloworld_cloudevents/helloworld.go | 95 + .../helloworld_cloudevents/helloworld_test.go | 192 + .../templates/clusterrolebinding.yaml | 12 + .../manifests/templates/deployment.yaml | 68 + .../manifests/templates/serviceaccount.yaml | 5 + go.mod | 11 +- go.sum | 23 +- pkg/addonmanager/addontesting/helpers.go | 5 + pkg/addonmanager/cloudevents/manager.go | 337 ++ pkg/addonmanager/cloudevents/options.go | 35 + .../controllers/agentdeploy/utils.go | 13 + pkg/addonmanager/manager.go | 12 +- test/e2ecloudevents/e2e_suite_test.go | 155 + .../helloworld_cloudevents_test.go | 536 +++ test/integration-test.mk | 14 +- .../cloudevents/agent_deploy_test.go | 559 +++ .../cloudevents/agent_hook_deploy_test.go | 333 ++ .../cloudevents/agent_hosting_deploy_test.go | 421 ++ .../agent_hosting_hook_deploy_test.go | 454 ++ test/integration/cloudevents/suite_test.go | 278 ++ .../{ => kube}/agent_deploy_test.go | 2 +- .../{ => kube}/agent_hook_deploy_test.go | 2 +- .../{ => kube}/agent_hosting_deploy_test.go | 2 +- 
.../agent_hosting_hook_deploy_test.go | 2 +- test/integration/{ => kube}/assertion_test.go | 2 +- .../cluster_management_addon_test.go | 2 +- test/integration/{ => kube}/csr_test.go | 2 +- ...leted_managed_cluster_create_addon_test.go | 2 +- .../integration/{ => kube}/multiworks_test.go | 2 +- .../{ => kube}/registration_test.go | 2 +- test/integration/{ => kube}/suite_test.go | 2 +- test/integration/util/recorder.go | 84 + .../github.com/bwmarrin/snowflake/.travis.yml | 12 + vendor/github.com/bwmarrin/snowflake/LICENSE | 23 + .../github.com/bwmarrin/snowflake/README.md | 143 + .../bwmarrin/snowflake/snowflake.go | 365 ++ .../sdk-go/protocol/mqtt_paho/v2/LICENSE | 201 + .../sdk-go/protocol/mqtt_paho/v2/message.go | 119 + .../sdk-go/protocol/mqtt_paho/v2/option.go | 48 + .../sdk-go/protocol/mqtt_paho/v2/protocol.go | 155 + .../protocol/mqtt_paho/v2/write_message.go | 133 + .../github.com/cloudevents/sdk-go/v2/LICENSE | 201 + .../github.com/cloudevents/sdk-go/v2/alias.go | 187 + .../sdk-go/v2/binding/binary_writer.go | 52 + .../cloudevents/sdk-go/v2/binding/doc.go | 66 + .../cloudevents/sdk-go/v2/binding/encoding.go | 50 + .../sdk-go/v2/binding/event_message.go | 110 + .../sdk-go/v2/binding/finish_message.go | 42 + .../sdk-go/v2/binding/format/doc.go | 12 + .../sdk-go/v2/binding/format/format.go | 105 + .../cloudevents/sdk-go/v2/binding/message.go | 153 + .../sdk-go/v2/binding/spec/attributes.go | 141 + .../cloudevents/sdk-go/v2/binding/spec/doc.go | 12 + .../v2/binding/spec/match_exact_version.go | 81 + .../sdk-go/v2/binding/spec/spec.go | 189 + .../sdk-go/v2/binding/structured_writer.go | 22 + .../cloudevents/sdk-go/v2/binding/to_event.go | 153 + .../sdk-go/v2/binding/transformer.go | 42 + .../cloudevents/sdk-go/v2/binding/write.go | 179 + .../cloudevents/sdk-go/v2/client/client.go | 295 ++ .../sdk-go/v2/client/client_http.go | 35 + .../sdk-go/v2/client/client_observed.go | 12 + .../sdk-go/v2/client/defaulters.go | 57 + .../cloudevents/sdk-go/v2/client/doc.go | 11 
+ .../sdk-go/v2/client/http_receiver.go | 45 + .../cloudevents/sdk-go/v2/client/invoker.go | 145 + .../sdk-go/v2/client/observability.go | 54 + .../cloudevents/sdk-go/v2/client/options.go | 141 + .../cloudevents/sdk-go/v2/client/receiver.go | 193 + .../cloudevents/sdk-go/v2/context/context.go | 110 + .../sdk-go/v2/context/delegating.go | 25 + .../cloudevents/sdk-go/v2/context/doc.go | 10 + .../cloudevents/sdk-go/v2/context/logger.go | 48 + .../cloudevents/sdk-go/v2/context/retry.go | 76 + .../sdk-go/v2/event/content_type.go | 47 + .../sdk-go/v2/event/data_content_encoding.go | 16 + .../sdk-go/v2/event/datacodec/codec.go | 78 + .../sdk-go/v2/event/datacodec/doc.go | 10 + .../sdk-go/v2/event/datacodec/json/data.go | 56 + .../sdk-go/v2/event/datacodec/json/doc.go | 9 + .../sdk-go/v2/event/datacodec/text/data.go | 30 + .../sdk-go/v2/event/datacodec/text/doc.go | 9 + .../sdk-go/v2/event/datacodec/xml/data.go | 40 + .../sdk-go/v2/event/datacodec/xml/doc.go | 9 + .../cloudevents/sdk-go/v2/event/doc.go | 9 + .../cloudevents/sdk-go/v2/event/event.go | 125 + .../cloudevents/sdk-go/v2/event/event_data.go | 118 + .../sdk-go/v2/event/event_interface.go | 102 + .../sdk-go/v2/event/event_marshal.go | 203 + .../sdk-go/v2/event/event_reader.go | 103 + .../sdk-go/v2/event/event_unmarshal.go | 480 ++ .../sdk-go/v2/event/event_validation.go | 50 + .../sdk-go/v2/event/event_writer.go | 117 + .../sdk-go/v2/event/eventcontext.go | 125 + .../sdk-go/v2/event/eventcontext_v03.go | 330 ++ .../v2/event/eventcontext_v03_reader.go | 99 + .../v2/event/eventcontext_v03_writer.go | 103 + .../sdk-go/v2/event/eventcontext_v1.go | 315 ++ .../sdk-go/v2/event/eventcontext_v1_reader.go | 104 + .../sdk-go/v2/event/eventcontext_v1_writer.go | 97 + .../cloudevents/sdk-go/v2/event/extensions.go | 57 + .../cloudevents/sdk-go/v2/protocol/doc.go | 25 + .../cloudevents/sdk-go/v2/protocol/error.go | 42 + .../v2/protocol/http/abuse_protection.go | 128 + .../sdk-go/v2/protocol/http/context.go | 48 + 
.../sdk-go/v2/protocol/http/doc.go | 9 + .../sdk-go/v2/protocol/http/headers.go | 55 + .../sdk-go/v2/protocol/http/message.go | 175 + .../sdk-go/v2/protocol/http/options.go | 300 ++ .../sdk-go/v2/protocol/http/protocol.go | 411 ++ .../v2/protocol/http/protocol_lifecycle.go | 143 + .../sdk-go/v2/protocol/http/protocol_rate.go | 34 + .../sdk-go/v2/protocol/http/protocol_retry.go | 126 + .../sdk-go/v2/protocol/http/result.go | 60 + .../sdk-go/v2/protocol/http/retries_result.go | 59 + .../sdk-go/v2/protocol/http/utility.go | 89 + .../sdk-go/v2/protocol/http/write_request.go | 142 + .../v2/protocol/http/write_responsewriter.go | 126 + .../cloudevents/sdk-go/v2/protocol/inbound.go | 54 + .../sdk-go/v2/protocol/lifecycle.go | 23 + .../sdk-go/v2/protocol/outbound.go | 49 + .../cloudevents/sdk-go/v2/protocol/result.go | 127 + .../cloudevents/sdk-go/v2/staticcheck.conf | 3 + .../cloudevents/sdk-go/v2/types/allocate.go | 41 + .../cloudevents/sdk-go/v2/types/doc.go | 45 + .../cloudevents/sdk-go/v2/types/timestamp.go | 75 + .../cloudevents/sdk-go/v2/types/uri.go | 86 + .../cloudevents/sdk-go/v2/types/uriref.go | 82 + .../cloudevents/sdk-go/v2/types/value.go | 337 ++ vendor/github.com/eclipse/paho.golang/LICENSE | 277 ++ .../eclipse/paho.golang/packets/auth.go | 77 + .../eclipse/paho.golang/packets/connack.go | 145 + .../eclipse/paho.golang/packets/connect.go | 189 + .../eclipse/paho.golang/packets/disconnect.go | 152 + .../eclipse/paho.golang/packets/packets.go | 447 ++ .../eclipse/paho.golang/packets/pingreq.go | 34 + .../eclipse/paho.golang/packets/pingresp.go | 34 + .../eclipse/paho.golang/packets/properties.go | 804 ++++ .../eclipse/paho.golang/packets/puback.go | 115 + .../eclipse/paho.golang/packets/pubcomp.go | 95 + .../eclipse/paho.golang/packets/publish.go | 80 + .../eclipse/paho.golang/packets/pubrec.go | 117 + .../eclipse/paho.golang/packets/pubrel.go | 77 + .../eclipse/paho.golang/packets/suback.go | 103 + .../eclipse/paho.golang/packets/subscribe.go | 116 + 
.../eclipse/paho.golang/packets/unsuback.go | 88 + .../paho.golang/packets/unsubscribe.go | 67 + .../eclipse/paho.golang/paho/acks_tracker.go | 79 + .../eclipse/paho.golang/paho/auth.go | 8 + .../eclipse/paho.golang/paho/client.go | 923 ++++ .../eclipse/paho.golang/paho/cp_auth.go | 92 + .../eclipse/paho.golang/paho/cp_connack.go | 84 + .../eclipse/paho.golang/paho/cp_connect.go | 180 + .../eclipse/paho.golang/paho/cp_disconnect.go | 58 + .../eclipse/paho.golang/paho/cp_publish.go | 123 + .../eclipse/paho.golang/paho/cp_pubresp.go | 55 + .../eclipse/paho.golang/paho/cp_suback.go | 41 + .../eclipse/paho.golang/paho/cp_subscribe.go | 67 + .../eclipse/paho.golang/paho/cp_unsuback.go | 41 + .../paho.golang/paho/cp_unsubscribe.go | 31 + .../eclipse/paho.golang/paho/cp_utils.go | 100 + .../eclipse/paho.golang/paho/message_ids.go | 93 + .../paho.golang/paho/noop_persistence.go | 23 + .../eclipse/paho.golang/paho/persistence.go | 98 + .../eclipse/paho.golang/paho/pinger.go | 122 + .../eclipse/paho.golang/paho/router.go | 212 + .../eclipse/paho.golang/paho/trace.go | 22 + .../golang/protobuf/ptypes/empty/empty.pb.go | 62 + .../gorilla/websocket/.editorconfig | 20 + .../github.com/gorilla/websocket/.gitignore | 1 + .../gorilla/websocket/.golangci.yml | 3 + vendor/github.com/gorilla/websocket/LICENSE | 27 + vendor/github.com/gorilla/websocket/Makefile | 34 + vendor/github.com/gorilla/websocket/README.md | 36 + vendor/github.com/gorilla/websocket/client.go | 444 ++ .../gorilla/websocket/compression.go | 153 + vendor/github.com/gorilla/websocket/conn.go | 1267 ++++++ vendor/github.com/gorilla/websocket/doc.go | 227 + vendor/github.com/gorilla/websocket/join.go | 42 + vendor/github.com/gorilla/websocket/json.go | 60 + vendor/github.com/gorilla/websocket/mask.go | 59 + .../github.com/gorilla/websocket/mask_safe.go | 16 + .../github.com/gorilla/websocket/prepared.go | 102 + vendor/github.com/gorilla/websocket/proxy.go | 86 + vendor/github.com/gorilla/websocket/server.go | 389 ++ 
.../gorilla/websocket/tls_handshake.go | 18 + vendor/github.com/gorilla/websocket/util.go | 298 ++ .../mochi-mqtt/server/v2/.gitignore | 5 + .../mochi-mqtt/server/v2/.golangci.yml | 103 + .../mochi-mqtt/server/v2/Dockerfile | 31 + .../mochi-mqtt/server/v2/LICENSE.md | 23 + .../mochi-mqtt/server/v2/README-CN.md | 505 +++ .../mochi-mqtt/server/v2/README-JP.md | 494 ++ .../github.com/mochi-mqtt/server/v2/README.md | 484 ++ .../mochi-mqtt/server/v2/clients.go | 644 +++ .../github.com/mochi-mqtt/server/v2/hooks.go | 854 ++++ .../server/v2/hooks/auth/allow_all.go | 41 + .../mochi-mqtt/server/v2/hooks/auth/auth.go | 103 + .../mochi-mqtt/server/v2/hooks/auth/ledger.go | 246 + .../server/v2/hooks/storage/storage.go | 194 + .../mochi-mqtt/server/v2/inflight.go | 156 + .../server/v2/listeners/http_healthcheck.go | 100 + .../server/v2/listeners/http_sysinfo.go | 123 + .../server/v2/listeners/listeners.go | 133 + .../mochi-mqtt/server/v2/listeners/mock.go | 103 + .../mochi-mqtt/server/v2/listeners/net.go | 92 + .../mochi-mqtt/server/v2/listeners/tcp.go | 111 + .../server/v2/listeners/unixsock.go | 98 + .../server/v2/listeners/websocket.go | 201 + .../mochi-mqtt/server/v2/mempool/bufpool.go | 81 + .../mochi-mqtt/server/v2/packets/codec.go | 172 + .../mochi-mqtt/server/v2/packets/codes.go | 149 + .../server/v2/packets/fixedheader.go | 63 + .../mochi-mqtt/server/v2/packets/packets.go | 1172 +++++ .../server/v2/packets/properties.go | 481 ++ .../mochi-mqtt/server/v2/packets/tpackets.go | 4005 +++++++++++++++++ .../github.com/mochi-mqtt/server/v2/server.go | 1679 +++++++ .../mochi-mqtt/server/v2/system/system.go | 61 + .../github.com/mochi-mqtt/server/v2/topics.go | 822 ++++ .../library-go/pkg/operator/events/OWNERS | 8 + .../pkg/operator/events/recorder.go | 238 + .../pkg/operator/events/recorder_in_memory.go | 86 + .../pkg/operator/events/recorder_logging.go | 58 + .../pkg/operator/events/recorder_upstream.go | 173 + vendor/github.com/rs/xid/.appveyor.yml | 27 + 
vendor/github.com/rs/xid/.travis.yml | 8 + vendor/github.com/rs/xid/LICENSE | 19 + vendor/github.com/rs/xid/README.md | 116 + vendor/github.com/rs/xid/error.go | 11 + vendor/github.com/rs/xid/hostid_darwin.go | 9 + vendor/github.com/rs/xid/hostid_fallback.go | 9 + vendor/github.com/rs/xid/hostid_freebsd.go | 9 + vendor/github.com/rs/xid/hostid_linux.go | 13 + vendor/github.com/rs/xid/hostid_windows.go | 38 + vendor/github.com/rs/xid/id.go | 392 ++ .../golang.org/x/net/internal/socks/client.go | 168 + .../golang.org/x/net/internal/socks/socks.go | 317 ++ vendor/golang.org/x/net/proxy/dial.go | 54 + vendor/golang.org/x/net/proxy/direct.go | 31 + vendor/golang.org/x/net/proxy/per_host.go | 155 + vendor/golang.org/x/net/proxy/proxy.go | 149 + vendor/golang.org/x/net/proxy/socks5.go | 42 + .../golang.org/x/sync/semaphore/semaphore.go | 136 + vendor/modules.txt | 71 +- .../api/utils/work/v1/utils/utils.go | 76 + .../utils/work/v1/workvalidator/validator.go | 64 + .../pkg/apis/work/v1/validator/validator.go | 64 + .../pkg/cloudevents/generic/agentclient.go | 326 ++ .../pkg/cloudevents/generic/baseclient.go | 243 + .../pkg/cloudevents/generic/interface.go | 74 + .../generic/options/grpc/agentoptions.go | 57 + .../generic/options/grpc/options.go | 149 + .../options/grpc/protobuf/v1/README.md | 30 + .../options/grpc/protobuf/v1/cloudevent.pb.go | 649 +++ .../options/grpc/protobuf/v1/cloudevent.proto | 81 + .../grpc/protobuf/v1/cloudevent_grpc.pb.go | 179 + .../generic/options/grpc/protobuf/v1/gen.go | 3 + .../generic/options/grpc/protocol/message.go | 205 + .../generic/options/grpc/protocol/option.go | 24 + .../generic/options/grpc/protocol/protocol.go | 149 + .../options/grpc/protocol/write_message.go | 215 + .../generic/options/grpc/sourceoptions.go | 52 + .../generic/options/mqtt/agentoptions.go | 113 + .../generic/options/mqtt/options.go | 321 ++ .../generic/options/mqtt/sourceoptions.go | 113 + .../cloudevents/generic/options/options.go | 75 + 
.../cloudevents/generic/payload/payload.go | 48 + .../pkg/cloudevents/generic/ratelimiter.go | 34 + .../pkg/cloudevents/generic/sourceclient.go | 314 ++ .../pkg/cloudevents/generic/types/types.go | 283 ++ .../work/agent/client/manifestwork.go | 160 + .../cloudevents/work/agent/codec/manifest.go | 193 + .../work/agent/codec/manifestbundle.go | 141 + .../work/agent/handler/resourcehandler.go | 59 + .../pkg/cloudevents/work/clientbuilder.go | 270 ++ .../pkg/cloudevents/work/common/common.go | 29 + .../pkg/cloudevents/work/configloader.go | 81 + .../cloudevents/work/internal/clientset.go | 54 + .../sdk-go/pkg/cloudevents/work/lister.go | 31 + .../pkg/cloudevents/work/payload/mainfiest.go | 54 + .../work/payload/manifestbundle.go | 43 + .../work/source/client/manifestwork.go | 216 + .../work/source/codec/manifestbundle.go | 101 + .../work/source/handler/resourcehandler.go | 167 + .../sdk-go/pkg/cloudevents/work/statushash.go | 18 + .../pkg/cloudevents/work/utils/utils.go | 55 + .../pkg/cloudevents/work/watcher/watcher.go | 64 + 304 files changed, 44556 insertions(+), 25 deletions(-) create mode 100644 .github/workflows/cloudevents-integration.yml create mode 100644 cmd/example/helloworld_cloudevents/main.go create mode 100644 examples/deploy/addon/helloworld-cloudevents/kustomization.yaml create mode 100644 examples/deploy/addon/helloworld-cloudevents/resources/addon_deployment_config.yaml create mode 100644 examples/deploy/addon/helloworld-cloudevents/resources/cluster_role.yaml create mode 100644 examples/deploy/addon/helloworld-cloudevents/resources/cluster_role_binding.yaml create mode 100644 examples/deploy/addon/helloworld-cloudevents/resources/helloworld_cloudevents_clustermanagementaddon.yaml create mode 100644 examples/deploy/addon/helloworld-cloudevents/resources/helloworld_cloudevents_controller.yaml create mode 100644 examples/deploy/addon/helloworld-cloudevents/resources/managed_clusterset_binding.yaml create mode 100644 
examples/deploy/addon/helloworld-cloudevents/resources/placement.yaml create mode 100644 examples/deploy/addon/helloworld-cloudevents/resources/service_account.yaml create mode 100644 examples/deploy/addon/helloworld-cloudevents/resources/work-driver-config.yaml create mode 100644 examples/deploy/mqtt/mqtt-broker.yaml create mode 100755 examples/deploy/ocm-cloudevents/install.sh create mode 100644 examples/helloworld_cloudevents/helloworld.go create mode 100644 examples/helloworld_cloudevents/helloworld_test.go create mode 100644 examples/helloworld_cloudevents/manifests/templates/clusterrolebinding.yaml create mode 100644 examples/helloworld_cloudevents/manifests/templates/deployment.yaml create mode 100644 examples/helloworld_cloudevents/manifests/templates/serviceaccount.yaml create mode 100644 pkg/addonmanager/cloudevents/manager.go create mode 100644 pkg/addonmanager/cloudevents/options.go create mode 100644 test/e2ecloudevents/e2e_suite_test.go create mode 100644 test/e2ecloudevents/helloworld_cloudevents_test.go create mode 100644 test/integration/cloudevents/agent_deploy_test.go create mode 100644 test/integration/cloudevents/agent_hook_deploy_test.go create mode 100644 test/integration/cloudevents/agent_hosting_deploy_test.go create mode 100644 test/integration/cloudevents/agent_hosting_hook_deploy_test.go create mode 100644 test/integration/cloudevents/suite_test.go rename test/integration/{ => kube}/agent_deploy_test.go (99%) rename test/integration/{ => kube}/agent_hook_deploy_test.go (99%) rename test/integration/{ => kube}/agent_hosting_deploy_test.go (99%) rename test/integration/{ => kube}/agent_hosting_hook_deploy_test.go (99%) rename test/integration/{ => kube}/assertion_test.go (99%) rename test/integration/{ => kube}/cluster_management_addon_test.go (99%) rename test/integration/{ => kube}/csr_test.go (99%) rename test/integration/{ => kube}/deleted_managed_cluster_create_addon_test.go (99%) rename test/integration/{ => kube}/multiworks_test.go 
(99%) rename test/integration/{ => kube}/registration_test.go (99%) rename test/integration/{ => kube}/suite_test.go (99%) create mode 100644 test/integration/util/recorder.go create mode 100644 vendor/github.com/bwmarrin/snowflake/.travis.yml create mode 100644 vendor/github.com/bwmarrin/snowflake/LICENSE create mode 100644 vendor/github.com/bwmarrin/snowflake/README.md create mode 100644 vendor/github.com/bwmarrin/snowflake/snowflake.go create mode 100644 vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/LICENSE create mode 100644 vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/message.go create mode 100644 vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/option.go create mode 100644 vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/protocol.go create mode 100644 vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/write_message.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/LICENSE create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/alias.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/message.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go create mode 100644 
vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/write.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/client.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/observability.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/options.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/context/context.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/context/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/context/logger.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/context/retry.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go create 
mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go 
create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/staticcheck.conf create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/types/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go create mode 100644 
vendor/github.com/cloudevents/sdk-go/v2/types/uri.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/types/value.go create mode 100644 vendor/github.com/eclipse/paho.golang/LICENSE create mode 100644 vendor/github.com/eclipse/paho.golang/packets/auth.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/connack.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/connect.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/disconnect.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/packets.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/pingreq.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/pingresp.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/properties.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/puback.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/pubcomp.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/publish.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/pubrec.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/pubrel.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/suback.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/subscribe.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/unsuback.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/unsubscribe.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/acks_tracker.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/auth.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/client.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/cp_auth.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/cp_connack.go create mode 100644 
vendor/github.com/eclipse/paho.golang/paho/cp_connect.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/cp_disconnect.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/cp_publish.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/cp_pubresp.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/cp_suback.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/cp_subscribe.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/cp_unsuback.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/cp_unsubscribe.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/cp_utils.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/message_ids.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/noop_persistence.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/persistence.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/pinger.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/router.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/trace.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go create mode 100644 vendor/github.com/gorilla/websocket/.editorconfig create mode 100644 vendor/github.com/gorilla/websocket/.gitignore create mode 100644 vendor/github.com/gorilla/websocket/.golangci.yml create mode 100644 vendor/github.com/gorilla/websocket/LICENSE create mode 100644 vendor/github.com/gorilla/websocket/Makefile create mode 100644 vendor/github.com/gorilla/websocket/README.md create mode 100644 vendor/github.com/gorilla/websocket/client.go create mode 100644 vendor/github.com/gorilla/websocket/compression.go create mode 100644 vendor/github.com/gorilla/websocket/conn.go create mode 100644 vendor/github.com/gorilla/websocket/doc.go create mode 100644 vendor/github.com/gorilla/websocket/join.go create mode 100644 vendor/github.com/gorilla/websocket/json.go create mode 
100644 vendor/github.com/gorilla/websocket/mask.go create mode 100644 vendor/github.com/gorilla/websocket/mask_safe.go create mode 100644 vendor/github.com/gorilla/websocket/prepared.go create mode 100644 vendor/github.com/gorilla/websocket/proxy.go create mode 100644 vendor/github.com/gorilla/websocket/server.go create mode 100644 vendor/github.com/gorilla/websocket/tls_handshake.go create mode 100644 vendor/github.com/gorilla/websocket/util.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/.gitignore create mode 100644 vendor/github.com/mochi-mqtt/server/v2/.golangci.yml create mode 100644 vendor/github.com/mochi-mqtt/server/v2/Dockerfile create mode 100644 vendor/github.com/mochi-mqtt/server/v2/LICENSE.md create mode 100644 vendor/github.com/mochi-mqtt/server/v2/README-CN.md create mode 100644 vendor/github.com/mochi-mqtt/server/v2/README-JP.md create mode 100644 vendor/github.com/mochi-mqtt/server/v2/README.md create mode 100644 vendor/github.com/mochi-mqtt/server/v2/clients.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/hooks.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/hooks/auth/allow_all.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/hooks/auth/auth.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/hooks/auth/ledger.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/hooks/storage/storage.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/inflight.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/listeners/http_healthcheck.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/listeners/http_sysinfo.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/listeners/listeners.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/listeners/mock.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/listeners/net.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/listeners/tcp.go create mode 100644 
vendor/github.com/mochi-mqtt/server/v2/listeners/unixsock.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/listeners/websocket.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/mempool/bufpool.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/packets/codec.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/packets/codes.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/packets/fixedheader.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/packets/packets.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/packets/properties.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/packets/tpackets.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/server.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/system/system.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/topics.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go create mode 100644 vendor/github.com/rs/xid/.appveyor.yml create mode 100644 vendor/github.com/rs/xid/.travis.yml create mode 100644 vendor/github.com/rs/xid/LICENSE create mode 100644 vendor/github.com/rs/xid/README.md create mode 100644 vendor/github.com/rs/xid/error.go create mode 100644 vendor/github.com/rs/xid/hostid_darwin.go create mode 100644 vendor/github.com/rs/xid/hostid_fallback.go create mode 100644 vendor/github.com/rs/xid/hostid_freebsd.go create mode 100644 vendor/github.com/rs/xid/hostid_linux.go create mode 100644 vendor/github.com/rs/xid/hostid_windows.go create mode 100644 vendor/github.com/rs/xid/id.go 
create mode 100644 vendor/golang.org/x/net/internal/socks/client.go create mode 100644 vendor/golang.org/x/net/internal/socks/socks.go create mode 100644 vendor/golang.org/x/net/proxy/dial.go create mode 100644 vendor/golang.org/x/net/proxy/direct.go create mode 100644 vendor/golang.org/x/net/proxy/per_host.go create mode 100644 vendor/golang.org/x/net/proxy/proxy.go create mode 100644 vendor/golang.org/x/net/proxy/socks5.go create mode 100644 vendor/golang.org/x/sync/semaphore/semaphore.go create mode 100644 vendor/open-cluster-management.io/api/utils/work/v1/utils/utils.go create mode 100644 vendor/open-cluster-management.io/api/utils/work/v1/workvalidator/validator.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/apis/work/v1/validator/validator.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/agentclient.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/baseclient.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/interface.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/agentoptions.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/options.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/README.md create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.pb.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.proto create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent_grpc.pb.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/gen.go create mode 100644 
vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/message.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/option.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/protocol.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/write_message.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/sourceoptions.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/agentoptions.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/options.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/sourceoptions.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/options.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/payload/payload.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/ratelimiter.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/sourceclient.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types/types.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client/manifestwork.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifest.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifestbundle.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/handler/resourcehandler.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/clientbuilder.go create mode 100644 
vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/common/common.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/configloader.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/internal/clientset.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/lister.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload/mainfiest.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload/manifestbundle.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client/manifestwork.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/codec/manifestbundle.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/handler/resourcehandler.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/statushash.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils/utils.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher/watcher.go diff --git a/.github/workflows/cloudevents-integration.yml b/.github/workflows/cloudevents-integration.yml new file mode 100644 index 000000000..a348f7db6 --- /dev/null +++ b/.github/workflows/cloudevents-integration.yml @@ -0,0 +1,32 @@ +name: CloudEventsIntegration + +on: + workflow_dispatch: {} + pull_request: + paths: + - 'pkg/addonmanager/cloudevents/*.go' + - 'test/integration/cloudevents/**' + branches: + - main + - release-* + +env: + GO_VERSION: '1.21' + GO_REQUIRED_MIN_VERSION: '' + +permissions: + contents: read + +jobs: + integration: + name: cloudevents-integration + runs-on: ubuntu-latest + steps: + - name: checkout code + uses: actions/checkout@v4 + - name: install Go + uses: actions/setup-go@v3 + with: + go-version: ${{ env.GO_VERSION }} + - name: integration + 
run: make test-cloudevents-integration diff --git a/.github/workflows/go-presubmit.yml b/.github/workflows/go-presubmit.yml index 823c2be26..8f24bf54a 100644 --- a/.github/workflows/go-presubmit.yml +++ b/.github/workflows/go-presubmit.yml @@ -119,6 +119,37 @@ jobs: env: KUBECONFIG: /home/runner/.kube/config + e2e-cloudevents: + name: e2e-cloudevents + runs-on: ubuntu-latest + steps: + - name: checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 1 + - name: install Go + uses: actions/setup-go@v3 + with: + go-version: ${{ env.GO_VERSION }} + - name: install imagebuilder + run: go install github.com/openshift/imagebuilder/cmd/imagebuilder@v1.2.3 + - name: addon-examples-image + run: imagebuilder --allow-pull -t quay.io/open-cluster-management/addon-examples:latest -t quay.io/ocm/addon-examples:latest -f ./build/Dockerfile.example . + - name: setup kind + uses: engineerd/setup-kind@v0.5.0 + with: + version: v0.11.1 + name: cluster1 + - name: Load image on the nodes of the cluster + run: | + kind load docker-image --name=cluster1 quay.io/open-cluster-management/addon-examples:latest + kind load docker-image --name=cluster1 quay.io/ocm/addon-examples:latest + - name: Run e2e test with cloudevents + run: | + make test-e2e-cloudevents + env: + KUBECONFIG: /home/runner/.kube/config + e2e-hosted: name: e2e-hosted runs-on: ubuntu-latest diff --git a/Makefile b/Makefile index 415d75d87..d42f025b8 100644 --- a/Makefile +++ b/Makefile @@ -56,6 +56,9 @@ verify: verify-gocilint deploy-ocm: examples/deploy/ocm/install.sh +deploy-ocm-cloudevents: + examples/deploy/ocm-cloudevents/install.sh + deploy-hosted-ocm: examples/deploy/hosted-ocm/install.sh @@ -71,6 +74,12 @@ deploy-helloworld: ensure-kustomize $(KUSTOMIZE) build examples/deploy/addon/helloworld | $(KUBECTL) apply -f - mv examples/deploy/addon/helloworld/kustomization.yaml.tmp examples/deploy/addon/helloworld/kustomization.yaml +deploy-helloworld-cloudevents: ensure-kustomize + cp 
examples/deploy/addon/helloworld-cloudevents/kustomization.yaml examples/deploy/addon/helloworld-cloudevents/kustomization.yaml.tmp + cd examples/deploy/addon/helloworld-cloudevents && ../../../../$(KUSTOMIZE) edit set image quay.io/open-cluster-management/addon-examples=$(EXAMPLE_IMAGE_NAME) + $(KUSTOMIZE) build examples/deploy/addon/helloworld-cloudevents | $(KUBECTL) apply -f - + mv examples/deploy/addon/helloworld-cloudevents/kustomization.yaml.tmp examples/deploy/addon/helloworld-cloudevents/kustomization.yaml + deploy-helloworld-helm: ensure-kustomize cp examples/deploy/addon/helloworld-helm/kustomization.yaml examples/deploy/addon/helloworld-helm/kustomization.yaml.tmp cd examples/deploy/addon/helloworld-helm && ../../../../$(KUSTOMIZE) edit set image quay.io/open-cluster-management/addon-examples=$(EXAMPLE_IMAGE_NAME) @@ -111,6 +120,9 @@ undeploy-busybox: ensure-kustomize undeploy-helloworld: ensure-kustomize $(KUSTOMIZE) build examples/deploy/addon/helloworld | $(KUBECTL) delete --ignore-not-found -f - +undeploy-helloworld-cloudevents: ensure-kustomize + $(KUSTOMIZE) build examples/deploy/addon/helloworld-cloudevents | $(KUBECTL) delete --ignore-not-found -f - + undeploy-helloworld-helm: ensure-kustomize $(KUSTOMIZE) build examples/deploy/addon/helloworld-helm | $(KUBECTL) delete --ignore-not-found -f - @@ -129,6 +141,12 @@ build-e2e: test-e2e: build-e2e deploy-ocm deploy-helloworld deploy-helloworld-helm ./e2e.test -test.v -ginkgo.v +build-e2e-cloudevents: + go test -c ./test/e2ecloudevents + +test-e2e-cloudevents: build-e2e-cloudevents deploy-ocm-cloudevents deploy-helloworld-cloudevents + ./e2ecloudevents.test -test.v -ginkgo.v + build-hosted-e2e: go test -c ./test/e2ehosted diff --git a/build/Dockerfile.example b/build/Dockerfile.example index fafd0f4d7..04a821f8c 100644 --- a/build/Dockerfile.example +++ b/build/Dockerfile.example @@ -8,6 +8,7 @@ RUN make build --warn-undefined-variables FROM registry.access.redhat.com/ubi8/ubi-minimal:latest COPY 
--from=builder /go/src/open-cluster-management.io/addon-framework/busybox / COPY --from=builder /go/src/open-cluster-management.io/addon-framework/helloworld / +COPY --from=builder /go/src/open-cluster-management.io/addon-framework/helloworld_cloudevents / COPY --from=builder /go/src/open-cluster-management.io/addon-framework/helloworld_helm / COPY --from=builder /go/src/open-cluster-management.io/addon-framework/helloworld_hosted / diff --git a/cmd/example/helloworld_cloudevents/main.go b/cmd/example/helloworld_cloudevents/main.go new file mode 100644 index 000000000..f1e22841a --- /dev/null +++ b/cmd/example/helloworld_cloudevents/main.go @@ -0,0 +1,145 @@ +package main + +import ( + "context" + goflag "flag" + "fmt" + "math/rand" + "os" + "time" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + utilrand "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/client-go/rest" + utilflag "k8s.io/component-base/cli/flag" + logs "k8s.io/component-base/logs" + "k8s.io/klog/v2" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + + "open-cluster-management.io/addon-framework/examples/helloworld_agent" + "open-cluster-management.io/addon-framework/examples/helloworld_cloudevents" + "open-cluster-management.io/addon-framework/pkg/addonfactory" + "open-cluster-management.io/addon-framework/pkg/addonmanager/cloudevents" + cmdfactory "open-cluster-management.io/addon-framework/pkg/cmd/factory" + "open-cluster-management.io/addon-framework/pkg/utils" + "open-cluster-management.io/addon-framework/pkg/version" +) + +func main() { + rand.Seed(time.Now().UTC().UnixNano()) + + pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc) + pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) + + logs.AddFlags(pflag.CommandLine) + logs.InitLogs() + defer logs.FlushLogs() + + command := newCommand() + if err := command.Execute(); err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } +} + +func newCommand() *cobra.Command { + cmd := 
&cobra.Command{ + Use: "addon", + Short: "helloworld example addon", + Run: func(cmd *cobra.Command, args []string) { + if err := cmd.Help(); err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + } + os.Exit(1) + }, + } + + if v := version.Get().String(); len(v) == 0 { + cmd.Version = "" + } else { + cmd.Version = v + } + + cmd.AddCommand(newControllerCommand()) + cmd.AddCommand(helloworld_agent.NewAgentCommand(helloworld_cloudevents.AddonName)) + + return cmd +} + +func newControllerCommand() *cobra.Command { + o := cloudevents.NewCloudEventsOptions() + c := &addManagerConfig{cloudeventsOptions: o} + cmd := cmdfactory. + NewControllerCommandConfig("helloworld-addon-controller", version.Get(), c.runController). + NewCommand() + cmd.Use = "controller" + cmd.Short = "Start the addon controller with cloudevents" + o.AddFlags(cmd) + + return cmd +} + +// addManagerConfig holds cloudevents configuration for addon manager +type addManagerConfig struct { + cloudeventsOptions *cloudevents.CloudEventsOptions +} + +func (c *addManagerConfig) runController(ctx context.Context, kubeConfig *rest.Config) error { + addonClient, err := addonv1alpha1client.NewForConfig(kubeConfig) + if err != nil { + return err + } + + mgr, err := cloudevents.New(kubeConfig, c.cloudeventsOptions) + if err != nil { + return err + } + + registrationOption := helloworld_cloudevents.NewRegistrationOption( + kubeConfig, + helloworld_cloudevents.AddonName, + utilrand.String(5), + ) + + // Set agent install namespace from addon deployment config if it exists + registrationOption.AgentInstallNamespace = utils.AgentInstallNamespaceFromDeploymentConfigFunc( + utils.NewAddOnDeploymentConfigGetter(addonClient), + ) + + agentAddon, err := addonfactory.NewAgentAddonFactory(helloworld_cloudevents.AddonName, helloworld_cloudevents.FS, "manifests/templates"). + WithConfigGVRs(utils.AddOnDeploymentConfigGVR). 
+ WithGetValuesFuncs( + helloworld_cloudevents.GetDefaultValues, + addonfactory.GetAddOnDeploymentConfigValues( + utils.NewAddOnDeploymentConfigGetter(addonClient), + addonfactory.ToAddOnDeploymentConfigValues, + addonfactory.ToImageOverrideValuesFunc("Image", helloworld_cloudevents.DefaultImage), + ), + ). + WithAgentRegistrationOption(registrationOption). + WithAgentInstallNamespace( + utils.AgentInstallNamespaceFromDeploymentConfigFunc( + utils.NewAddOnDeploymentConfigGetter(addonClient), + ), + ). + WithAgentHealthProber(helloworld_cloudevents.AgentHealthProber()). + BuildTemplateAgentAddon() + if err != nil { + klog.Errorf("failed to build agent %v", err) + return err + } + + err = mgr.AddAgent(agentAddon) + if err != nil { + klog.Fatal(err) + } + + err = mgr.Start(ctx) + if err != nil { + klog.Fatal(err) + } + <-ctx.Done() + + return nil +} diff --git a/examples/deploy/addon/helloworld-cloudevents/kustomization.yaml b/examples/deploy/addon/helloworld-cloudevents/kustomization.yaml new file mode 100644 index 000000000..6e31a765c --- /dev/null +++ b/examples/deploy/addon/helloworld-cloudevents/kustomization.yaml @@ -0,0 +1,28 @@ +namespace: open-cluster-management + +resources: +- resources/cluster_role.yaml +- resources/cluster_role_binding.yaml +- resources/service_account.yaml +- resources/managed_clusterset_binding.yaml +- resources/placement.yaml +- resources/addon_deployment_config.yaml +- resources/helloworld_cloudevents_clustermanagementaddon.yaml +- resources/helloworld_cloudevents_controller.yaml +- resources/work-driver-config.yaml + +images: +- name: quay.io/open-cluster-management/addon-examples + newName: quay.io/open-cluster-management/addon-examples + newTag: latest +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +vars: + - name: EXAMPLE_IMAGE_NAME + objref: + apiVersion: apps/v1 + kind: Deployment + name: helloworldcloudevents-controller + fieldref: + fieldpath: 
spec.template.spec.containers.[name=helloworldcloudevents-controller].image diff --git a/examples/deploy/addon/helloworld-cloudevents/resources/addon_deployment_config.yaml b/examples/deploy/addon/helloworld-cloudevents/resources/addon_deployment_config.yaml new file mode 100644 index 000000000..d29abd622 --- /dev/null +++ b/examples/deploy/addon/helloworld-cloudevents/resources/addon_deployment_config.yaml @@ -0,0 +1,6 @@ +apiVersion: addon.open-cluster-management.io/v1alpha1 +kind: AddOnDeploymentConfig +metadata: + name: global +spec: + agentInstallNamespace: open-cluster-management-agent-addon diff --git a/examples/deploy/addon/helloworld-cloudevents/resources/cluster_role.yaml b/examples/deploy/addon/helloworld-cloudevents/resources/cluster_role.yaml new file mode 100644 index 000000000..e3623c705 --- /dev/null +++ b/examples/deploy/addon/helloworld-cloudevents/resources/cluster_role.yaml @@ -0,0 +1,50 @@ + kind: ClusterRole + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: helloworldcloudevents-addon + rules: + - apiGroups: [""] + resources: ["configmaps", "events"] + verbs: ["get", "list", "watch", "create", "update", "delete", "deletecollection", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: ["rbac.authorization.k8s.io"] + resources: ["roles", "rolebindings"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["get", "create"] + - apiGroups: ["certificates.k8s.io"] + resources: ["certificatesigningrequests", "certificatesigningrequests/approval"] + verbs: ["get", "list", "watch", "create", "update"] + - apiGroups: ["certificates.k8s.io"] + resources: ["signers"] + verbs: ["approve"] + - apiGroups: ["cluster.open-cluster-management.io"] + resources: ["managedclusters"] + verbs: ["get", "list", "watch"] + - apiGroups: 
["work.open-cluster-management.io"] + resources: ["manifestworks"] + verbs: ["create", "update", "get", "list", "watch", "delete", "deletecollection", "patch"] + - apiGroups: ["addon.open-cluster-management.io"] + resources: ["managedclusteraddons/finalizers"] + verbs: ["update"] + - apiGroups: [ "addon.open-cluster-management.io" ] + resources: [ "clustermanagementaddons/finalizers" ] + verbs: [ "update" ] + - apiGroups: [ "addon.open-cluster-management.io" ] + resources: [ "clustermanagementaddons/status" ] + verbs: ["update", "patch"] + - apiGroups: ["addon.open-cluster-management.io"] + resources: ["clustermanagementaddons"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["addon.open-cluster-management.io"] + resources: ["managedclusteraddons"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: ["addon.open-cluster-management.io"] + resources: ["managedclusteraddons/status"] + verbs: ["update", "patch"] + - apiGroups: ["addon.open-cluster-management.io"] + resources: ["addondeploymentconfigs"] + verbs: ["get", "list", "watch"] diff --git a/examples/deploy/addon/helloworld-cloudevents/resources/cluster_role_binding.yaml b/examples/deploy/addon/helloworld-cloudevents/resources/cluster_role_binding.yaml new file mode 100644 index 000000000..88a1823aa --- /dev/null +++ b/examples/deploy/addon/helloworld-cloudevents/resources/cluster_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: helloworldcloudevents-addon +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: helloworldcloudevents-addon +subjects: + - kind: ServiceAccount + name: helloworldcloudevents-addon-sa + namespace: open-cluster-management diff --git a/examples/deploy/addon/helloworld-cloudevents/resources/helloworld_cloudevents_clustermanagementaddon.yaml b/examples/deploy/addon/helloworld-cloudevents/resources/helloworld_cloudevents_clustermanagementaddon.yaml new file 
mode 100644 index 000000000..dcfec9820 --- /dev/null +++ b/examples/deploy/addon/helloworld-cloudevents/resources/helloworld_cloudevents_clustermanagementaddon.yaml @@ -0,0 +1,21 @@ +apiVersion: addon.open-cluster-management.io/v1alpha1 +kind: ClusterManagementAddOn +metadata: + name: helloworldcloudevents +spec: + addOnMeta: + displayName: helloworldcloudevents + description: "helloworldcloudevents is an example addon using cloudevents created by go template" + supportedConfigs: + - group: addon.open-cluster-management.io + resource: addondeploymentconfigs + installStrategy: + type: Placements + placements: + - name: global + namespace: open-cluster-management + configs: + - group: addon.open-cluster-management.io + resource: addondeploymentconfigs + name: global + namespace: open-cluster-management diff --git a/examples/deploy/addon/helloworld-cloudevents/resources/helloworld_cloudevents_controller.yaml b/examples/deploy/addon/helloworld-cloudevents/resources/helloworld_cloudevents_controller.yaml new file mode 100644 index 000000000..9bbb3bbe5 --- /dev/null +++ b/examples/deploy/addon/helloworld-cloudevents/resources/helloworld_cloudevents_controller.yaml @@ -0,0 +1,43 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: helloworldcloudevents-controller + labels: + app: helloworldcloudevents-controller +spec: + replicas: 1 + selector: + matchLabels: + app: helloworldcloudevents-controller + template: + metadata: + labels: + app: helloworldcloudevents-controller + spec: + serviceAccountName: helloworldcloudevents-addon-sa + containers: + - name: helloworldcloudevents-controller + image: quay.io/open-cluster-management/addon-examples + imagePullPolicy: IfNotPresent + env: + - name: EXAMPLE_IMAGE_NAME + value: $(EXAMPLE_IMAGE_NAME) + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + args: + - "/helloworld_cloudevents" + - "controller" + - "--work-driver=mqtt" + - "--work-driver-config=/var/run/secrets/hub/config.yaml" + - 
"--cloudevents-client-id=addon-manager-$(POD_NAME)" + - "--source-id=addon-manager" + volumeMounts: + - mountPath: /var/run/secrets/hub + name: workdriverconfig + readOnly: true + volumes: + - name: workdriverconfig + secret: + secretName: work-driver-config \ No newline at end of file diff --git a/examples/deploy/addon/helloworld-cloudevents/resources/managed_clusterset_binding.yaml b/examples/deploy/addon/helloworld-cloudevents/resources/managed_clusterset_binding.yaml new file mode 100644 index 000000000..7b1ce9ca7 --- /dev/null +++ b/examples/deploy/addon/helloworld-cloudevents/resources/managed_clusterset_binding.yaml @@ -0,0 +1,6 @@ +apiVersion: cluster.open-cluster-management.io/v1beta2 +kind: ManagedClusterSetBinding +metadata: + name: global +spec: + clusterSet: global diff --git a/examples/deploy/addon/helloworld-cloudevents/resources/placement.yaml b/examples/deploy/addon/helloworld-cloudevents/resources/placement.yaml new file mode 100644 index 000000000..da500bac2 --- /dev/null +++ b/examples/deploy/addon/helloworld-cloudevents/resources/placement.yaml @@ -0,0 +1,10 @@ +apiVersion: cluster.open-cluster-management.io/v1beta1 +kind: Placement +metadata: + name: global +spec: + clusterSets: + - global + predicates: + - requiredClusterSelector: + labelSelector: {} diff --git a/examples/deploy/addon/helloworld-cloudevents/resources/service_account.yaml b/examples/deploy/addon/helloworld-cloudevents/resources/service_account.yaml new file mode 100644 index 000000000..ba111648c --- /dev/null +++ b/examples/deploy/addon/helloworld-cloudevents/resources/service_account.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: helloworldcloudevents-addon-sa diff --git a/examples/deploy/addon/helloworld-cloudevents/resources/work-driver-config.yaml b/examples/deploy/addon/helloworld-cloudevents/resources/work-driver-config.yaml new file mode 100644 index 000000000..5070201c9 --- /dev/null +++ 
b/examples/deploy/addon/helloworld-cloudevents/resources/work-driver-config.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Secret +metadata: + name: work-driver-config +stringData: + config.yaml: | + brokerHost: mosquitto.mqtt:1883 + topics: + sourceEvents: sources/addon-manager/clusters/+/sourceevents + agentEvents: sources/addon-manager/clusters/+/agentevents + sourceBroadcast: sources/addon-manager/sourcebroadcast diff --git a/examples/deploy/mqtt/mqtt-broker.yaml b/examples/deploy/mqtt/mqtt-broker.yaml new file mode 100644 index 000000000..afa7e657e --- /dev/null +++ b/examples/deploy/mqtt/mqtt-broker.yaml @@ -0,0 +1,68 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: mqtt +--- +apiVersion: v1 +kind: Service +metadata: + name: mosquitto + namespace: mqtt +spec: + ports: + - name: mosquitto + protocol: TCP + port: 1883 + targetPort: 1883 + selector: + name: mosquitto + sessionAffinity: None + type: ClusterIP +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: mosquitto + namespace: mqtt +spec: + replicas: 1 + selector: + matchLabels: + name: mosquitto + strategy: + type: Recreate + template: + metadata: + labels: + name: mosquitto + spec: + containers: + - name: mosquitto + image: eclipse-mosquitto:2.0.18 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 1883 + name: mosquitto + volumeMounts: + - name: mosquitto-persistent-storage + mountPath: /mosquitto/data + - name: mosquitto-config + mountPath: /mosquitto/config/mosquitto.conf + subPath: mosquitto.conf + volumes: + - name: mosquitto-persistent-storage + emptyDir: {} + - name: mosquitto-config + configMap: + name: mosquitto +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: mosquitto + namespace: mqtt +data: + mosquitto.conf: | + listener 1883 0.0.0.0 + allow_anonymous true diff --git a/examples/deploy/ocm-cloudevents/install.sh b/examples/deploy/ocm-cloudevents/install.sh new file mode 100755 index 000000000..3ef0b26be --- /dev/null +++ 
b/examples/deploy/ocm-cloudevents/install.sh @@ -0,0 +1,87 @@ +#!/bin/bash + +set -o nounset +set -o pipefail + +KUBECTL=${KUBECTL:-kubectl} + +BUILD_DIR="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +DEPLOY_DIR="$(dirname "$BUILD_DIR")" +EXAMPLE_DIR="$(dirname "$DEPLOY_DIR")" +REPO_DIR="$(dirname "$EXAMPLE_DIR")" +WORK_DIR="${REPO_DIR}/_output" +CLUSTERADM="${WORK_DIR}/bin/clusteradm" + +export PATH=$PATH:${WORK_DIR}/bin + +echo "############ Download clusteradm" +mkdir -p "${WORK_DIR}/bin" +wget -qO- https://github.com/open-cluster-management-io/clusteradm/releases/latest/download/clusteradm_${GOHOSTOS}_${GOHOSTARCH}.tar.gz | sudo tar -xvz -C ${WORK_DIR}/bin/ +chmod +x "${CLUSTERADM}" + +echo "############ Init hub" +${CLUSTERADM} init --wait --bundle-version=latest +joincmd=$(${CLUSTERADM} get token | grep clusteradm) + +echo "############ Init agent as cluster1" +$(echo ${joincmd} --force-internal-endpoint-lookup --wait --bundle-version=latest | sed "s//${MANAGED_CLUSTER_NAME}/g") + +echo "############ Accept join of cluster1" +${CLUSTERADM} accept --clusters ${MANAGED_CLUSTER_NAME} --wait + +echo "############ All-in-one env is installed successfully!!" 
+ +echo "############ Deploy mqtt broker" +${KUBECTL} apply -f ${DEPLOY_DIR}/mqtt/mqtt-broker.yaml + +echo "############ Configure the work-agent" +${KUBECTL} -n open-cluster-management scale --replicas=0 deployment/klusterlet + +cat << EOF | ${KUBECTL} -n open-cluster-management-agent apply -f - +apiVersion: v1 +kind: Secret +metadata: + name: work-driver-config +stringData: + config.yaml: | + brokerHost: mosquitto.mqtt:1883 + topics: + sourceEvents: sources/addon-manager/clusters/${MANAGED_CLUSTER_NAME}/sourceevents + agentEvents: sources/addon-manager/clusters/${MANAGED_CLUSTER_NAME}/agentevents + agentBroadcast: clusters/${MANAGED_CLUSTER_NAME}/agentbroadcast +EOF + +# patch klusterlet-work-agent deployment to use mqtt as workload source driver +${KUBECTL} -n open-cluster-management-agent patch deployment/klusterlet-work-agent --type=json \ + -p='[ + {"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--cloudevents-client-codecs=manifestbundle"}, + {"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--cloudevents-client-id=work-agent-$(POD_NAME)"}, + {"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--workload-source-driver=mqtt"}, + {"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--workload-source-config=/var/run/secrets/hub/config.yaml"} + ]' + +${KUBECTL} -n open-cluster-management-agent patch deployment/klusterlet-work-agent --type=json \ + -p='[{"op": "add", "path": "/spec/template/spec/volumes/-", "value": {"name": "workdriverconfig","secret": {"secretName": "work-driver-config"}}}]' + +${KUBECTL} -n open-cluster-management-agent patch deployment/klusterlet-work-agent --type=json \ + -p='[{"op": "add", "path": "/spec/template/spec/containers/0/volumeMounts/-", "value": {"name": "workdriverconfig","mountPath": "/var/run/secrets/hub"}}]' + +${KUBECTL} -n open-cluster-management-agent scale --replicas=1 deployment/klusterlet-work-agent +${KUBECTL} -n 
open-cluster-management-agent rollout status deployment/klusterlet-work-agent --timeout=120s +${KUBECTL} -n open-cluster-management-agent get pod -l app=klusterlet-manifestwork-agent + +# TODO: add live probe for the work-agent to check if it is connected to the mqtt broker +isRunning=false +for i in {1..10}; do + if ${KUBECTL} -n open-cluster-management-agent logs deployment/klusterlet-work-agent | grep "subscribing to topics"; then + echo "klusterlet-work-agent is subscribing to topics from mqtt broker" + isRunning=true + break + fi + sleep 12 +done + +if [ "$isRunning" = false ]; then + echo "timeout waiting for klusterlet-work-agent to subscribe to topics from mqtt broker" + exit 1 +fi diff --git a/examples/helloworld_cloudevents/helloworld.go b/examples/helloworld_cloudevents/helloworld.go new file mode 100644 index 000000000..3ec453dd9 --- /dev/null +++ b/examples/helloworld_cloudevents/helloworld.go @@ -0,0 +1,95 @@ +package helloworld_cloudevents + +import ( + "embed" + "fmt" + "os" + + "k8s.io/client-go/rest" + "open-cluster-management.io/addon-framework/examples/rbac" + "open-cluster-management.io/addon-framework/pkg/addonfactory" + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/utils" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + workapiv1 "open-cluster-management.io/api/work/v1" +) + +const ( + DefaultImage = "quay.io/open-cluster-management/addon-examples:latest" + AddonName = "helloworldcloudevents" + InstallationNamespace = "open-cluster-management-agent-addon" +) + +//go:embed manifests +//go:embed manifests/templates +var FS embed.FS + +func NewRegistrationOption(kubeConfig *rest.Config, addonName, agentName string) *agent.RegistrationOption { + return &agent.RegistrationOption{ + CSRConfigurations: agent.KubeClientSignerConfigurations(addonName, agentName), + CSRApproveCheck: utils.DefaultCSRApprover(agentName), 
+		PermissionConfig: rbac.AddonRBAC(kubeConfig),
+	}
+}
+
+// GetDefaultValues builds the template values for the agent manifests: the
+// hub kubeconfig secret name, the managed cluster name and the agent image
+// (overridable through the EXAMPLE_IMAGE_NAME environment variable).
+func GetDefaultValues(cluster *clusterv1.ManagedCluster,
+	addon *addonapiv1alpha1.ManagedClusterAddOn) (addonfactory.Values, error) {
+
+	image := os.Getenv("EXAMPLE_IMAGE_NAME")
+	if len(image) == 0 {
+		image = DefaultImage
+	}
+
+	manifestConfig := struct {
+		KubeConfigSecret string
+		ClusterName      string
+		Image            string
+	}{
+		KubeConfigSecret: fmt.Sprintf("%s-hub-kubeconfig", addon.Name),
+		ClusterName:      cluster.Name,
+		Image:            image,
+	}
+
+	return addonfactory.StructToValues(manifestConfig), nil
+}
+
+// AgentHealthProber returns a work-based health prober that watches the
+// ReadyReplicas feedback value of the agent deployment and reports the addon
+// healthy once at least one replica is ready.
+func AgentHealthProber() *agent.HealthProber {
+	return &agent.HealthProber{
+		Type: agent.HealthProberTypeWork,
+		WorkProber: &agent.WorkHealthProber{
+			ProbeFields: []agent.ProbeField{
+				{
+					ResourceIdentifier: workapiv1.ResourceIdentifier{
+						Group:     "apps",
+						Resource:  "deployments",
+						Name:      "helloworldcloudevents-agent",
+						Namespace: InstallationNamespace,
+					},
+					ProbeRules: []workapiv1.FeedbackRule{
+						{
+							Type: workapiv1.WellKnownStatusType,
+						},
+					},
+				},
+			},
+			HealthCheck: func(identifier workapiv1.ResourceIdentifier, result workapiv1.StatusFeedbackResult) error {
+				if len(result.Values) == 0 {
+					return fmt.Errorf("no values are probed for deployment %s/%s", identifier.Namespace, identifier.Name)
+				}
+				for _, value := range result.Values {
+					if value.Name != "ReadyReplicas" {
+						continue
+					}
+
+					if *value.Value.Integer >= 1 {
+						return nil
+					}
+
+					// fixed typo: "deployement" -> "deployment"
+					return fmt.Errorf("readyReplica is %d for deployment %s/%s", *value.Value.Integer, identifier.Namespace, identifier.Name)
+				}
+				return fmt.Errorf("readyReplica is not probed")
+			},
+		},
+	}
+}
diff --git a/examples/helloworld_cloudevents/helloworld_test.go b/examples/helloworld_cloudevents/helloworld_test.go
new file mode 100644
index 000000000..9939cd9be
--- /dev/null
+++ b/examples/helloworld_cloudevents/helloworld_test.go
@@ -0,0 +1,192 @@
+package helloworld_cloudevents
+
+import (
+	"testing"
+
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	utilrand "k8s.io/apimachinery/pkg/util/rand"
+	addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+	fakeaddon "open-cluster-management.io/api/client/addon/clientset/versioned/fake"
+	clusterv1 "open-cluster-management.io/api/cluster/v1"
+
+	"open-cluster-management.io/addon-framework/pkg/addonfactory"
+	"open-cluster-management.io/addon-framework/pkg/addonmanager/addontesting"
+	"open-cluster-management.io/addon-framework/pkg/utils"
+)
+
+var (
+	nodeSelector = map[string]string{"kubernetes.io/os": "linux"}
+	tolerations  = []corev1.Toleration{{Key: "foo", Operator: corev1.TolerationOpExists, Effect: corev1.TaintEffectNoExecute}}
+)
+
+// TestManifestAddonAgent verifies that the template agent addon renders the
+// expected deployment for the default values, an image override annotation,
+// and an AddOnDeploymentConfig with node placement.
+func TestManifestAddonAgent(t *testing.T) {
+	cases := []struct {
+		name                string
+		managedCluster      *clusterv1.ManagedCluster
+		managedClusterAddOn *addonapiv1alpha1.ManagedClusterAddOn
+		configs             []runtime.Object
+		verifyDeployment    func(t *testing.T, objs []runtime.Object)
+	}{
+		{
+			name:                "no configs",
+			managedCluster:      addontesting.NewManagedCluster("cluster1"),
+			managedClusterAddOn: addontesting.NewAddon("helloworldcloudevents", "cluster1"),
+			configs:             []runtime.Object{},
+			verifyDeployment: func(t *testing.T, objs []runtime.Object) {
+				deployment := findHelloWorldCloudEventsDeployment(objs)
+				if deployment == nil {
+					t.Fatalf("expected deployment, but failed")
+				}
+
+				if deployment.Name != "helloworldcloudevents-agent" {
+					t.Errorf("unexpected deployment name %s", deployment.Name)
+				}
+
+				if deployment.Namespace != addonfactory.AddonDefaultInstallNamespace {
+					t.Errorf("unexpected deployment namespace %s", deployment.Namespace)
+				}
+
+				if deployment.Spec.Template.Spec.Containers[0].Image != DefaultImage {
+					t.Errorf("unexpected image %s", deployment.Spec.Template.Spec.Containers[0].Image)
+				}
+			},
+		},
+		{
+			name:           "override image with annotation",
+			managedCluster: addontesting.NewManagedCluster("cluster1"),
+			managedClusterAddOn: func() *addonapiv1alpha1.ManagedClusterAddOn {
+				addon := addontesting.NewAddon("test", "cluster1")
+				addon.Annotations = map[string]string{
+					"addon.open-cluster-management.io/values": `{"Image":"quay.io/test:test"}`}
+				return addon
+			}(),
+			configs: []runtime.Object{},
+			verifyDeployment: func(t *testing.T, objs []runtime.Object) {
+				deployment := findHelloWorldCloudEventsDeployment(objs)
+				if deployment == nil {
+					t.Fatalf("expected deployment, but failed")
+				}
+
+				if deployment.Name != "helloworldcloudevents-agent" {
+					t.Errorf("unexpected deployment name %s", deployment.Name)
+				}
+
+				if deployment.Namespace != addonfactory.AddonDefaultInstallNamespace {
+					t.Errorf("unexpected deployment namespace %s", deployment.Namespace)
+				}
+
+				if deployment.Spec.Template.Spec.Containers[0].Image != "quay.io/test:test" {
+					t.Errorf("unexpected image %s", deployment.Spec.Template.Spec.Containers[0].Image)
+				}
+			},
+		},
+		{
+			name:           "with addon deployment config",
+			managedCluster: addontesting.NewManagedCluster("cluster1"),
+			managedClusterAddOn: func() *addonapiv1alpha1.ManagedClusterAddOn {
+				addon := addontesting.NewAddon("test", "cluster1")
+				addon.Status.ConfigReferences = []addonapiv1alpha1.ConfigReference{
+					{
+						ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
+							Group:    "addon.open-cluster-management.io",
+							Resource: "addondeploymentconfigs",
+						},
+						ConfigReferent: addonapiv1alpha1.ConfigReferent{
+							Namespace: "cluster1",
+							Name:      "config",
+						},
+					},
+				}
+				return addon
+			}(),
+			configs: []runtime.Object{
+				&addonapiv1alpha1.AddOnDeploymentConfig{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "config",
+						Namespace: "cluster1",
+					},
+					Spec: addonapiv1alpha1.AddOnDeploymentConfigSpec{
+						NodePlacement: &addonapiv1alpha1.NodePlacement{
+							Tolerations:  tolerations,
+							NodeSelector: nodeSelector,
+						},
+					},
+				},
+			},
+			verifyDeployment: func(t *testing.T, objs []runtime.Object) {
+				deployment := findHelloWorldCloudEventsDeployment(objs)
+				if deployment == nil {
+					t.Fatalf("expected deployment, but failed")
+				}
+
+				if deployment.Name != "helloworldcloudevents-agent" {
+					t.Errorf("unexpected deployment name %s", deployment.Name)
+				}
+
+				if deployment.Namespace != addonfactory.AddonDefaultInstallNamespace {
+					t.Errorf("unexpected deployment namespace %s", deployment.Namespace)
+				}
+
+				if deployment.Spec.Template.Spec.Containers[0].Image != DefaultImage {
+					t.Errorf("unexpected image %s", deployment.Spec.Template.Spec.Containers[0].Image)
+				}
+
+				// fixed typo: "nodeSeletcor" -> "nodeSelector"
+				if !equality.Semantic.DeepEqual(deployment.Spec.Template.Spec.NodeSelector, nodeSelector) {
+					t.Errorf("unexpected nodeSelector %v", deployment.Spec.Template.Spec.NodeSelector)
+				}
+
+				if !equality.Semantic.DeepEqual(deployment.Spec.Template.Spec.Tolerations, tolerations) {
+					t.Errorf("unexpected tolerations %v", deployment.Spec.Template.Spec.Tolerations)
+				}
+			},
+		},
+	}
+
+	for _, c := range cases {
+		c := c
+		t.Run(c.name, func(t *testing.T) {
+			fakeAddonClient := fakeaddon.NewSimpleClientset(c.configs...)
+
+			agentAddon, err := addonfactory.NewAgentAddonFactory(AddonName, FS, "manifests/templates").
+				WithConfigGVRs(utils.AddOnDeploymentConfigGVR).
+				WithGetValuesFuncs(
+					GetDefaultValues,
+					addonfactory.GetAddOnDeploymentConfigValues(
+						addonfactory.NewAddOnDeploymentConfigGetter(fakeAddonClient),
+						addonfactory.ToAddOnDeploymentConfigValues,
+					),
+					addonfactory.GetValuesFromAddonAnnotation,
+				).
+				WithAgentRegistrationOption(NewRegistrationOption(nil, AddonName, utilrand.String(5))).
+				WithAgentHealthProber(AgentHealthProber()).
+				BuildTemplateAgentAddon()
+			// use t.Fatalf instead of klog.Fatalf: klog.Fatalf would kill the
+			// whole test binary and bypass the testing framework entirely.
+			if err != nil {
+				t.Fatalf("failed to build agent %v", err)
+			}
+
+			objects, err := agentAddon.Manifests(c.managedCluster, c.managedClusterAddOn)
+			if err != nil {
+				t.Fatalf("failed to get manifests %v", err)
+			}
+
+			if len(objects) != 3 {
+				t.Fatalf("expected 3 manifests, but %v", objects)
+			}
+
+			c.verifyDeployment(t, objects)
+		})
+	}
+}
+
+// findHelloWorldCloudEventsDeployment returns the first Deployment in the
+// rendered manifests, or nil when none is present.
+func findHelloWorldCloudEventsDeployment(objs []runtime.Object) *appsv1.Deployment {
+	for _, obj := range objs {
+		if deployment, ok := obj.(*appsv1.Deployment); ok {
+			return deployment
+		}
+	}
+
+	return nil
+}
diff --git a/examples/helloworld_cloudevents/manifests/templates/clusterrolebinding.yaml b/examples/helloworld_cloudevents/manifests/templates/clusterrolebinding.yaml
new file mode 100644
index 000000000..f0271f974
--- /dev/null
+++ b/examples/helloworld_cloudevents/manifests/templates/clusterrolebinding.yaml
@@ -0,0 +1,12 @@
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: helloworldcloudevents-agent
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin
+subjects:
+  - kind: ServiceAccount
+    name: helloworldcloudevents-agent-sa
+    namespace: {{ .AddonInstallNamespace }}
diff --git a/examples/helloworld_cloudevents/manifests/templates/deployment.yaml b/examples/helloworld_cloudevents/manifests/templates/deployment.yaml
new file mode 100644
index 000000000..f439d8ba7
--- /dev/null
+++ b/examples/helloworld_cloudevents/manifests/templates/deployment.yaml
@@ -0,0 +1,68 @@
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: helloworldcloudevents-agent
+  namespace: {{ .AddonInstallNamespace }}
+  labels:
+    app: helloworldcloudevents-agent
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: helloworldcloudevents-agent
+  template:
+    metadata:
+      labels:
+        app: helloworldcloudevents-agent
+    spec:
+      serviceAccountName: helloworldcloudevents-agent-sa
+{{- if .NodeSelector }}
+      nodeSelector:
+      {{- range $key, $value :=
.NodeSelector }} + "{{ $key }}": "{{ $value }}" + {{- end }} +{{- end }} +{{- if .Tolerations }} + tolerations: + {{- range $toleration := .Tolerations }} + - key: "{{ $toleration.Key }}" + value: "{{ $toleration.Value }}" + effect: "{{ $toleration.Effect }}" + operator: "{{ $toleration.Operator }}" + {{- if $toleration.TolerationSeconds }} + tolerationSeconds: {{ $toleration.TolerationSeconds }} + {{- end }} + {{- end }} +{{- end }} + volumes: + - name: hub-config + secret: + secretName: {{ .KubeConfigSecret }} + containers: + - name: helloworldcloudevents-agent + image: {{ .Image }} + imagePullPolicy: IfNotPresent +{{- if or .HTTPProxy .HTTPSProxy}} + env: + {{- if .HTTPProxy }} + - name: HTTP_PROXY + value: {{ .HTTPProxy }} + {{- end }} + {{- if .HTTPSProxy }} + - name: HTTPS_PROXY + value: {{ .HTTPSProxy }} + {{- end }} + {{- if .NoProxy }} + - name: NO_PROXY + value: {{ .NoProxy }} + {{- end }} +{{- end }} + args: + - "/helloworld_cloudevents" + - "agent" + - "--hub-kubeconfig=/var/run/hub/kubeconfig" + - "--cluster-name={{ .ClusterName }}" + - "--addon-namespace={{ .AddonInstallNamespace }}" + volumeMounts: + - name: hub-config + mountPath: /var/run/hub diff --git a/examples/helloworld_cloudevents/manifests/templates/serviceaccount.yaml b/examples/helloworld_cloudevents/manifests/templates/serviceaccount.yaml new file mode 100644 index 000000000..0a2b1ecbf --- /dev/null +++ b/examples/helloworld_cloudevents/manifests/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: helloworldcloudevents-agent-sa + namespace: {{ .AddonInstallNamespace }} diff --git a/go.mod b/go.mod index 2c5b3fc3d..6bab039c6 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,7 @@ go 1.21 require ( github.com/evanphx/json-patch v5.7.0+incompatible github.com/fatih/structs v1.1.0 + github.com/mochi-mqtt/server/v2 v2.4.6 github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.31.1 github.com/openshift/build-machinery-go 
v0.0.0-20231128094528-1e9b1b0595c8 @@ -12,6 +13,7 @@ require ( github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.4 + gopkg.in/yaml.v2 v2.4.0 helm.sh/helm/v3 v3.14.2 k8s.io/api v0.29.2 k8s.io/apiextensions-apiserver v0.29.0 @@ -22,7 +24,7 @@ require ( k8s.io/klog/v2 v2.120.1 k8s.io/utils v0.0.0-20240310230437-4693a0247e57 open-cluster-management.io/api v0.13.0 - open-cluster-management.io/sdk-go v0.13.1-0.20240416030555-aa744f426379 + open-cluster-management.io/sdk-go v0.13.1-0.20240416062924-20307e6fe090 sigs.k8s.io/controller-runtime v0.17.2 ) @@ -36,12 +38,16 @@ require ( github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect + github.com/bwmarrin/snowflake v0.3.0 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995 // indirect + github.com/cloudevents/sdk-go/v2 v2.15.2 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/eclipse/paho.golang v0.11.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch/v5 v5.8.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect @@ -60,6 +66,7 @@ require ( github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/websocket v1.5.1 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/huandu/xstrings v1.4.0 // indirect @@ -81,6 +88,7 @@ require ( github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common 
v0.45.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect + github.com/rs/xid v1.4.0 // indirect github.com/shopspring/decimal v1.3.1 // indirect github.com/spf13/cast v1.5.0 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect @@ -119,7 +127,6 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/kms v0.29.0 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect diff --git a/go.sum b/go.sum index 488f458c7..8235a81be 100644 --- a/go.sum +++ b/go.sum @@ -22,10 +22,16 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/bwmarrin/snowflake v0.3.0 h1:xm67bEhkKh6ij1790JB83OujPR5CzNe8QuQqAgISZN0= +github.com/bwmarrin/snowflake v0.3.0/go.mod h1:NdZxfVWX+oR6y2K0o6qAYv6gIOP9rjG0/E9WsDpxqwE= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995 h1:pXyRKZ0T5WoB6X9QnHS5cEyW0Got39bNQIECxGUKVO4= +github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995/go.mod h1:mz9oS2Yhh/S7cvrrsgGMMR+6Shy0ZyL2lDN1sHQO1wE= +github.com/cloudevents/sdk-go/v2 v2.15.2 h1:54+I5xQEnI73RBhWHxbI1XJcqOFOVJN85vb41+8mHUc= +github.com/cloudevents/sdk-go/v2 v2.15.2/go.mod 
h1:lL7kSWAE/V8VI4Wh0jbL2v/jvqsm6tjmaQBSvxcv4uE= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= @@ -41,6 +47,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/eclipse/paho.golang v0.11.0 h1:6Avu5dkkCfcB61/y1vx+XrPQ0oAl4TPYtY0uw3HbQdM= +github.com/eclipse/paho.golang v0.11.0/go.mod h1:rhrV37IEwauUyx8FHrvmXOKo+QRKng5ncoN1vJiJMcs= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= @@ -118,6 +126,7 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLe github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= @@ -137,6 +146,8 @@ github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= 
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg= +github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -162,6 +173,8 @@ github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HK github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mochi-mqtt/server/v2 v2.4.6 h1:3iaQLG4hD/2vSh0Rwu4+h//KUcWR2zAKQIxhJuoJmCg= +github.com/mochi-mqtt/server/v2 v2.4.6/go.mod h1:M1lZnLbyowXUyQBIlHYlX1wasxXqv/qFWwQxAzfphwA= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -200,6 +213,8 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod 
h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY= +github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= @@ -223,6 +238,7 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= @@ -230,6 +246,8 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer 
v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -311,6 +329,7 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= @@ -422,8 +441,8 @@ k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0g k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= open-cluster-management.io/api v0.13.0 h1:dlcJEZlNlE0DmSDctK2s7iWKg9l+Tgb0V78Z040nMuk= open-cluster-management.io/api v0.13.0/go.mod h1:CuCPEzXDvOyxBB0H1d1eSeajbHqaeGEKq9c63vQc63w= -open-cluster-management.io/sdk-go v0.13.1-0.20240416030555-aa744f426379 h1:8jXVHfgy+wgXq1mrWC1mTieoP77WsAAHNpzILMIzWB0= -open-cluster-management.io/sdk-go v0.13.1-0.20240416030555-aa744f426379/go.mod h1:w2OaxtCyegxeyFLU42UQ3oxUz01QdsBQkcHI17T/l48= +open-cluster-management.io/sdk-go v0.13.1-0.20240416062924-20307e6fe090 h1:zFmHuW+ztdfUUNslqNW+H1WEcfdEUQHoRDbmdajX340= +open-cluster-management.io/sdk-go v0.13.1-0.20240416062924-20307e6fe090/go.mod h1:w2OaxtCyegxeyFLU42UQ3oxUz01QdsBQkcHI17T/l48= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= 
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/controller-runtime v0.17.2 h1:FwHwD1CTUemg0pW2otk7/U5/i5m2ymzvOXdbeGOUvw0= diff --git a/pkg/addonmanager/addontesting/helpers.go b/pkg/addonmanager/addontesting/helpers.go index c0a6c43de..c33b1a014 100644 --- a/pkg/addonmanager/addontesting/helpers.go +++ b/pkg/addonmanager/addontesting/helpers.go @@ -16,6 +16,8 @@ import ( addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" clusterv1 "open-cluster-management.io/api/cluster/v1" workapiv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload" "open-cluster-management.io/addon-framework/pkg/basecontroller/events" ) @@ -187,6 +189,9 @@ func NewManifestWork(name, namespace string, objects ...*unstructured.Unstructur ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, + Annotations: map[string]string{ + common.CloudEventsDataTypeAnnotationKey: payload.ManifestBundleEventDataType.String(), + }, }, Spec: workapiv1.ManifestWorkSpec{ Workload: workapiv1.ManifestsTemplate{ diff --git a/pkg/addonmanager/cloudevents/manager.go b/pkg/addonmanager/cloudevents/manager.go new file mode 100644 index 000000000..2203fdf58 --- /dev/null +++ b/pkg/addonmanager/cloudevents/manager.go @@ -0,0 +1,337 @@ +package cloudevents + +import ( + "context" + "fmt" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/dynamic/dynamicinformer" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + addoninformers 
"open-cluster-management.io/api/client/addon/informers/externalversions"
+	clusterv1client "open-cluster-management.io/api/client/cluster/clientset/versioned"
+	clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions"
+	workclientset "open-cluster-management.io/api/client/work/clientset/versioned"
+	workinformers "open-cluster-management.io/api/client/work/informers/externalversions"
+	workv1informers "open-cluster-management.io/api/client/work/informers/externalversions/work/v1"
+
+	"open-cluster-management.io/addon-framework/pkg/addonmanager"
+	"open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig"
+	"open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy"
+	"open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate"
+	"open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/cmaconfig"
+	"open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/cmamanagedby"
+	"open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration"
+	"open-cluster-management.io/addon-framework/pkg/agent"
+	"open-cluster-management.io/addon-framework/pkg/basecontroller/factory"
+	"open-cluster-management.io/addon-framework/pkg/index"
+	"open-cluster-management.io/addon-framework/pkg/utils"
+	cloudeventswork "open-cluster-management.io/sdk-go/pkg/cloudevents/work"
+	"open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/codec"
+)
+
+// cloudeventsAddonManager is an addonmanager.AddonManager implementation
+// whose ManifestWork client is built on top of a configurable cloudevents
+// work driver (kube, mqtt or grpc) instead of only the hub kube apiserver.
+type cloudeventsAddonManager struct {
+	addonAgents  map[string]agent.AgentAddon
+	addonConfigs map[schema.GroupVersionResource]bool
+	config       *rest.Config
+	options      *CloudEventsOptions
+	syncContexts []factory.SyncContext
+}
+
+// AddAgent registers an agent addon. The addon name must be non-empty and
+// not registered already.
+func (a *cloudeventsAddonManager) AddAgent(addon agent.AgentAddon) error {
+	addonOption := addon.GetAgentAddonOptions()
+	if len(addonOption.AddonName) == 0 {
+		return fmt.Errorf("addon name should be set")
+	}
+	if _, ok := a.addonAgents[addonOption.AddonName]; ok {
+		return fmt.Errorf("an agent is added for the addon already")
+	}
+	a.addonAgents[addonOption.AddonName] = addon
+	return nil
+}
+
+// Trigger requeues the given cluster/addon pair on every registered sync context.
+func (a *cloudeventsAddonManager) Trigger(clusterName, addonName string) {
+	// fixed local name typo: syncContex -> syncCtx
+	for _, syncCtx := range a.syncContexts {
+		syncCtx.Queue().Add(fmt.Sprintf("%s/%s", clusterName, addonName))
+	}
+}
+
+// Start builds the ManifestWork client from the configured work driver, wires
+// up all shared informers and indexers, starts the controllers and then runs
+// the informers until ctx is cancelled.
+func (a *cloudeventsAddonManager) Start(ctx context.Context) error {
+	var addonNames []string
+	for key := range a.addonAgents {
+		addonNames = append(addonNames, key)
+	}
+
+	// To support sending ManifestWorks to different drivers (like the Kubernetes apiserver or MQTT broker), we build
+	// a ManifestWork client that implements the ManifestWorkInterface and a ManifestWork informer based on the
+	// configured driver. Refer to the Event Based Manifestwork proposal in the enhancements repo for more details.
+	_, config, err := cloudeventswork.NewConfigLoader(a.options.WorkDriver, a.options.WorkDriverConfig).
+		WithKubeConfig(a.config).
+		LoadConfig()
+	if err != nil {
+		return err
+	}
+
+	// Watch only the objects labeled with the names of the managed addons.
+	// Using a separate, filtered informer keeps memory consumption low.
+	// (The same selector is shared by the work and kube informer factories.)
+	addonLabelSelector := metav1.FormatLabelSelector(&metav1.LabelSelector{
+		MatchExpressions: []metav1.LabelSelectorRequirement{
+			{
+				Key:      addonv1alpha1.AddonLabelKey,
+				Operator: metav1.LabelSelectorOpIn,
+				Values:   addonNames,
+			},
+		},
+	})
+	workInformOption := workinformers.WithTweakListOptions(
+		func(listOptions *metav1.ListOptions) {
+			listOptions.LabelSelector = addonLabelSelector
+		},
+	)
+
+	clientHolder, err := cloudeventswork.NewClientHolderBuilder(config).
+		WithClientID(a.options.CloudEventsClientID).
+		WithSourceID(a.options.SourceID).
+		WithInformerConfig(10*time.Minute, workInformOption).
+		WithCodecs(codec.NewManifestBundleCodec()).
+		NewSourceClientHolder(ctx)
+	if err != nil {
+		return err
+	}
+
+	kubeClient, err := kubernetes.NewForConfig(a.config)
+	if err != nil {
+		return err
+	}
+
+	dynamicClient, err := dynamic.NewForConfig(a.config)
+	if err != nil {
+		return err
+	}
+
+	addonClient, err := addonv1alpha1client.NewForConfig(a.config)
+	if err != nil {
+		return err
+	}
+
+	clusterClient, err := clusterv1client.NewForConfig(a.config)
+	if err != nil {
+		return err
+	}
+
+	addonInformers := addoninformers.NewSharedInformerFactory(addonClient, 10*time.Minute)
+	clusterInformers := clusterv1informers.NewSharedInformerFactory(clusterClient, 10*time.Minute)
+	dynamicInformers := dynamicinformer.NewDynamicSharedInformerFactory(dynamicClient, 10*time.Minute)
+
+	kubeInformers := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, 10*time.Minute,
+		kubeinformers.WithTweakListOptions(func(listOptions *metav1.ListOptions) {
+			listOptions.LabelSelector = addonLabelSelector
+		}),
+	)
+
+	workClient := clientHolder.WorkInterface()
+	workInformers := clientHolder.ManifestWorkInformer()
+
+	// Indexers used by the addon deploy controller.
+	err = workInformers.Informer().AddIndexers(
+		cache.Indexers{
+			index.ManifestWorkByAddon:           index.IndexManifestWorkByAddon,
+			index.ManifestWorkByHostedAddon:     index.IndexManifestWorkByHostedAddon,
+			index.ManifestWorkHookByHostedAddon: index.IndexManifestWorkHookByHostedAddon,
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	err = addonInformers.Addon().V1alpha1().ManagedClusterAddOns().Informer().AddIndexers(
+		cache.Indexers{
+			index.ManagedClusterAddonByNamespace: index.IndexManagedClusterAddonByNamespace, // addonDeployController
+			index.ManagedClusterAddonByName:      index.IndexManagedClusterAddonByName,      // addonConfigController
+			index.AddonByConfig:                  index.IndexAddonByConfig,                  // addonConfigController
+		},
+	)
+	if err != nil {
+		return err
+	}
+
+	err = addonInformers.Addon().V1alpha1().ClusterManagementAddOns().Informer().AddIndexers(
+		cache.Indexers{
+			index.ClusterManagementAddonByConfig:    index.IndexClusterManagementAddonByConfig,    // managementAddonConfigController
+			index.ClusterManagementAddonByPlacement: index.IndexClusterManagementAddonByPlacement, // addonConfigController
+		})
+	if err != nil {
+		return err
+	}
+
+	err = a.StartWithInformers(ctx, workClient, workInformers, kubeInformers, addonInformers, clusterInformers, dynamicInformers)
+	if err != nil {
+		return err
+	}
+
+	kubeInformers.Start(ctx.Done())
+	go workInformers.Informer().Run(ctx.Done())
+	addonInformers.Start(ctx.Done())
+	clusterInformers.Start(ctx.Done())
+	dynamicInformers.Start(ctx.Done())
+	return nil
+}
+
+// StartWithInformers creates and runs all the addon controllers against the
+// given clients and informer factories. The caller is responsible for
+// starting the informer factories afterwards.
+func (a *cloudeventsAddonManager) StartWithInformers(ctx context.Context,
+	workClient workclientset.Interface,
+	workInformers workv1informers.ManifestWorkInformer,
+	kubeInformers kubeinformers.SharedInformerFactory,
+	addonInformers addoninformers.SharedInformerFactory,
+	clusterInformers clusterv1informers.SharedInformerFactory,
+	dynamicInformers dynamicinformer.DynamicSharedInformerFactory) error {
+
+	kubeClient, err := kubernetes.NewForConfig(a.config)
+	if err != nil {
+		return err
+	}
+
+	addonClient, err := addonv1alpha1client.NewForConfig(a.config)
+	if err != nil {
+		return err
+	}
+
+	v1CSRSupported, v1beta1Supported, err := utils.IsCSRSupported(kubeClient)
+	if err != nil {
+		return err
+	}
+
+	// Collect the union of all config GVRs supported by the registered agents.
+	for _, agentImpl := range a.addonAgents {
+		for _, configGVR := range agentImpl.GetAgentAddonOptions().SupportedConfigGVRs {
+			a.addonConfigs[configGVR] = true
+		}
+	}
+
+	deployController := agentdeploy.NewAddonDeployController(
+		workClient,
+		addonClient,
+		clusterInformers.Cluster().V1().ManagedClusters(),
+		addonInformers.Addon().V1alpha1().ManagedClusterAddOns(),
+		workInformers,
+		a.addonAgents,
+	)
+
+	registrationController := registration.NewAddonRegistrationController(
+		addonClient,
+		clusterInformers.Cluster().V1().ManagedClusters(),
+		addonInformers.Addon().V1alpha1().ManagedClusterAddOns(),
+		a.addonAgents,
+	)
+
+	// This controller is used during migrating addons to be managed by addon-manager.
+	// This should be removed when the migration is done.
+	// The migration plan refer to https://github.com/open-cluster-management-io/ocm/issues/355.
+	managementAddonController := cmamanagedby.NewCMAManagedByController(
+		addonClient,
+		addonInformers.Addon().V1alpha1().ClusterManagementAddOns(),
+		a.addonAgents,
+		utils.FilterByAddonName(a.addonAgents),
+	)
+
+	var addonConfigController, managementAddonConfigController factory.Controller
+	if len(a.addonConfigs) != 0 {
+		addonConfigController = addonconfig.NewAddonConfigController(
+			addonClient,
+			addonInformers.Addon().V1alpha1().ManagedClusterAddOns(),
+			addonInformers.Addon().V1alpha1().ClusterManagementAddOns(),
+			dynamicInformers,
+			a.addonConfigs,
+			utils.FilterByAddonName(a.addonAgents),
+		)
+		managementAddonConfigController = cmaconfig.NewCMAConfigController(
+			addonClient,
+			addonInformers.Addon().V1alpha1().ClusterManagementAddOns(),
+			dynamicInformers,
+			a.addonConfigs,
+			utils.FilterByAddonName(a.addonAgents),
+		)
+	}
+
+	var csrApproveController factory.Controller
+	var csrSignController factory.Controller
+	// Spawn the following controllers only if v1 CSR api is supported in the
+	// hub cluster. Under v1beta1 CSR api, all the CSR objects will be signed
+	// by the kube-controller-manager so custom CSR controller should be
+	// disabled to avoid conflict.
+	if v1CSRSupported {
+		csrApproveController = certificate.NewCSRApprovingController(
+			kubeClient,
+			clusterInformers.Cluster().V1().ManagedClusters(),
+			kubeInformers.Certificates().V1().CertificateSigningRequests(),
+			nil,
+			addonInformers.Addon().V1alpha1().ManagedClusterAddOns(),
+			a.addonAgents,
+		)
+		csrSignController = certificate.NewCSRSignController(
+			kubeClient,
+			clusterInformers.Cluster().V1().ManagedClusters(),
+			kubeInformers.Certificates().V1().CertificateSigningRequests(),
+			addonInformers.Addon().V1alpha1().ManagedClusterAddOns(),
+			a.addonAgents,
+		)
+	} else if v1beta1Supported {
+		csrApproveController = certificate.NewCSRApprovingController(
+			kubeClient,
+			clusterInformers.Cluster().V1().ManagedClusters(),
+			nil,
+			kubeInformers.Certificates().V1beta1().CertificateSigningRequests(),
+			addonInformers.Addon().V1alpha1().ManagedClusterAddOns(),
+			a.addonAgents,
+		)
+	}
+
+	a.syncContexts = append(a.syncContexts, deployController.SyncContext())
+
+	go deployController.Run(ctx, 1)
+	go registrationController.Run(ctx, 1)
+	go managementAddonController.Run(ctx, 1)
+
+	if addonConfigController != nil {
+		go addonConfigController.Run(ctx, 1)
+	}
+	if managementAddonConfigController != nil {
+		go managementAddonConfigController.Run(ctx, 1)
+	}
+	if csrApproveController != nil {
+		go csrApproveController.Run(ctx, 1)
+	}
+	if csrSignController != nil {
+		go csrSignController.Run(ctx, 1)
+	}
+	return nil
+}
+
+// New returns a new addon manager with the given config and optional options
+func New(config *rest.Config, opts *CloudEventsOptions) (addonmanager.AddonManager, error) {
+	cloudeventsAddonManager := &cloudeventsAddonManager{
+		config:       config,
+		syncContexts: []factory.SyncContext{},
+		addonConfigs: map[schema.GroupVersionResource]bool{},
+		addonAgents:  map[string]agent.AgentAddon{},
+	}
+	// set options from the given options
+	cloudeventsAddonManager.options = opts
+
+	return cloudeventsAddonManager, nil
+}
diff --git a/pkg/addonmanager/cloudevents/options.go b/pkg/addonmanager/cloudevents/options.go
new file mode 100644
index 000000000..c6657e136
--- /dev/null
+++ b/pkg/addonmanager/cloudevents/options.go
@@ -0,0 +1,35 @@
+package cloudevents
+
+import (
+	"github.com/spf13/cobra"
+	cloudeventswork "open-cluster-management.io/sdk-go/pkg/cloudevents/work"
+)
+
+// CloudEventsOptions defines the flags for addon manager
+type CloudEventsOptions struct {
+	WorkDriver          string
+	WorkDriverConfig    string
+	CloudEventsClientID string
+	SourceID            string
+}
+
+// NewCloudEventsOptions returns the flags with default value set
+func NewCloudEventsOptions() *CloudEventsOptions {
+	return &CloudEventsOptions{
+		// set default work driver to kube
+		WorkDriver: cloudeventswork.ConfigTypeKube,
+	}
+}
+
+// AddFlags adds the cloudevents options flags to the given command
+func (o *CloudEventsOptions) AddFlags(cmd *cobra.Command) {
+	flags := cmd.Flags()
+	flags.StringVar(&o.WorkDriver, "work-driver",
+		o.WorkDriver, "The type of work driver, currently it can be kube, mqtt or grpc")
+	flags.StringVar(&o.WorkDriverConfig, "work-driver-config",
+		o.WorkDriverConfig, "The config file path of current work driver")
+	flags.StringVar(&o.CloudEventsClientID, "cloudevents-client-id",
+		o.CloudEventsClientID, "The ID of the cloudevents client when publishing works with cloudevents")
+	flags.StringVar(&o.SourceID, "source-id",
+		o.SourceID, "The ID of the source when publishing works with cloudevents")
+}
diff --git a/pkg/addonmanager/controllers/agentdeploy/utils.go b/pkg/addonmanager/controllers/agentdeploy/utils.go
index 7df50357a..5ebf07475 100644
--- a/pkg/addonmanager/controllers/agentdeploy/utils.go
+++ b/pkg/addonmanager/controllers/agentdeploy/utils.go
@@ -13,6 +13,8 @@ import (
 	clusterv1 "open-cluster-management.io/api/cluster/v1"
 	workapiv1 "open-cluster-management.io/api/work/v1"
 	workbuilder "open-cluster-management.io/sdk-go/pkg/apis/work/v1/builder"
+
"open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload" "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" "open-cluster-management.io/addon-framework/pkg/agent" @@ -90,6 +92,9 @@ func newManifestWork(addonNamespace, addonName, clusterName string, manifests [] Labels: map[string]string{ addonapiv1alpha1.AddonLabelKey: addonName, }, + Annotations: map[string]string{ + common.CloudEventsDataTypeAnnotationKey: payload.ManifestBundleEventDataType.String(), + }, }, Spec: workapiv1.ManifestWorkSpec{ Workload: workapiv1.ManifestsTemplate{ @@ -315,6 +320,14 @@ func (b *addonWorksBuilder) BuildDeployWorks(installMode, addonWorkNamespace str return nil, nil, err } + if len(annotations) == 0 { + annotations = map[string]string{ + common.CloudEventsDataTypeAnnotationKey: payload.ManifestBundleEventDataType.String(), + } + } else { + annotations[common.CloudEventsDataTypeAnnotationKey] = payload.ManifestBundleEventDataType.String() + } + return b.workBuilder.Build(deployObjects, newAddonWorkObjectMeta(b.processor.manifestWorkNamePrefix(addon.Namespace, addon.Name), addon.Name, addon.Namespace, addonWorkNamespace, owner), workbuilder.ExistingManifestWorksOption(existingWorks), diff --git a/pkg/addonmanager/manager.go b/pkg/addonmanager/manager.go index 57953b722..47db0f228 100644 --- a/pkg/addonmanager/manager.go +++ b/pkg/addonmanager/manager.go @@ -47,12 +47,12 @@ type AddonManager interface { Start(ctx context.Context) error // StartWithInformers starts all registered addon agent with the given informers. 
- StartWithInformers(ctx context.Context, - kubeInformers kubeinformers.SharedInformerFactory, - workInformers workv1informers.SharedInformerFactory, - addonInformers addoninformers.SharedInformerFactory, - clusterInformers clusterv1informers.SharedInformerFactory, - dynamicInformers dynamicinformer.DynamicSharedInformerFactory) error + // StartWithInformers(ctx context.Context, + // kubeInformers kubeinformers.SharedInformerFactory, + // workInformers workv1informers.SharedInformerFactory, + // addonInformers addoninformers.SharedInformerFactory, + // clusterInformers clusterv1informers.SharedInformerFactory, + // dynamicInformers dynamicinformer.DynamicSharedInformerFactory) error } type addonManager struct { diff --git a/test/e2ecloudevents/e2e_suite_test.go b/test/e2ecloudevents/e2e_suite_test.go new file mode 100644 index 000000000..14766faf2 --- /dev/null +++ b/test/e2ecloudevents/e2e_suite_test.go @@ -0,0 +1,155 @@ +package e2ecloudevents + +import ( + "context" + "fmt" + "os" + "strings" + "testing" + "time" + + ginkgo "github.com/onsi/ginkgo" + gomega "github.com/onsi/gomega" + + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/util/retry" + + certificatesv1 "k8s.io/api/certificates/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + addonclient "open-cluster-management.io/api/client/addon/clientset/versioned" + clusterclient "open-cluster-management.io/api/client/cluster/clientset/versioned" + clusterv1 "open-cluster-management.io/api/cluster/v1" +) + +func TestE2E(t *testing.T) { + gomega.RegisterFailHandler(ginkgo.Fail) + ginkgo.RunSpecs(t, "E2E suite") +} + +var ( + managedClusterName string + hubKubeClient kubernetes.Interface + hubAddOnClient addonclient.Interface + hubClusterClient clusterclient.Interface + clusterCfg *rest.Config +) + +// This suite is sensitive to the following 
environment variables: +// +// - MANAGED_CLUSTER_NAME sets the name of the cluster +// - KUBECONFIG is the location of the kubeconfig file to use +var _ = ginkgo.BeforeSuite(func() { + kubeconfig := os.Getenv("KUBECONFIG") + managedClusterName = os.Getenv("MANAGED_CLUSTER_NAME") + if managedClusterName == "" { + managedClusterName = "cluster1" + } + err := func() error { + var err error + clusterCfg, err = clientcmd.BuildConfigFromFlags("", kubeconfig) + if err != nil { + return err + } + + hubKubeClient, err = kubernetes.NewForConfig(clusterCfg) + if err != nil { + return err + } + + hubAddOnClient, err = addonclient.NewForConfig(clusterCfg) + if err != nil { + return err + } + + hubClusterClient, err = clusterclient.NewForConfig(clusterCfg) + + return err + }() + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + var csrs *certificatesv1.CertificateSigningRequestList + // Waiting for the CSR for ManagedCluster to exist + err = wait.Poll(1*time.Second, 120*time.Second, func() (bool, error) { + var err error + csrs, err = hubKubeClient.CertificatesV1().CertificateSigningRequests().List(context.TODO(), metav1.ListOptions{ + LabelSelector: fmt.Sprintf("open-cluster-management.io/cluster-name = %v", managedClusterName), + }) + if err != nil { + return false, err + } + + if len(csrs.Items) >= 1 { + return true, nil + } + + return false, nil + }) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + // Approving all pending CSRs + for i := range csrs.Items { + csr := &csrs.Items[i] + if !strings.HasPrefix(csr.Name, managedClusterName) { + continue + } + + err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + csr, err = hubKubeClient.CertificatesV1().CertificateSigningRequests().Get(context.TODO(), csr.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + // make the e2e test idempotent to ease debugging in local environment + for _, condition := range csr.Status.Conditions { + if condition.Type == certificatesv1.CertificateApproved { + if 
condition.Status == corev1.ConditionTrue { + return nil + } + } + } + + csr.Status.Conditions = append(csr.Status.Conditions, certificatesv1.CertificateSigningRequestCondition{ + Type: certificatesv1.CertificateApproved, + Status: corev1.ConditionTrue, + Reason: "Approved by E2E", + Message: "Approved as part of Loopback e2e", + }) + _, err := hubKubeClient.CertificatesV1().CertificateSigningRequests().UpdateApproval(context.TODO(), csr.Name, csr, metav1.UpdateOptions{}) + return err + }) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + } + + var managedCluster *clusterv1.ManagedCluster + // Waiting for ManagedCluster to exist + err = wait.Poll(1*time.Second, 120*time.Second, func() (bool, error) { + var err error + managedCluster, err = hubClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), managedClusterName, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return false, nil + } + if err != nil { + return false, err + } + return true, nil + }) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + // Accepting ManagedCluster + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + var err error + managedCluster, err = hubClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), managedCluster.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + managedCluster.Spec.HubAcceptsClient = true + managedCluster.Spec.LeaseDurationSeconds = 5 + _, err = hubClusterClient.ClusterV1().ManagedClusters().Update(context.TODO(), managedCluster, metav1.UpdateOptions{}) + return err + }) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) +}) diff --git a/test/e2ecloudevents/helloworld_cloudevents_test.go b/test/e2ecloudevents/helloworld_cloudevents_test.go new file mode 100644 index 000000000..fa9090536 --- /dev/null +++ b/test/e2ecloudevents/helloworld_cloudevents_test.go @@ -0,0 +1,536 @@ +package e2ecloudevents + +import ( + "context" + "fmt" + + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + 
"k8s.io/apimachinery/pkg/api/equality" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/rand" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" +) + +const ( + eventuallyTimeout = 300 // seconds + eventuallyInterval = 1 // seconds +) + +const ( + addonName = "helloworldcloudevents" + addonInstallNamespace = "open-cluster-management-agent-addon" + deployConfigName = "deploy-config" + deployImageOverrideConfigName = "image-override-deploy-config" + deployProxyConfigName = "proxy-deploy-config" + deployAgentInstallNamespaceConfigName = "agent-install-namespace-deploy-config" +) + +var ( + nodeSelector = map[string]string{"kubernetes.io/os": "linux"} + tolerations = []corev1.Toleration{{Key: "foo", Operator: corev1.TolerationOpExists, Effect: corev1.TaintEffectNoExecute}} + registries = []addonapiv1alpha1.ImageMirror{ + { + Source: "quay.io/open-cluster-management/addon-examples", + Mirror: "quay.io/ocm/addon-examples", + }, + } + + proxyConfig = addonapiv1alpha1.ProxyConfig{ + // settings of http proxy will not impact the communication between + // hub kube-apiserver and the add-on agent running on managed cluster. 
+ HTTPProxy: "http://127.0.0.1:3128", + NoProxy: "example.com", + } + + agentInstallNamespaceConfig = "test-ns" +) + +var _ = ginkgo.Describe("install/uninstall helloworldcloudevents addons", func() { + ginkgo.BeforeEach(func() { + gomega.Eventually(func() error { + _, err := hubClusterClient.ClusterV1().ManagedClusters().Get(context.Background(), managedClusterName, metav1.GetOptions{}) + if err != nil { + return err + } + + _, err = hubKubeClient.CoreV1().Namespaces().Get(context.Background(), managedClusterName, metav1.GetOptions{}) + if err != nil { + return err + } + + testNs := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: agentInstallNamespaceConfig, + }, + } + _, err = hubKubeClient.CoreV1().Namespaces().Create(context.Background(), testNs, metav1.CreateOptions{}) + if err != nil { + if errors.IsAlreadyExists(err) { + return nil + } + return err + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) + + ginkgo.AfterEach(func() { + ginkgo.By("Clean up the customized agent install namespace after each case.") + gomega.Eventually(func() error { + _, err := hubKubeClient.CoreV1().Namespaces().Get(context.Background(), + agentInstallNamespaceConfig, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return nil + } + + if err == nil { + errd := hubKubeClient.CoreV1().Namespaces().Delete(context.Background(), + agentInstallNamespaceConfig, metav1.DeleteOptions{}) + if errd != nil { + return errd + } + return fmt.Errorf("ns is deleting, need re-check if namespace is not found") + } + + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) + + ginkgo.It("addon should be worked", func() { + ginkgo.By("Make sure cma annotation managed by addon-manager is added") + gomega.Eventually(func() error { + cma, err := hubAddOnClient.AddonV1alpha1().ClusterManagementAddOns().Get(context.Background(), addonName, metav1.GetOptions{}) + if err != nil { + return err + } + + if 
cma.Annotations[addonapiv1alpha1.AddonLifecycleAnnotationKey] != addonapiv1alpha1.AddonLifecycleAddonManagerAnnotationValue { + return fmt.Errorf("addon should have annotation, but get %v", cma.Annotations) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + ginkgo.By("Make sure addon is available") + gomega.Eventually(func() error { + addon, err := hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), addonName, metav1.GetOptions{}) + if err != nil { + return err + } + + if !meta.IsStatusConditionTrue(addon.Status.Conditions, "ManifestApplied") { + return fmt.Errorf("addon should be applied to spoke, but get condition %v", addon.Status.Conditions) + } + + if !meta.IsStatusConditionTrue(addon.Status.Conditions, "Available") { + return fmt.Errorf("addon should be available on spoke, but get condition %v", addon.Status.Conditions) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + ginkgo.By("Make sure addon is functioning") + configmap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("config-%s", rand.String(6)), + Namespace: managedClusterName, + }, + Data: map[string]string{ + "key1": rand.String(6), + "key2": rand.String(6), + }, + } + + _, err := hubKubeClient.CoreV1().ConfigMaps(managedClusterName).Create(context.Background(), configmap, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + copyiedConfig, err := hubKubeClient.CoreV1().ConfigMaps(addonInstallNamespace).Get(context.Background(), configmap.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + if !apiequality.Semantic.DeepEqual(copyiedConfig.Data, configmap.Data) { + return fmt.Errorf("expected configmap is not correct, %v", copyiedConfig.Data) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + ginkgo.By("Prepare a 
AddOnDeploymentConfig for addon nodeSelector and tolerations") + gomega.Eventually(func() error { + return prepareAddOnDeploymentConfig(managedClusterName) + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + ginkgo.By("Add the configs to ManagedClusterAddOn") + gomega.Eventually(func() error { + addon, err := hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), addonName, metav1.GetOptions{}) + if err != nil { + return err + } + newAddon := addon.DeepCopy() + newAddon.Spec.Configs = []addonapiv1alpha1.AddOnConfig{ + { + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: "addon.open-cluster-management.io", + Resource: "addondeploymentconfigs", + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: managedClusterName, + Name: deployConfigName, + }, + }, + } + _, err = hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Update(context.Background(), newAddon, metav1.UpdateOptions{}) + if err != nil { + return err + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + ginkgo.By("Make sure addon is configured") + gomega.Eventually(func() error { + agentDeploy, err := hubKubeClient.AppsV1().Deployments(addonInstallNamespace).Get(context.Background(), "helloworldcloudevents-agent", metav1.GetOptions{}) + if err != nil { + return err + } + + if !equality.Semantic.DeepEqual(agentDeploy.Spec.Template.Spec.NodeSelector, nodeSelector) { + return fmt.Errorf("unexpected nodeSeletcor %v", agentDeploy.Spec.Template.Spec.NodeSelector) + } + + if !equality.Semantic.DeepEqual(agentDeploy.Spec.Template.Spec.Tolerations, tolerations) { + return fmt.Errorf("unexpected tolerations %v", agentDeploy.Spec.Template.Spec.Tolerations) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + ginkgo.By("Remove addon") + err = 
hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Delete(context.Background(), addonName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }) + + ginkgo.It("addon should be worked with proxy settings", func() { + ginkgo.By("Make sure addon is available") + gomega.Eventually(func() error { + addon, err := hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), addonName, metav1.GetOptions{}) + if err != nil { + return err + } + + if !meta.IsStatusConditionTrue(addon.Status.Conditions, "ManifestApplied") { + return fmt.Errorf("addon should be applied to spoke, but get condition %v", addon.Status.Conditions) + } + + if !meta.IsStatusConditionTrue(addon.Status.Conditions, "Available") { + return fmt.Errorf("addon should be available on spoke, but get condition %v", addon.Status.Conditions) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + ginkgo.By("Make sure addon is functioning") + configmap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("config-%s", rand.String(6)), + Namespace: managedClusterName, + }, + Data: map[string]string{ + "key1": rand.String(6), + "key2": rand.String(6), + }, + } + + _, err := hubKubeClient.CoreV1().ConfigMaps(managedClusterName).Create(context.Background(), configmap, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + copyiedConfig, err := hubKubeClient.CoreV1().ConfigMaps(addonInstallNamespace).Get(context.Background(), configmap.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + if !apiequality.Semantic.DeepEqual(copyiedConfig.Data, configmap.Data) { + return fmt.Errorf("expected configmap is not correct, %v", copyiedConfig.Data) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + ginkgo.By("Prepare a AddOnDeploymentConfig for proxy settings") + 
gomega.Eventually(func() error { + return prepareProxyConfigAddOnDeploymentConfig(managedClusterName) + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + ginkgo.By("Add the configs to ManagedClusterAddOn") + gomega.Eventually(func() error { + addon, err := hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), addonName, metav1.GetOptions{}) + if err != nil { + return err + } + newAddon := addon.DeepCopy() + newAddon.Spec.Configs = []addonapiv1alpha1.AddOnConfig{ + { + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: "addon.open-cluster-management.io", + Resource: "addondeploymentconfigs", + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: managedClusterName, + Name: deployProxyConfigName, + }, + }, + } + _, err = hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Update(context.Background(), newAddon, metav1.UpdateOptions{}) + if err != nil { + return err + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + ginkgo.By("Make sure addon is configured") + gomega.Eventually(func() error { + agentDeploy, err := hubKubeClient.AppsV1().Deployments(addonInstallNamespace).Get(context.Background(), "helloworldcloudevents-agent", metav1.GetOptions{}) + if err != nil { + return err + } + + containers := agentDeploy.Spec.Template.Spec.Containers + if len(containers) != 1 { + return fmt.Errorf("expect one container, but %v", containers) + } + + // check the proxy settings + deployProxyConfig := addonapiv1alpha1.ProxyConfig{} + for _, envVar := range containers[0].Env { + if envVar.Name == "HTTP_PROXY" { + deployProxyConfig.HTTPProxy = envVar.Value + } + + if envVar.Name == "HTTPS_PROXY" { + deployProxyConfig.HTTPSProxy = envVar.Value + } + + if envVar.Name == "NO_PROXY" { + deployProxyConfig.NoProxy = envVar.Value + } + } + + if !equality.Semantic.DeepEqual(proxyConfig, deployProxyConfig) { + return 
fmt.Errorf("expected proxy settings %v, but got %v", proxyConfig, deployProxyConfig) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + ginkgo.By("Remove addon") + err = hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Delete(context.Background(), addonName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }) + + ginkgo.It("addon registraion agent install namespace should work", func() { + ginkgo.By("Make sure addon is available") + gomega.Eventually(func() error { + addon, err := hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), addonName, metav1.GetOptions{}) + if err != nil { + return err + } + + if !meta.IsStatusConditionTrue(addon.Status.Conditions, "ManifestApplied") { + return fmt.Errorf("addon should be applied to spoke, but get condition %v", addon.Status.Conditions) + } + + if !meta.IsStatusConditionTrue(addon.Status.Conditions, "Available") { + return fmt.Errorf("addon should be available on spoke, but get condition %v", addon.Status.Conditions) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + ginkgo.By("Make sure addon is functioning") + configmap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("config-%s", rand.String(6)), + Namespace: managedClusterName, + }, + Data: map[string]string{ + "key1": rand.String(6), + "key2": rand.String(6), + }, + } + + _, err := hubKubeClient.CoreV1().ConfigMaps(managedClusterName).Create(context.Background(), configmap, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + copyiedConfig, err := hubKubeClient.CoreV1().ConfigMaps(addonInstallNamespace).Get(context.Background(), configmap.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + if !apiequality.Semantic.DeepEqual(copyiedConfig.Data, configmap.Data) { + return 
fmt.Errorf("expected configmap is not correct, %v", copyiedConfig.Data) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + ginkgo.By("Prepare a AddOnDeploymentConfig for addon agent install namespace") + gomega.Eventually(func() error { + return prepareAgentInstallNamespaceAddOnDeploymentConfig(managedClusterName) + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + ginkgo.By("Add the configs to ManagedClusterAddOn") + gomega.Eventually(func() error { + addon, err := hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get( + context.Background(), addonName, metav1.GetOptions{}) + if err != nil { + return err + } + newAddon := addon.DeepCopy() + newAddon.Spec.Configs = []addonapiv1alpha1.AddOnConfig{ + { + ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ + Group: "addon.open-cluster-management.io", + Resource: "addondeploymentconfigs", + }, + ConfigReferent: addonapiv1alpha1.ConfigReferent{ + Namespace: managedClusterName, + Name: deployAgentInstallNamespaceConfigName, + }, + }, + } + _, err = hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Update( + context.Background(), newAddon, metav1.UpdateOptions{}) + if err != nil { + return err + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + ginkgo.By("Make sure addon is configured") + gomega.Eventually(func() error { + _, err := hubKubeClient.CoreV1().Secrets(agentInstallNamespaceConfig).Get( + context.Background(), "helloworldcloudevents-hub-kubeconfig", metav1.GetOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + ginkgo.By("Remove addon") + err = hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Delete(context.Background(), addonName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }) +}) + +func prepareAddOnDeploymentConfig(namespace string) 
error { + _, err := hubAddOnClient.AddonV1alpha1().AddOnDeploymentConfigs(namespace).Get(context.Background(), deployConfigName, metav1.GetOptions{}) + if errors.IsNotFound(err) { + if _, err := hubAddOnClient.AddonV1alpha1().AddOnDeploymentConfigs(managedClusterName).Create( + context.Background(), + &addonapiv1alpha1.AddOnDeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: deployConfigName, + Namespace: namespace, + }, + Spec: addonapiv1alpha1.AddOnDeploymentConfigSpec{ + NodePlacement: &addonapiv1alpha1.NodePlacement{ + NodeSelector: nodeSelector, + Tolerations: tolerations, + }, + AgentInstallNamespace: addonInstallNamespace, + }, + }, + metav1.CreateOptions{}, + ); err != nil { + return err + } + + return nil + } + + return err +} + +func prepareImageOverrideAddOnDeploymentConfig(namespace string) error { + _, err := hubAddOnClient.AddonV1alpha1().AddOnDeploymentConfigs(namespace).Get( + context.Background(), deployImageOverrideConfigName, metav1.GetOptions{}) + if errors.IsNotFound(err) { + if _, err := hubAddOnClient.AddonV1alpha1().AddOnDeploymentConfigs(managedClusterName).Create( + context.Background(), + &addonapiv1alpha1.AddOnDeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: deployImageOverrideConfigName, + Namespace: namespace, + }, + Spec: addonapiv1alpha1.AddOnDeploymentConfigSpec{ + Registries: registries, + AgentInstallNamespace: addonInstallNamespace, + }, + }, + metav1.CreateOptions{}, + ); err != nil { + return err + } + + return nil + } + + return err +} + +func prepareProxyConfigAddOnDeploymentConfig(namespace string) error { + _, err := hubAddOnClient.AddonV1alpha1().AddOnDeploymentConfigs(namespace).Get(context.Background(), deployProxyConfigName, metav1.GetOptions{}) + if errors.IsNotFound(err) { + if _, err := hubAddOnClient.AddonV1alpha1().AddOnDeploymentConfigs(managedClusterName).Create( + context.Background(), + &addonapiv1alpha1.AddOnDeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: deployProxyConfigName, + 
Namespace: namespace, + }, + Spec: addonapiv1alpha1.AddOnDeploymentConfigSpec{ + ProxyConfig: proxyConfig, + AgentInstallNamespace: addonInstallNamespace, + }, + }, + metav1.CreateOptions{}, + ); err != nil { + return err + } + + return nil + } + + return err +} + +func prepareAgentInstallNamespaceAddOnDeploymentConfig(namespace string) error { + _, err := hubAddOnClient.AddonV1alpha1().AddOnDeploymentConfigs(namespace).Get( + context.Background(), deployAgentInstallNamespaceConfigName, metav1.GetOptions{}) + if errors.IsNotFound(err) { + if _, err := hubAddOnClient.AddonV1alpha1().AddOnDeploymentConfigs(managedClusterName).Create( + context.Background(), + &addonapiv1alpha1.AddOnDeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: deployAgentInstallNamespaceConfigName, + Namespace: namespace, + }, + Spec: addonapiv1alpha1.AddOnDeploymentConfigSpec{ + AgentInstallNamespace: agentInstallNamespaceConfig, + }, + }, + metav1.CreateOptions{}, + ); err != nil { + return err + } + + return nil + } + + return err +} diff --git a/test/integration-test.mk b/test/integration-test.mk index 96a771513..5042ba041 100644 --- a/test/integration-test.mk +++ b/test/integration-test.mk @@ -26,7 +26,15 @@ clean-integration-test: clean: clean-integration-test -test-integration: ensure-kubebuilder-tools - go test -c ./test/integration - ./integration.test -ginkgo.slowSpecThreshold=15 -ginkgo.v -ginkgo.failFast +test-kube-integration: ensure-kubebuilder-tools + go test -c ./test/integration/kube -o ./kube-integration.test + ./kube-integration.test -ginkgo.slowSpecThreshold=15 -ginkgo.v -ginkgo.failFast +.PHONY: test-kube-integration + +test-cloudevents-integration: ensure-kubebuilder-tools + go test -c ./test/integration/cloudevents -o ./cloudevents-integration.test + ./cloudevents-integration.test -ginkgo.slowSpecThreshold=15 -ginkgo.v -ginkgo.failFast +.PHONY: test-cloudevents-integration + +test-integration: test-kube-integration .PHONY: test-integration diff --git 
a/test/integration/cloudevents/agent_deploy_test.go b/test/integration/cloudevents/agent_deploy_test.go new file mode 100644 index 000000000..d55815336 --- /dev/null +++ b/test/integration/cloudevents/agent_deploy_test.go @@ -0,0 +1,559 @@ +package cloudevents + +import ( + "context" + "encoding/json" + "fmt" + "time" + + jsonpatch "github.com/evanphx/json-patch" + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/utils" + + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/rand" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" + workv1informers "open-cluster-management.io/api/client/work/informers/externalversions/work/v1" + workv1listers "open-cluster-management.io/api/client/work/listers/work/v1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + workv1 "open-cluster-management.io/api/work/v1" +) + +const ( + deploymentJson = `{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": "nginx-deployment", + "namespace": "default" + }, + "spec": { + "replicas": 1, + "selector": { + "matchLabels": { + "app": "nginx" + } + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "app": "nginx" + } + }, + "spec": { + "containers": [ + { + "image": "nginx:1.14.2", + "name": "nginx", + "ports": [ + { + "containerPort": 80, + "protocol": "TCP" + } + ] + } + ] + } + } + } + }` + + deploymentZeroReplicasJson = `{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": 
"nginx-deployment-no-replica", + "namespace": "default" + }, + "spec": { + "replicas": 0, + "selector": { + "matchLabels": { + "app": "nginx" + } + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "app": "nginx" + } + }, + "spec": { + "containers": [ + { + "image": "nginx:1.14.2", + "name": "nginx", + "ports": [ + { + "containerPort": 80, + "protocol": "TCP" + } + ] + } + ] + } + } + } + }` +) + +var _ = ginkgo.Describe("Agent deploy", func() { + var ctx context.Context + var cancel context.CancelFunc + var err error + var managedClusterName string + var cma *addonapiv1alpha1.ClusterManagementAddOn + // var agentWorkClient workclientset.Interface + var agentWorkClient workv1client.ManifestWorkInterface + var agentWorkInformer workv1informers.ManifestWorkInformer + var agentWorkLister workv1listers.ManifestWorkNamespaceLister + + ginkgo.BeforeEach(func() { + ctx, cancel = context.WithCancel(context.Background()) + suffix := rand.String(5) + managedClusterName = fmt.Sprintf("managedcluster-%s", suffix) + + managedCluster := &clusterv1.ManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: managedClusterName, + }, + Spec: clusterv1.ManagedClusterSpec{ + HubAcceptsClient: true, + }, + } + _, err = hubClusterClient.ClusterV1().ManagedClusters().Create(context.Background(), managedCluster, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: managedClusterName}} + _, err = hubKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + cma = newClusterManagementAddon(testAddonImpl.name) + cma, err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Create(context.Background(), + cma, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // start work agent for the managed cluster + ginkgo.By("Start agent for managed cluster") + clientHolder, 
err := startWorkAgent(ctx, managedClusterName) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + agentWorkInformer = clientHolder.ManifestWorkInformer() + agentWorkLister = agentWorkInformer.Lister().ManifestWorks(managedClusterName) + agentWorkClient = clientHolder.ManifestWorks(managedClusterName) + }) + + ginkgo.AfterEach(func() { + err = hubKubeClient.CoreV1().Namespaces().Delete(context.Background(), managedClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = hubClusterClient.ClusterV1().ManagedClusters().Delete(context.Background(), managedClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Delete(context.Background(), + testAddonImpl.name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // cancel the context to stop the work agent + cancel() + }) + + ginkgo.It("Should deploy agent when cma is managed by self successfully", func() { + obj := &unstructured.Unstructured{} + err := obj.UnmarshalJSON([]byte(deploymentJson)) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + testAddonImpl.manifests[managedClusterName] = []runtime.Object{obj} + testAddonImpl.prober = &agent.HealthProber{ + Type: agent.HealthProberTypeWork, + } + + // Create ManagedClusterAddOn + addon := &addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: testAddonImpl.name, + }, + Spec: addonapiv1alpha1.ManagedClusterAddOnSpec{ + InstallNamespace: "default", + }, + } + createManagedClusterAddOnwithOwnerRefs(managedClusterName, addon, cma) + + // Check the work is created + var work *workv1.ManifestWork + gomega.Eventually(func() error { + works, err := agentWorkLister.List(labels.Everything()) + if err != nil { + return fmt.Errorf("failed to list works: %v", err) + } + + if len(works) != 1 { + return fmt.Errorf("Unexpected number of work manifests") + } + + work = works[0] + if 
len(work.Spec.Workload.Manifests) != 1 { + return fmt.Errorf("Unexpected number of work manifests") + } + + if apiequality.Semantic.DeepEqual(work.Spec.Workload.Manifests[0].Raw, []byte(deploymentJson)) { + return fmt.Errorf("expected manifest is no correct, get %v", work.Spec.Workload.Manifests[0].Raw) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Update work status to trigger addon status + newWork := work.DeepCopy() + meta.SetStatusCondition(&newWork.Status.Conditions, metav1.Condition{Type: workv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied", ObservedGeneration: work.Generation}) + workBytes, err := json.Marshal(work) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + newWorkBytes, err := json.Marshal(newWork) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + patchBytes, err := jsonpatch.CreateMergePatch(workBytes, newWorkBytes) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = agentWorkClient.Patch(context.Background(), work.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // wait for addon to be applied + gomega.Eventually(func() error { + addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), testAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnManifestApplied) { + return fmt.Errorf("Unexpected addon applied condition, %v", addon.Status.Conditions) + } + if cond := meta.FindStatusCondition(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing); cond != nil { + return fmt.Errorf("expected no addon progressing condition, %v", addon.Status.Conditions) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // update work to available so addon becomes 
available + work, err = agentWorkClient.Get(context.Background(), work.Name, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + newWork = work.DeepCopy() + meta.SetStatusCondition(&newWork.Status.Conditions, metav1.Condition{Type: workv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable", ObservedGeneration: work.Generation}) + + workBytes, err = json.Marshal(work) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + newWorkBytes, err = json.Marshal(newWork) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + patchBytes, err = jsonpatch.CreateMergePatch(workBytes, newWorkBytes) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = agentWorkClient.Patch(context.Background(), work.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // wait for addon to be available + gomega.Eventually(func() error { + addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), testAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionAvailable) { + return fmt.Errorf("Unexpected addon available condition, %v", addon.Status.Conditions) + } + if cond := meta.FindStatusCondition(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing); cond != nil { + return fmt.Errorf("expected no addon progressing condition, %v", addon.Status.Conditions) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // do nothing if cluster is deleting and addon is not deleted + cluster, err := hubClusterClient.ClusterV1().ManagedClusters().Get(context.Background(), managedClusterName, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + cluster.SetFinalizers([]string{"cluster.open-cluster-management.io/api-resource-cleanup"}) + 
_, err = hubClusterClient.ClusterV1().ManagedClusters().Update(context.Background(), cluster, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = hubClusterClient.ClusterV1().ManagedClusters().Delete(context.Background(), managedClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + time.Sleep(5 * time.Second) // wait 5 seconds to sync + gomega.Eventually(func() error { + _, err = agentWorkClient.Get(context.Background(), work.Name, metav1.GetOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) + + ginkgo.It("Should deploy agent and get available with prober func", func() { + obj := &unstructured.Unstructured{} + err := obj.UnmarshalJSON([]byte(deploymentJson)) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + testAddonImpl.manifests[managedClusterName] = []runtime.Object{obj} + testAddonImpl.prober = utils.NewDeploymentProber(types.NamespacedName{Name: "nginx-deployment", Namespace: "default"}) + + addon := &addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: testAddonImpl.name, + }, + Spec: addonapiv1alpha1.ManagedClusterAddOnSpec{ + InstallNamespace: "default", + }, + } + createManagedClusterAddOnwithOwnerRefs(managedClusterName, addon, cma) + + // Check the work is created + var work *workv1.ManifestWork + gomega.Eventually(func() error { + works, err := agentWorkLister.List(labels.Everything()) + if err != nil { + return fmt.Errorf("failed to list works: %v", err) + } + + if len(works) != 1 { + return fmt.Errorf("Unexpected number of work manifests") + } + + work = works[0] + if len(work.Spec.Workload.Manifests) != 1 { + return fmt.Errorf("Unexpected number of work manifests") + } + + if len(work.Spec.ManifestConfigs) != 1 { + return fmt.Errorf("Unexpected number of work manifests configuration") + } + + if apiequality.Semantic.DeepEqual(work.Spec.Workload.Manifests[0].Raw, []byte(deploymentJson)) { + return 
fmt.Errorf("expected manifest is no correct, get %v", work.Spec.Workload.Manifests[0].Raw) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Update work status to trigger addon status + newWork := work.DeepCopy() + meta.SetStatusCondition(&newWork.Status.Conditions, metav1.Condition{Type: workv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied"}) + meta.SetStatusCondition(&newWork.Status.Conditions, metav1.Condition{Type: workv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable"}) + + replica := int64(1) + newWork.Status.ResourceStatus = workv1.ManifestResourceStatus{ + Manifests: []workv1.ManifestCondition{ + { + ResourceMeta: workv1.ManifestResourceMeta{ + Ordinal: 0, + Group: "apps", + Resource: "deployments", + Name: "nginx-deployment", + Namespace: "default", + }, + StatusFeedbacks: workv1.StatusFeedbackResult{ + Values: []workv1.FeedbackValue{ + { + Name: "Replicas", + Value: workv1.FieldValue{ + Type: workv1.Integer, + Integer: &replica, + }, + }, + { + Name: "ReadyReplicas", + Value: workv1.FieldValue{ + Type: workv1.Integer, + Integer: &replica, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: "Available", + Status: metav1.ConditionTrue, + Reason: "MinimumReplicasAvailable", + Message: "Deployment has minimum availability.", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + }, + }, + }, + } + + workBytes, err := json.Marshal(work) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + newWorkBytes, err := json.Marshal(newWork) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + patchBytes, err := jsonpatch.CreateMergePatch(workBytes, newWorkBytes) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = agentWorkClient.Patch(context.Background(), work.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // wait for addon to be available + gomega.Eventually(func() error { 
+ addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), testAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnManifestApplied) { + return fmt.Errorf("Unexpected addon applied condition, %v", addon.Status.Conditions) + } + if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionAvailable) { + return fmt.Errorf("Unexpected addon available condition, %v", addon.Status.Conditions) + } + if cond := meta.FindStatusCondition(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing); cond != nil { + return fmt.Errorf("expected no addon progressing condition, %v", addon.Status.Conditions) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) + + ginkgo.It("Should deploy agent and get available with deployment availability prober func", func() { + obj := &unstructured.Unstructured{} + err := obj.UnmarshalJSON([]byte(deploymentJson)) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + objZeroReplicaDeployment := &unstructured.Unstructured{} + err = objZeroReplicaDeployment.UnmarshalJSON([]byte(deploymentZeroReplicasJson)) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + testAddonImpl.manifests[managedClusterName] = []runtime.Object{obj, objZeroReplicaDeployment} + testAddonImpl.prober = &agent.HealthProber{ + Type: agent.HealthProberTypeDeploymentAvailability, + } + + addon := &addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: testAddonImpl.name, + }, + Spec: addonapiv1alpha1.ManagedClusterAddOnSpec{ + InstallNamespace: "default", + }, + } + createManagedClusterAddOnwithOwnerRefs(managedClusterName, addon, cma) + + var work *workv1.ManifestWork + gomega.Eventually(func() error { + works, err := agentWorkLister.List(labels.Everything()) + if err != nil { + 
return fmt.Errorf("failed to list works: %v", err) + } + + if len(works) != 1 { + return fmt.Errorf("Unexpected number of work manifests") + } + + work = works[0] + if len(work.Spec.Workload.Manifests) != 2 { + return fmt.Errorf("Unexpected number of work manifests: %d", len(work.Spec.Workload.Manifests)) + } + + if len(work.Spec.ManifestConfigs) != 2 { + return fmt.Errorf("Unexpected number of work manifests configuration: %d", len(work.Spec.ManifestConfigs)) + } + + if apiequality.Semantic.DeepEqual(work.Spec.Workload.Manifests[0].Raw, []byte(deploymentJson)) { + return fmt.Errorf("expected manifest is no correct, get %v", work.Spec.Workload.Manifests[0].Raw) + } + if apiequality.Semantic.DeepEqual(work.Spec.Workload.Manifests[1].Raw, []byte(deploymentZeroReplicasJson)) { + return fmt.Errorf("expected manifest is no correct, get %v", work.Spec.Workload.Manifests[1].Raw) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Update work status to trigger addon status + newWork := work.DeepCopy() + meta.SetStatusCondition(&newWork.Status.Conditions, metav1.Condition{Type: workv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied"}) + meta.SetStatusCondition(&newWork.Status.Conditions, metav1.Condition{Type: workv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable"}) + + replica := int64(1) + newWork.Status.ResourceStatus = workv1.ManifestResourceStatus{ + Manifests: []workv1.ManifestCondition{ + { + ResourceMeta: workv1.ManifestResourceMeta{ + Ordinal: 0, + Group: "apps", + Resource: "deployments", + Name: "nginx-deployment", + Namespace: "default", + }, + StatusFeedbacks: workv1.StatusFeedbackResult{ + Values: []workv1.FeedbackValue{ + { + Name: "Replicas", + Value: workv1.FieldValue{ + Type: workv1.Integer, + Integer: &replica, + }, + }, + { + Name: "ReadyReplicas", + Value: workv1.FieldValue{ + Type: workv1.Integer, + Integer: &replica, + }, + }, + }, + }, + Conditions: 
[]metav1.Condition{ + { + Type: "Available", + Status: metav1.ConditionTrue, + Reason: "MinimumReplicasAvailable", + Message: "Deployment has minimum availability.", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + }, + }, + }, + } + + workBytes, err := json.Marshal(work) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + newWorkBytes, err := json.Marshal(newWork) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + patchBytes, err := jsonpatch.CreateMergePatch(workBytes, newWorkBytes) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = agentWorkClient.Patch(context.Background(), work.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // wait for addon to be available + gomega.Eventually(func() error { + addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), testAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnManifestApplied) { + return fmt.Errorf("Unexpected addon applied condition, %v", addon.Status.Conditions) + } + if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionAvailable) { + return fmt.Errorf("Unexpected addon available condition, %v", addon.Status.Conditions) + } + if cond := meta.FindStatusCondition(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing); cond != nil { + return fmt.Errorf("expected no addon progressing condition, %v", addon.Status.Conditions) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) +}) + +// The addon owner controller exist in general addon manager. +// This is for integration testing to assume that addon manager has already added the OwnerReferences. 
+func createManagedClusterAddOnwithOwnerRefs(namespace string, addon *addonapiv1alpha1.ManagedClusterAddOn, cma *addonapiv1alpha1.ClusterManagementAddOn) { + addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(namespace).Create(context.Background(), addon, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + addonCopy := addon.DeepCopy() + + // This is to assume that addon-manager has already added the OwnerReferences. + owner := metav1.NewControllerRef(cma, addonapiv1alpha1.GroupVersion.WithKind("ClusterManagementAddOn")) + modified := utils.MergeOwnerRefs(&addonCopy.OwnerReferences, *owner, false) + if modified { + _, err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(addonCopy.Namespace).Update(context.Background(), addonCopy, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + } +} diff --git a/test/integration/cloudevents/agent_hook_deploy_test.go b/test/integration/cloudevents/agent_hook_deploy_test.go new file mode 100644 index 000000000..6ff707e92 --- /dev/null +++ b/test/integration/cloudevents/agent_hook_deploy_test.go @@ -0,0 +1,333 @@ +package cloudevents + +import ( + "context" + "encoding/json" + "fmt" + "time" + + jsonpatch "github.com/evanphx/json-patch" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/rand" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" + workv1informers "open-cluster-management.io/api/client/work/informers/externalversions/work/v1" + workv1listers 
"open-cluster-management.io/api/client/work/listers/work/v1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + workv1 "open-cluster-management.io/api/work/v1" +) + +const ( + hookJobJson = ` +{ + "apiVersion": "batch/v1", + "kind": "Job", + "metadata": { + "name": "test", + "namespace": "default", + "annotations": { + "addon.open-cluster-management.io/addon-pre-delete":"" + } + }, + "spec": { + "manualSelector": true, + "selector": { + "matchLabels": { + "job": "test" + } + }, + "template": { + "metadata": { + "labels": { + "job": "test" + }, + "name": "test" + }, + "spec": { + "restartPolicy": "Never", + "containers": [ + { + "args": [ + "/helloworld_helm", + "cleanup", + "--addon-namespace=default" + ], + "image": "quay.io/open-cluster-management/addon-examples:latest", + "imagePullPolicy": "Always", + "name": "helloworld-cleanup-agent" + } + ] + } + } + } +} +` +) + +var jobCompleteValue = "True" + +var _ = ginkgo.Describe("Agent hook deploy", func() { + var ctx context.Context + var cancel context.CancelFunc + var err error + var managedClusterName string + var cma *addonapiv1alpha1.ClusterManagementAddOn + // var agentWorkClient workclientset.Interface + var agentWorkClient workv1client.ManifestWorkInterface + var agentWorkInformer workv1informers.ManifestWorkInformer + var agentWorkLister workv1listers.ManifestWorkNamespaceLister + + ginkgo.BeforeEach(func() { + ctx, cancel = context.WithCancel(context.Background()) + suffix := rand.String(5) + managedClusterName = fmt.Sprintf("managedcluster-%s", suffix) + + managedCluster := &clusterv1.ManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: managedClusterName, + }, + Spec: clusterv1.ManagedClusterSpec{ + HubAcceptsClient: true, + }, + } + _, err = hubClusterClient.ClusterV1().ManagedClusters().Create(context.Background(), managedCluster, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: managedClusterName}} + _, 
err = hubKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + cma = newClusterManagementAddon(testAddonImpl.name) + cma, err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Create(context.Background(), + cma, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // start work agent for the managed cluster + ginkgo.By("Start agent for managed cluster") + clientHolder, err := startWorkAgent(ctx, managedClusterName) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + agentWorkInformer = clientHolder.ManifestWorkInformer() + agentWorkLister = agentWorkInformer.Lister().ManifestWorks(managedClusterName) + agentWorkClient = clientHolder.ManifestWorks(managedClusterName) + }) + + ginkgo.AfterEach(func() { + err = hubKubeClient.CoreV1().Namespaces().Delete(context.Background(), managedClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = hubClusterClient.ClusterV1().ManagedClusters().Delete(context.Background(), managedClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Delete(context.Background(), + testAddonImpl.name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // cancel the context to stop the work agent + cancel() + }) + + ginkgo.It("Should install and uninstall agent successfully", func() { + deployObj := &unstructured.Unstructured{} + err := deployObj.UnmarshalJSON([]byte(deploymentJson)) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + hookObj := &unstructured.Unstructured{} + err = hookObj.UnmarshalJSON([]byte(hookJobJson)) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + testAddonImpl.manifests[managedClusterName] = []runtime.Object{deployObj, hookObj} + + addon := &addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: testAddonImpl.name, 
+ }, + Spec: addonapiv1alpha1.ManagedClusterAddOnSpec{ + InstallNamespace: "default", + }, + } + createManagedClusterAddOnwithOwnerRefs(managedClusterName, addon, cma) + + // deploy manifest is deployed + var work *workv1.ManifestWork + gomega.Eventually(func() error { + works, err := agentWorkLister.List(labels.Everything()) + if err != nil { + return fmt.Errorf("failed to list works: %v", err) + } + + if len(works) != 1 { + return fmt.Errorf("Unexpected number of work manifests") + } + + work = works[0] + if len(work.Spec.Workload.Manifests) != 1 { + return fmt.Errorf("Unexpected number of work manifests") + } + + if apiequality.Semantic.DeepEqual(work.Spec.Workload.Manifests[0].Raw, []byte(deploymentJson)) { + return fmt.Errorf("expected manifest is no correct, get %v", work.Spec.Workload.Manifests[0].Raw) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // addon has a finalizer + gomega.Eventually(func() error { + addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), testAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + finalizers := addon.GetFinalizers() + for _, f := range finalizers { + if f == addonapiv1alpha1.AddonPreDeleteHookFinalizer { + return nil + } + } + return fmt.Errorf("these is no hook finalizer in addon") + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Update work status to trigger addon status + newWork := work.DeepCopy() + meta.SetStatusCondition(&newWork.Status.Conditions, metav1.Condition{Type: workv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied", ObservedGeneration: work.Generation}) + workBytes, err := json.Marshal(work) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + newWorkBytes, err := json.Marshal(newWork) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + patchBytes, err := jsonpatch.CreateMergePatch(workBytes, newWorkBytes) + 
gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = agentWorkClient.Patch(context.Background(), work.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // wait for addon to be applied + gomega.Eventually(func() error { + addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), testAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnManifestApplied) { + return fmt.Errorf("Unexpected addon applied condition, %v", addon.Status.Conditions) + } + if cond := meta.FindStatusCondition(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing); cond != nil { + return fmt.Errorf("expected no addon progressing condition, %v", addon.Status.Conditions) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // delete addon, hook manifestwork will be applied, addon is deleting and addon status will be updated. 
+ err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Delete(context.Background(), testAddonImpl.name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // hook manifest is deployed + var hookWork *workv1.ManifestWork + gomega.Eventually(func() error { + works, err := agentWorkLister.List(labels.Everything()) + if err != nil { + return fmt.Errorf("failed to list works: %v", err) + } + + if len(works) != 2 { + return fmt.Errorf("Unexpected number of work manifests") + } + + for _, w := range works { + if w.Name != work.Name { + hookWork = w + break + } + } + + if len(hookWork.Spec.Workload.Manifests) != 1 { + return fmt.Errorf("Unexpected number of hookWork manifests") + } + + if apiequality.Semantic.DeepEqual(hookWork.Spec.Workload.Manifests[0].Raw, []byte(hookJobJson)) { + return fmt.Errorf("expected manifest is no correct, get %v", hookWork.Spec.Workload.Manifests[0].Raw) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), testAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + + if !meta.IsStatusConditionFalse(addon.Status.Conditions, "HookManifestCompleted") { + return fmt.Errorf("Unexpected addon applied condition, %v", addon.Status.Conditions) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // update hook manifest feedbackResult, addon will be deleted + newHookWork := hookWork.DeepCopy() + meta.SetStatusCondition(&newHookWork.Status.Conditions, metav1.Condition{Type: workv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied"}) + meta.SetStatusCondition(&newHookWork.Status.Conditions, metav1.Condition{Type: workv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "ResourceAvailable"}) + newHookWork.Status.ResourceStatus = 
workv1.ManifestResourceStatus{ + Manifests: []workv1.ManifestCondition{ + { + ResourceMeta: workv1.ManifestResourceMeta{ + Group: "batch", + Version: "v1", + Resource: "jobs", + Namespace: "default", + Name: "test", + }, + StatusFeedbacks: workv1.StatusFeedbackResult{ + Values: []workv1.FeedbackValue{ + { + Name: "JobComplete", + Value: workv1.FieldValue{ + Type: workv1.String, + String: &jobCompleteValue, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: "Available", + Status: metav1.ConditionTrue, + Reason: "MinimumReplicasAvailable", + Message: "Deployment has minimum availability.", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + }, + }, + }, + } + + hookWorkBytes, err := json.Marshal(hookWork) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + newHookWorkBytes, err := json.Marshal(newHookWork) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + hookWorkPatchBytes, err := jsonpatch.CreateMergePatch(hookWorkBytes, newHookWorkBytes) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = agentWorkClient.Patch(context.Background(), hookWork.Name, types.MergePatchType, hookWorkPatchBytes, metav1.PatchOptions{}, "status") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // wait for addon to be deleted + gomega.Eventually(func() error { + _, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), testAddonImpl.name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return nil + } + return err + } + return fmt.Errorf("addon is expceted to be deleted") + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + }) + +}) diff --git a/test/integration/cloudevents/agent_hosting_deploy_test.go b/test/integration/cloudevents/agent_hosting_deploy_test.go new file mode 100644 index 000000000..baa490f3b --- /dev/null +++ b/test/integration/cloudevents/agent_hosting_deploy_test.go @@ -0,0 +1,421 @@ +package cloudevents + +import ( + "context" + 
"encoding/json" + "fmt" + "time" + + jsonpatch "github.com/evanphx/json-patch" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/rand" + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/utils" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" + workv1informers "open-cluster-management.io/api/client/work/informers/externalversions/work/v1" + workv1listers "open-cluster-management.io/api/client/work/listers/work/v1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + workv1 "open-cluster-management.io/api/work/v1" +) + +const ( + deploymentHostingJson = `{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": "nginx-deployment", + "namespace": "default", + "annotations": { + "addon.open-cluster-management.io/hosted-manifest-location": "hosting" + } + }, + "spec": { + "replicas": 1, + "selector": { + "matchLabels": { + "app": "nginx" + } + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "app": "nginx" + } + }, + "spec": { + "containers": [ + { + "image": "nginx:1.14.2", + "name": "nginx", + "ports": [ + { + "containerPort": 80, + "protocol": "TCP" + } + ] + } + ] + } + } + } + }` +) + +var _ = ginkgo.Describe("Agent deploy", func() { + var ctx context.Context + var cancel context.CancelFunc + var err error + var managedClusterName, hostingClusterName string + var cma *addonapiv1alpha1.ClusterManagementAddOn + // var agentWorkClient workclientset.Interface + var agentWorkClient 
workv1client.ManifestWorkInterface + var agentWorkInformer workv1informers.ManifestWorkInformer + var agentWorkLister workv1listers.ManifestWorkNamespaceLister + + ginkgo.BeforeEach(func() { + ctx, cancel = context.WithCancel(context.Background()) + suffix := rand.String(5) + managedClusterName = fmt.Sprintf("managedcluster-%s", suffix) + hostingClusterName = fmt.Sprintf("hostingcluster-%s", suffix) + + managedCluster := &clusterv1.ManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: managedClusterName, + }, + Spec: clusterv1.ManagedClusterSpec{ + HubAcceptsClient: true, + }, + } + hostingCluster := &clusterv1.ManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: hostingClusterName, + }, + Spec: clusterv1.ManagedClusterSpec{ + HubAcceptsClient: true, + }, + } + _, err = hubClusterClient.ClusterV1().ManagedClusters().Create( + context.Background(), managedCluster, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: managedClusterName}} + _, err = hubKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = hubClusterClient.ClusterV1().ManagedClusters().Create( + context.Background(), hostingCluster, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + ns = &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: hostingClusterName}} + _, err = hubKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + cma = newClusterManagementAddon(testHostedAddonImpl.name) + cma, err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Create(context.Background(), + cma, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // start work agent for the hosting cluster + ginkgo.By("Start agent for hosting cluster") + clientHolder, err := startWorkAgent(ctx, 
hostingClusterName) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + agentWorkInformer = clientHolder.ManifestWorkInformer() + agentWorkLister = agentWorkInformer.Lister().ManifestWorks(hostingClusterName) + agentWorkClient = clientHolder.ManifestWorks(hostingClusterName) + }) + + ginkgo.AfterEach(func() { + err = hubKubeClient.CoreV1().Namespaces().Delete( + context.Background(), managedClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = hubClusterClient.ClusterV1().ManagedClusters().Delete( + context.Background(), managedClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + err = hubKubeClient.CoreV1().Namespaces().Delete( + context.Background(), hostingClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = hubClusterClient.ClusterV1().ManagedClusters().Delete( + context.Background(), hostingClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Delete(context.Background(), + testHostedAddonImpl.name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // cancel the context to stop the work agent + cancel() + }) + + ginkgo.It("Should deploy and delete agent on hosting cluster successfully", func() { + obj := &unstructured.Unstructured{} + err := obj.UnmarshalJSON([]byte(deploymentHostingJson)) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + testHostedAddonImpl.manifests[managedClusterName] = []runtime.Object{obj} + testHostedAddonImpl.prober = &agent.HealthProber{ + Type: agent.HealthProberTypeWork, + } + + addon := &addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: testHostedAddonImpl.name, + Annotations: map[string]string{ + addonapiv1alpha1.HostingClusterNameAnnotationKey: hostingClusterName, + }, + }, + Spec: addonapiv1alpha1.ManagedClusterAddOnSpec{ + InstallNamespace: "default", + 
}, + } + createManagedClusterAddOnwithOwnerRefs(managedClusterName, addon, cma) + + // check the work is created + var work *workv1.ManifestWork + gomega.Eventually(func() error { + works, err := agentWorkLister.List(labels.Everything()) + if err != nil { + return fmt.Errorf("failed to list works: %v", err) + } + + if len(works) != 1 { + return fmt.Errorf("Unexpected number of work manifests") + } + + work = works[0] + if len(work.Spec.Workload.Manifests) != 1 { + return fmt.Errorf("Unexpected number of work manifests") + } + + if apiequality.Semantic.DeepEqual(work.Spec.Workload.Manifests[0].Raw, []byte(deploymentHostingJson)) { + return fmt.Errorf("expected manifest is no correct, get %v", work.Spec.Workload.Manifests[0].Raw) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Update work status to trigger addon status + newWork := work.DeepCopy() + meta.SetStatusCondition(&newWork.Status.Conditions, metav1.Condition{Type: workv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied", ObservedGeneration: work.Generation}) + workBytes, err := json.Marshal(work) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + newWorkBytes, err := json.Marshal(newWork) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + patchBytes, err := jsonpatch.CreateMergePatch(workBytes, newWorkBytes) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = agentWorkClient.Patch(context.Background(), work.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get( + context.Background(), testHostedAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + + if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnHostingManifestApplied) { + return fmt.Errorf("Unexpected addon 
applied condition, %v", addon.Status.Conditions) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // update work to available so addon becomes available + work, err = agentWorkClient.Get(context.Background(), work.Name, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + newWork = work.DeepCopy() + meta.SetStatusCondition(&newWork.Status.Conditions, metav1.Condition{Type: workv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable", ObservedGeneration: work.Generation}) + + workBytes, err = json.Marshal(work) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + newWorkBytes, err = json.Marshal(newWork) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + patchBytes, err = jsonpatch.CreateMergePatch(workBytes, newWorkBytes) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = agentWorkClient.Patch(context.Background(), work.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + addon, err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get( + context.Background(), testHostedAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + + // HealthProberTypeWork for hosting manifestwork is not supported by now + // TODO: consider to support it + // if !meta.IsStatusConditionTrue(addon.Status.Conditions, "Available") { + // return fmt.Errorf("Unexpected addon available condition, %v", addon.Status.Conditions) + // } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // do nothing if cluster is deleting and addon is not deleted + cluster, err := hubClusterClient.ClusterV1().ManagedClusters().Get(context.Background(), managedClusterName, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + 
cluster.SetFinalizers([]string{"cluster.open-cluster-management.io/api-resource-cleanup"}) + _, err = hubClusterClient.ClusterV1().ManagedClusters().Update(context.Background(), cluster, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = hubClusterClient.ClusterV1().ManagedClusters().Delete(context.Background(), managedClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + time.Sleep(5 * time.Second) // wait 5 seconds to sync + gomega.Eventually(func() error { + _, err = agentWorkClient.Get(context.Background(), work.Name, metav1.GetOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // delete managedclusteraddon + // err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Delete( + // context.Background(), testHostedAddonImpl.name, metav1.DeleteOptions{}) + // gomega.Expect(err).ToNot(gomega.HaveOccurred()) + // gomega.Eventually(func() bool { + // _, err := agentWorkClient.Get(context.Background(), work.Name, metav1.GetOptions{}) + // return errors.IsNotFound(err) + // }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // _, err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get( + // context.Background(), testHostedAddonImpl.name, metav1.GetOptions{}) + // if err != nil { + // gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) + // } + }) + + ginkgo.It("Should deploy agent on hosting cluster and get available with prober func", func() { + obj := &unstructured.Unstructured{} + err := obj.UnmarshalJSON([]byte(deploymentHostingJson)) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + testHostedAddonImpl.manifests[managedClusterName] = []runtime.Object{obj} + testHostedAddonImpl.prober = utils.NewDeploymentProber( + types.NamespacedName{Name: "nginx-deployment", Namespace: "default"}) + + addon := &addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + 
Name: testHostedAddonImpl.name, + Annotations: map[string]string{ + addonapiv1alpha1.HostingClusterNameAnnotationKey: hostingClusterName, + }, + }, + Spec: addonapiv1alpha1.ManagedClusterAddOnSpec{ + InstallNamespace: "default", + }, + } + createManagedClusterAddOnwithOwnerRefs(managedClusterName, addon, cma) + + // Check the work is created + var work *workv1.ManifestWork + gomega.Eventually(func() error { + works, err := agentWorkLister.List(labels.Everything()) + if err != nil { + return fmt.Errorf("failed to list works: %v", err) + } + + if len(works) != 1 { + return fmt.Errorf("Unexpected number of work manifests") + } + + work = works[0] + if len(work.Spec.Workload.Manifests) != 1 { + return fmt.Errorf("Unexpected number of work manifests") + } + + if len(work.Spec.ManifestConfigs) != 1 { + return fmt.Errorf("Unexpected number of work manifests configuration") + } + + if apiequality.Semantic.DeepEqual(work.Spec.Workload.Manifests[0].Raw, []byte(deploymentHostingJson)) { + return fmt.Errorf("expected manifest is no correct, get %v", work.Spec.Workload.Manifests[0].Raw) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Update work status to trigger addon status + newWork := work.DeepCopy() + meta.SetStatusCondition(&newWork.Status.Conditions, metav1.Condition{Type: workv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied"}) + meta.SetStatusCondition(&newWork.Status.Conditions, metav1.Condition{Type: workv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable"}) + + replica := int64(1) + newWork.Status.ResourceStatus = workv1.ManifestResourceStatus{ + Manifests: []workv1.ManifestCondition{ + { + ResourceMeta: workv1.ManifestResourceMeta{ + Ordinal: 0, + Group: "apps", + Resource: "deployments", + Name: "nginx-deployment", + Namespace: "default", + }, + StatusFeedbacks: workv1.StatusFeedbackResult{ + Values: []workv1.FeedbackValue{ + { + Name: "Replicas", + Value: workv1.FieldValue{ + 
Type: workv1.Integer, + Integer: &replica, + }, + }, + { + Name: "ReadyReplicas", + Value: workv1.FieldValue{ + Type: workv1.Integer, + Integer: &replica, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: "Available", + Status: metav1.ConditionTrue, + Reason: "MinimumReplicasAvailable", + Message: "Deployment has minimum availability.", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + }, + }, + }, + } + + workBytes, err := json.Marshal(work) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + newWorkBytes, err := json.Marshal(newWork) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + patchBytes, err := jsonpatch.CreateMergePatch(workBytes, newWorkBytes) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = agentWorkClient.Patch(context.Background(), work.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // wait for addon to be available + gomega.Eventually(func() error { + addon, err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get( + context.Background(), testHostedAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + // Health prober func for hosting cluster is not supported by now + // TODO: consider to support it + // if !meta.IsStatusConditionTrue(addon.Status.Conditions, "Available") { + // return fmt.Errorf("Unexpected addon available condition, %v", addon.Status.Conditions) + // } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) +}) diff --git a/test/integration/cloudevents/agent_hosting_hook_deploy_test.go b/test/integration/cloudevents/agent_hosting_hook_deploy_test.go new file mode 100644 index 000000000..db119a0d5 --- /dev/null +++ b/test/integration/cloudevents/agent_hosting_hook_deploy_test.go @@ -0,0 +1,454 @@ +package cloudevents + +import ( + "context" + "encoding/json" + "fmt" + "time" + + jsonpatch "github.com/evanphx/json-patch" + 
"github.com/onsi/ginkgo" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/rand" + "open-cluster-management.io/addon-framework/pkg/agent" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" + workv1informers "open-cluster-management.io/api/client/work/informers/externalversions/work/v1" + workv1listers "open-cluster-management.io/api/client/work/listers/work/v1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + workv1 "open-cluster-management.io/api/work/v1" +) + +const ( + hookHostingJobJson = ` +{ + "apiVersion": "batch/v1", + "kind": "Job", + "metadata": { + "name": "test", + "namespace": "default", + "annotations": { + "addon.open-cluster-management.io/addon-pre-delete": "", + "addon.open-cluster-management.io/hosted-manifest-location": "hosting" + } + }, + "spec": { + "manualSelector": true, + "selector": { + "matchLabels": { + "job": "test" + } + }, + "template": { + "metadata": { + "labels": { + "job": "test" + }, + "name": "test" + }, + "spec": { + "restartPolicy": "Never", + "containers": [ + { + "args": [ + "/helloworld_hosted", + "cleanup", + "--addon-namespace=default" + ], + "image": "quay.io/open-cluster-management/helloworld-addon:latest", + "imagePullPolicy": "Always", + "name": "helloworld-cleanup-agent" + } + ] + } + } + } +} +` +) + +var _ = ginkgo.Describe("Agent hook deploy", func() { + var ctx context.Context + var cancel context.CancelFunc + var err error + var managedClusterName, hostingClusterName string + var hostingJobCompleteValue = "True" + var cma 
*addonapiv1alpha1.ClusterManagementAddOn + // var agentWorkClient workclientset.Interface + var agentWorkClient workv1client.ManifestWorkInterface + var agentWorkInformer workv1informers.ManifestWorkInformer + var agentWorkLister workv1listers.ManifestWorkNamespaceLister + + ginkgo.BeforeEach(func() { + ctx, cancel = context.WithCancel(context.Background()) + suffix := rand.String(5) + managedClusterName = fmt.Sprintf("managedcluster-%s", suffix) + hostingClusterName = fmt.Sprintf("hostingcluster-%s", suffix) + + managedCluster := &clusterv1.ManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: managedClusterName, + }, + Spec: clusterv1.ManagedClusterSpec{ + HubAcceptsClient: true, + }, + } + hostingCluster := &clusterv1.ManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: hostingClusterName, + }, + Spec: clusterv1.ManagedClusterSpec{ + HubAcceptsClient: true, + }, + } + _, err = hubClusterClient.ClusterV1().ManagedClusters().Create( + context.Background(), managedCluster, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: managedClusterName}} + _, err = hubKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = hubClusterClient.ClusterV1().ManagedClusters().Create( + context.Background(), hostingCluster, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + ns = &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: hostingClusterName}} + _, err = hubKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + cma = newClusterManagementAddon(testHostedAddonImpl.name) + cma, err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Create(context.Background(), + cma, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // start work agent for 
the hosting cluster + ginkgo.By("Start agent for hosting cluster") + clientHolder, err := startWorkAgent(ctx, hostingClusterName) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + agentWorkInformer = clientHolder.ManifestWorkInformer() + agentWorkLister = agentWorkInformer.Lister().ManifestWorks(hostingClusterName) + agentWorkClient = clientHolder.ManifestWorks(hostingClusterName) + }) + + ginkgo.AfterEach(func() { + err = hubKubeClient.CoreV1().Namespaces().Delete( + context.Background(), managedClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = hubClusterClient.ClusterV1().ManagedClusters().Delete( + context.Background(), managedClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + err = hubKubeClient.CoreV1().Namespaces().Delete( + context.Background(), hostingClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = hubClusterClient.ClusterV1().ManagedClusters().Delete( + context.Background(), hostingClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Delete(context.Background(), + testHostedAddonImpl.name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // cancel the context to stop the work agent + cancel() + }) + + ginkgo.It("Should install and uninstall agent successfully", func() { + deployObj := &unstructured.Unstructured{} + err := deployObj.UnmarshalJSON([]byte(deploymentHostingJson)) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + hookObj := &unstructured.Unstructured{} + err = hookObj.UnmarshalJSON([]byte(hookHostingJobJson)) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + testHostedAddonImpl.manifests[managedClusterName] = []runtime.Object{deployObj, hookObj} + testHostedAddonImpl.prober = &agent.HealthProber{ + Type: agent.HealthProberTypeWork, + } + + addon := 
&addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: testHostedAddonImpl.name, + Annotations: map[string]string{ + addonapiv1alpha1.HostingClusterNameAnnotationKey: hostingClusterName, + }, + // this finalizer is to prevent the addon from being deleted for test, it will be deleted at the end. + Finalizers: []string{"pending"}, + }, + Spec: addonapiv1alpha1.ManagedClusterAddOnSpec{ + InstallNamespace: "default", + }, + } + createManagedClusterAddOnwithOwnerRefs(managedClusterName, addon, cma) + + // deploy manifest is deployed + var work *workv1.ManifestWork + gomega.Eventually(func() error { + works, err := agentWorkLister.List(labels.Everything()) + if err != nil { + return fmt.Errorf("failed to list works: %v", err) + } + + if len(works) != 1 { + return fmt.Errorf("Unexpected number of work manifests") + } + + work = works[0] + if len(work.Spec.Workload.Manifests) != 1 { + return fmt.Errorf("Unexpected number of work manifests") + } + + if apiequality.Semantic.DeepEqual(work.Spec.Workload.Manifests[0].Raw, []byte(deploymentHostingJson)) { + return fmt.Errorf("expected manifest is no correct, get %v", work.Spec.Workload.Manifests[0].Raw) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // addon has a finalizer + gomega.Eventually(func() error { + addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName). 
+ Get(context.Background(), testHostedAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + finalizers := addon.GetFinalizers() + for _, f := range finalizers { + if f == addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer { + return nil + } + } + return fmt.Errorf("these is no hook finalizer in addon") + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Update work status to trigger addon status + newWork := work.DeepCopy() + meta.SetStatusCondition(&newWork.Status.Conditions, metav1.Condition{Type: workv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied", ObservedGeneration: work.Generation}) + workBytes, err := json.Marshal(work) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + newWorkBytes, err := json.Marshal(newWork) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + patchBytes, err := jsonpatch.CreateMergePatch(workBytes, newWorkBytes) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = agentWorkClient.Patch(context.Background(), work.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get( + context.Background(), testHostedAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + + if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnHostingManifestApplied) { + return fmt.Errorf("Unexpected addon applied condition, %v", addon.Status.Conditions) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // update work to available so addon becomes available + work, err = agentWorkClient.Get(context.Background(), work.Name, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + newWork = work.DeepCopy() + meta.SetStatusCondition(&newWork.Status.Conditions, 
metav1.Condition{Type: workv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "WorkAvailable", ObservedGeneration: work.Generation}) + + workBytes, err = json.Marshal(work) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + newWorkBytes, err = json.Marshal(newWork) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + patchBytes, err = jsonpatch.CreateMergePatch(workBytes, newWorkBytes) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = agentWorkClient.Patch(context.Background(), work.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + addon, err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get( + context.Background(), testHostedAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + + // HealthProberTypeWork for hosting manifestwork is not supported by now + // TODO: consider to support it + // if !meta.IsStatusConditionTrue(addon.Status.Conditions, "Available") { + // return fmt.Errorf("Unexpected addon available condition, %v", addon.Status.Conditions) + // } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // delete addon, hook manifestwork will be applied, addon is deleting and addon status will be updated. + err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName). 
+ Delete(context.Background(), testHostedAddonImpl.name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // hook manifest is deployed + var hookWork *workv1.ManifestWork + gomega.Eventually(func() error { + works, err := agentWorkLister.List(labels.Everything()) + if err != nil { + return fmt.Errorf("failed to list works: %v", err) + } + + if len(works) != 2 { + return fmt.Errorf("Unexpected number of work manifests") + } + + for _, w := range works { + if w.Name != work.Name { + hookWork = w + break + } + } + + if len(hookWork.Spec.Workload.Manifests) != 1 { + return fmt.Errorf("Unexpected number of hookWork manifests") + } + + if apiequality.Semantic.DeepEqual(hookWork.Spec.Workload.Manifests[0].Raw, []byte(hookHostingJobJson)) { + return fmt.Errorf("expected manifest is no correct, get %v", hookWork.Spec.Workload.Manifests[0].Raw) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName). + Get(context.Background(), testHostedAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + + if !meta.IsStatusConditionFalse(addon.Status.Conditions, "HookManifestCompleted") { + return fmt.Errorf("unexpected addon applied condition, %v", addon.Status.Conditions) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // update hook manifest feedbackResult, addon status will be updated and finalizer and pre-delete manifestwork + // will be deleted. 
+ newHookWork := hookWork.DeepCopy() + meta.SetStatusCondition(&newHookWork.Status.Conditions, metav1.Condition{Type: workv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "WorkApplied"}) + meta.SetStatusCondition(&newHookWork.Status.Conditions, metav1.Condition{Type: workv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "ResourceAvailable"}) + newHookWork.Status.ResourceStatus = workv1.ManifestResourceStatus{ + Manifests: []workv1.ManifestCondition{ + { + ResourceMeta: workv1.ManifestResourceMeta{ + Group: "batch", + Version: "v1", + Resource: "jobs", + Namespace: "default", + Name: "test", + }, + StatusFeedbacks: workv1.StatusFeedbackResult{ + Values: []workv1.FeedbackValue{ + { + Name: "JobComplete", + Value: workv1.FieldValue{ + Type: workv1.String, + String: &hostingJobCompleteValue, + }, + }, + }, + }, + Conditions: []metav1.Condition{ + { + Type: "Available", + Status: metav1.ConditionTrue, + Reason: "MinimumReplicasAvailable", + Message: "Deployment has minimum availability.", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + }, + }, + }, + } + + hookWorkBytes, err := json.Marshal(hookWork) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + newHookWorkBytes, err := json.Marshal(newHookWork) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + hookWorkPatchBytes, err := jsonpatch.CreateMergePatch(hookWorkBytes, newHookWorkBytes) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = agentWorkClient.Patch(context.Background(), hookWork.Name, types.MergePatchType, hookWorkPatchBytes, metav1.PatchOptions{}, "status") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // addon only has pending finalizer, and status is updated + gomega.Eventually(func() error { + addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName). 
+ Get(context.Background(), testHostedAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + if len(addon.Finalizers) != 1 { + return fmt.Errorf("addon is expected to only 1 finalizer,but got %v", len(addon.Finalizers)) + } + if addon.Finalizers[0] != "pending" { + return fmt.Errorf("addon is expected to only pending finalizer,but got %v", len(addon.Finalizers)) + } + if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnHookManifestCompleted) { + return fmt.Errorf("addon HookManifestCompleted condition is expecte to true, but got false") + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // update addon to trigger reconcile 3 times, and per-delete manifestwork should be deleted and not be re-created + // for i := 0; i < 3; i++ { + // gomega.Eventually(func() error { + // addon, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName). + // Get(context.Background(), testHostedAddonImpl.name, metav1.GetOptions{}) + // if err != nil { + // return err + // } + // addon.Labels = map[string]string{"test": fmt.Sprintf("%d", i)} + // _, err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName). + // Update(context.Background(), addon, metav1.UpdateOptions{}) + // return err + // }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // time.Sleep(2 * time.Second) + // hookWork, err = agentWorkClient.Get(context.Background(), hookWork.Name, metav1.GetOptions{}) + // gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) + // } + + // remove pending finalizer to delete addon + gomega.Eventually(func() error { + addon, err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName). 
+ Get(context.Background(), testHostedAddonImpl.name, metav1.GetOptions{}) + if err != nil { + return err + } + addon.SetFinalizers([]string{}) + _, err = hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName). + Update(context.Background(), addon, metav1.UpdateOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + _, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName). + Get(context.Background(), testHostedAddonImpl.name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return nil + } + return err + } + return fmt.Errorf("addon is expceted to be deleted") + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) +}) diff --git a/test/integration/cloudevents/suite_test.go b/test/integration/cloudevents/suite_test.go new file mode 100644 index 000000000..97a103dfb --- /dev/null +++ b/test/integration/cloudevents/suite_test.go @@ -0,0 +1,278 @@ +package cloudevents + +import ( + "context" + "fmt" + "os" + "path" + "path/filepath" + "testing" + "time" + + mochimqtt "github.com/mochi-mqtt/server/v2" + "github.com/mochi-mqtt/server/v2/hooks/auth" + "github.com/mochi-mqtt/server/v2/listeners" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" + "gopkg.in/yaml.v2" + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes" + "open-cluster-management.io/addon-framework/pkg/agent" + "sigs.k8s.io/controller-runtime/pkg/envtest" + + certificatesv1 
"k8s.io/api/certificates/v1" + "open-cluster-management.io/addon-framework/pkg/addonmanager" + "open-cluster-management.io/addon-framework/pkg/addonmanager/cloudevents" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + clusterv1client "open-cluster-management.io/api/client/cluster/clientset/versioned" + clusterv1 "open-cluster-management.io/api/cluster/v1" +) + +const ( + eventuallyTimeout = 30 // seconds + eventuallyInterval = 1 // seconds +) + +const ( + sourceID = "addon-manager-integration-test" + cloudEventsClientID = "addon-manager-integration-test" + mqttBrokerHost = "127.0.0.1:1883" + workDriverType = "mqtt" +) + +var mqttBroker *mochimqtt.Server +var tempDir string +var workDriverConfigFile string + +var testEnv *envtest.Environment +var hubClusterClient clusterv1client.Interface +var hubAddonClient addonv1alpha1client.Interface +var hubKubeClient kubernetes.Interface +var testAddonImpl *testAddon +var testHostedAddonImpl *testAddon +var testInstallByLableAddonImpl *testAddon +var testMultiWorksAddonImpl *testAddon +var mgrCtx context.Context +var mgrCtxCancel context.CancelFunc +var addonManager addonmanager.AddonManager + +func TestIntegration(t *testing.T) { + gomega.RegisterFailHandler(ginkgo.Fail) + ginkgo.RunSpecs(t, "Integration Suite") +} + +var _ = ginkgo.BeforeSuite(func(done ginkgo.Done) { + ginkgo.By("bootstrapping test mqtt broker") + + mqttBroker = mochimqtt.New(nil) + err := mqttBroker.AddHook(new(auth.AllowHook), nil) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + err = mqttBroker.AddListener(listeners.NewTCP("test-mqtt-broker", mqttBrokerHost, nil)) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // start the mqtt broker + go func() { + err := mqttBroker.Serve() + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }() + + tempDir, err = os.MkdirTemp("", "test") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + 
gomega.Expect(tempDir).ToNot(gomega.BeEmpty()) + workDriverConfigFile = path.Join(tempDir, "mqttconfig") + + // write the mqtt broker config to a file + workDriverConfig := mqtt.MQTTConfig{ + BrokerHost: mqttBrokerHost, + Topics: &types.Topics{ + SourceEvents: fmt.Sprintf("sources/%s/clusters/+/sourceevents", sourceID), + AgentEvents: fmt.Sprintf("sources/%s/clusters/+/agentevents", sourceID), + SourceBroadcast: fmt.Sprintf("sources/%s/sourcebroadcast", sourceID), + }, + } + workDriverConfigYAML, err := yaml.Marshal(workDriverConfig) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = os.WriteFile(workDriverConfigFile, workDriverConfigYAML, 0600) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + ginkgo.By("bootstrapping test environment") + + // start a kube-apiserver + testEnv = &envtest.Environment{ + ErrorIfCRDPathMissing: true, + CRDDirectoryPaths: []string{ + filepath.Join(".", "vendor", "open-cluster-management.io", "api", "work", "v1", "0000_00_work.open-cluster-management.io_manifestworks.crd.yaml"), + filepath.Join(".", "vendor", "open-cluster-management.io", "api", "cluster", "v1"), + filepath.Join(".", "vendor", "open-cluster-management.io", "api", "cluster", "v1beta1"), + filepath.Join(".", "vendor", "open-cluster-management.io", "api", "addon", "v1alpha1"), + }, + } + + cfg, err := testEnv.Start() + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(cfg).ToNot(gomega.BeNil()) + hubClusterClient, err = clusterv1client.NewForConfig(cfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + hubAddonClient, err = addonv1alpha1client.NewForConfig(cfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + hubKubeClient, err = kubernetes.NewForConfig(cfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + testAddonImpl = &testAddon{ + name: "test", + manifests: map[string][]runtime.Object{}, + registrations: map[string][]addonapiv1alpha1.RegistrationConfig{}, + } + + testHostedAddonImpl = &testAddon{ + name: "test-hosted", + manifests: 
map[string][]runtime.Object{}, + registrations: map[string][]addonapiv1alpha1.RegistrationConfig{}, + hostedModeEnabled: true, + hostInfoFn: constants.GetHostedModeInfo, + } + + testInstallByLableAddonImpl = &testAddon{ + name: "test-install-all", + manifests: map[string][]runtime.Object{}, + registrations: map[string][]addonapiv1alpha1.RegistrationConfig{}, + } + + testMultiWorksAddonImpl = &testAddon{ + name: "test-multi-works", + manifests: map[string][]runtime.Object{}, + registrations: map[string][]addonapiv1alpha1.RegistrationConfig{}, + } + + mgrCtx, mgrCtxCancel = context.WithCancel(context.TODO()) + // start hub controller + go func() { + cloudEventsOptions := cloudevents.NewCloudEventsOptions() + cloudEventsOptions.WorkDriver = workDriverType + cloudEventsOptions.WorkDriverConfig = workDriverConfigFile + cloudEventsOptions.SourceID = sourceID + cloudEventsOptions.CloudEventsClientID = cloudEventsClientID + addonManager, err = cloudevents.New(cfg, cloudEventsOptions) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = addonManager.AddAgent(testAddonImpl) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = addonManager.AddAgent(testInstallByLableAddonImpl) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = addonManager.AddAgent(testHostedAddonImpl) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = addonManager.AddAgent(testMultiWorksAddonImpl) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = addonManager.Start(mgrCtx) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + close(done) +}, 300) + +var _ = ginkgo.AfterSuite(func() { + ginkgo.By("tearing down the test environment") + + mgrCtxCancel() + err := testEnv.Stop() + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + err = mqttBroker.Close() + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + if tempDir != "" { + os.RemoveAll(tempDir) + } +}) + +type testAddon struct { + name string + manifests map[string][]runtime.Object + registrations 
map[string][]addonapiv1alpha1.RegistrationConfig + approveCSR bool + cert []byte + prober *agent.HealthProber + hostedModeEnabled bool + hostInfoFn func(addon *addonapiv1alpha1.ManagedClusterAddOn, cluster *clusterv1.ManagedCluster) (string, string) + supportedConfigGVRs []schema.GroupVersionResource +} + +func (t *testAddon) Manifests(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) ([]runtime.Object, error) { + return t.manifests[cluster.Name], nil +} + +func (t *testAddon) GetAgentAddonOptions() agent.AgentAddonOptions { + option := agent.AgentAddonOptions{ + AddonName: t.name, + HealthProber: t.prober, + HostedModeEnabled: t.hostedModeEnabled, + HostedModeInfoFunc: t.hostInfoFn, + SupportedConfigGVRs: t.supportedConfigGVRs, + } + + if len(t.registrations) > 0 { + option.Registration = &agent.RegistrationOption{ + CSRConfigurations: func(cluster *clusterv1.ManagedCluster) []addonapiv1alpha1.RegistrationConfig { + return t.registrations[cluster.Name] + }, + CSRApproveCheck: func(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn, csr *certificatesv1.CertificateSigningRequest) bool { + return t.approveCSR + }, + CSRSign: func(csr *certificatesv1.CertificateSigningRequest) []byte { + return t.cert + }, + } + } + + return option +} + +func newClusterManagementAddon(name string) *addonapiv1alpha1.ClusterManagementAddOn { + return &addonapiv1alpha1.ClusterManagementAddOn{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: addonapiv1alpha1.ClusterManagementAddOnSpec{ + InstallStrategy: addonapiv1alpha1.InstallStrategy{ + Type: addonapiv1alpha1.AddonInstallStrategyManual, + }, + }, + } +} + +func startWorkAgent(ctx context.Context, clusterName string) (*work.ClientHolder, error) { + config := &mqtt.MQTTOptions{ + KeepAlive: 60, + PubQoS: 1, + SubQoS: 1, + DialTimeout: 5 * time.Second, + BrokerHost: mqttBrokerHost, + Topics: types.Topics{ + SourceEvents: 
fmt.Sprintf("sources/%s/clusters/%s/sourceevents", sourceID, clusterName), + AgentEvents: fmt.Sprintf("sources/%s/clusters/%s/agentevents", sourceID, clusterName), + AgentBroadcast: fmt.Sprintf("clusters/%s/agentbroadcast", clusterName), + }, + } + clientHolder, err := work.NewClientHolderBuilder(config). + WithClientID(clusterName). + WithClusterName(clusterName). + WithCodecs(codec.NewManifestBundleCodec()). + NewAgentClientHolder(ctx) + if err != nil { + return nil, err + } + + go clientHolder.ManifestWorkInformer().Informer().Run(ctx.Done()) + + return clientHolder, nil +} diff --git a/test/integration/agent_deploy_test.go b/test/integration/kube/agent_deploy_test.go similarity index 99% rename from test/integration/agent_deploy_test.go rename to test/integration/kube/agent_deploy_test.go index a02d40185..195069fb6 100644 --- a/test/integration/agent_deploy_test.go +++ b/test/integration/kube/agent_deploy_test.go @@ -1,4 +1,4 @@ -package integration +package kube import ( "context" diff --git a/test/integration/agent_hook_deploy_test.go b/test/integration/kube/agent_hook_deploy_test.go similarity index 99% rename from test/integration/agent_hook_deploy_test.go rename to test/integration/kube/agent_hook_deploy_test.go index 809a3891b..8838f18d6 100644 --- a/test/integration/agent_hook_deploy_test.go +++ b/test/integration/kube/agent_hook_deploy_test.go @@ -1,4 +1,4 @@ -package integration +package kube import ( "context" diff --git a/test/integration/agent_hosting_deploy_test.go b/test/integration/kube/agent_hosting_deploy_test.go similarity index 99% rename from test/integration/agent_hosting_deploy_test.go rename to test/integration/kube/agent_hosting_deploy_test.go index 2b8198e0c..58b1e9b89 100644 --- a/test/integration/agent_hosting_deploy_test.go +++ b/test/integration/kube/agent_hosting_deploy_test.go @@ -1,4 +1,4 @@ -package integration +package kube import ( "context" diff --git a/test/integration/agent_hosting_hook_deploy_test.go 
b/test/integration/kube/agent_hosting_hook_deploy_test.go similarity index 99% rename from test/integration/agent_hosting_hook_deploy_test.go rename to test/integration/kube/agent_hosting_hook_deploy_test.go index afbd9a0d1..5507f12bf 100644 --- a/test/integration/agent_hosting_hook_deploy_test.go +++ b/test/integration/kube/agent_hosting_hook_deploy_test.go @@ -1,4 +1,4 @@ -package integration +package kube import ( "context" diff --git a/test/integration/assertion_test.go b/test/integration/kube/assertion_test.go similarity index 99% rename from test/integration/assertion_test.go rename to test/integration/kube/assertion_test.go index 15c2be70e..3f847fd57 100644 --- a/test/integration/assertion_test.go +++ b/test/integration/kube/assertion_test.go @@ -1,4 +1,4 @@ -package integration +package kube import ( "context" diff --git a/test/integration/cluster_management_addon_test.go b/test/integration/kube/cluster_management_addon_test.go similarity index 99% rename from test/integration/cluster_management_addon_test.go rename to test/integration/kube/cluster_management_addon_test.go index 2658655ee..92e41d06e 100644 --- a/test/integration/cluster_management_addon_test.go +++ b/test/integration/kube/cluster_management_addon_test.go @@ -1,4 +1,4 @@ -package integration +package kube import ( "context" diff --git a/test/integration/csr_test.go b/test/integration/kube/csr_test.go similarity index 99% rename from test/integration/csr_test.go rename to test/integration/kube/csr_test.go index a70af2c22..01ec6567e 100644 --- a/test/integration/csr_test.go +++ b/test/integration/kube/csr_test.go @@ -1,4 +1,4 @@ -package integration +package kube import ( "bytes" diff --git a/test/integration/deleted_managed_cluster_create_addon_test.go b/test/integration/kube/deleted_managed_cluster_create_addon_test.go similarity index 99% rename from test/integration/deleted_managed_cluster_create_addon_test.go rename to test/integration/kube/deleted_managed_cluster_create_addon_test.go 
index 3cb9b7811..25298640b 100644 --- a/test/integration/deleted_managed_cluster_create_addon_test.go +++ b/test/integration/kube/deleted_managed_cluster_create_addon_test.go @@ -1,4 +1,4 @@ -package integration +package kube import ( "context" diff --git a/test/integration/multiworks_test.go b/test/integration/kube/multiworks_test.go similarity index 99% rename from test/integration/multiworks_test.go rename to test/integration/kube/multiworks_test.go index 293cfeb50..88ff4a511 100644 --- a/test/integration/multiworks_test.go +++ b/test/integration/kube/multiworks_test.go @@ -1,4 +1,4 @@ -package integration +package kube import ( "context" diff --git a/test/integration/registration_test.go b/test/integration/kube/registration_test.go similarity index 99% rename from test/integration/registration_test.go rename to test/integration/kube/registration_test.go index a243defc4..bcef10224 100644 --- a/test/integration/registration_test.go +++ b/test/integration/kube/registration_test.go @@ -1,4 +1,4 @@ -package integration +package kube import ( "context" diff --git a/test/integration/suite_test.go b/test/integration/kube/suite_test.go similarity index 99% rename from test/integration/suite_test.go rename to test/integration/kube/suite_test.go index f192ef3c5..3f7010d0a 100644 --- a/test/integration/suite_test.go +++ b/test/integration/kube/suite_test.go @@ -1,4 +1,4 @@ -package integration +package kube import ( "context" diff --git a/test/integration/util/recorder.go b/test/integration/util/recorder.go new file mode 100644 index 000000000..a4de21f4b --- /dev/null +++ b/test/integration/util/recorder.go @@ -0,0 +1,84 @@ +package util + +import ( + "context" + "fmt" + + "github.com/onsi/ginkgo" + "github.com/openshift/library-go/pkg/operator/events" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func NewIntegrationTestEventRecorder(component string) events.Recorder { + return &IntegrationTestEventRecorder{component: component} +} + +type 
IntegrationTestEventRecorder struct { + component string + ctx context.Context +} + +func (r *IntegrationTestEventRecorder) ComponentName() string { + return r.component +} + +func (r *IntegrationTestEventRecorder) ForComponent(c string) events.Recorder { + return &IntegrationTestEventRecorder{component: c} +} + +func (r *IntegrationTestEventRecorder) WithComponentSuffix(suffix string) events.Recorder { + return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix)) +} + +func (r *IntegrationTestEventRecorder) WithContext(ctx context.Context) events.Recorder { + r.ctx = ctx + return r +} + +func (r *IntegrationTestEventRecorder) Event(reason, message string) { + fmt.Fprintf(ginkgo.GinkgoWriter, "Event: [%s] %v: %v \n", r.component, reason, message) +} + +func (r *IntegrationTestEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) { + r.Event(reason, fmt.Sprintf(messageFmt, args...)) +} + +func (r *IntegrationTestEventRecorder) Warning(reason, message string) { + fmt.Fprintf(ginkgo.GinkgoWriter, "Warning: [%s] %v: %v \n", r.component, reason, message) +} + +func (r *IntegrationTestEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) { + r.Warning(reason, fmt.Sprintf(messageFmt, args...)) +} + +func (r *IntegrationTestEventRecorder) Shutdown() {} + +func HasCondition( + conditions []metav1.Condition, + expectedType, expectedReason string, + expectedStatus metav1.ConditionStatus, +) bool { + + for _, condition := range conditions { + if condition.Type != expectedType { + continue + } + + if condition.Status != expectedStatus { + return false + } + + // skip checking reason + if len(expectedReason) == 0 { + return true + } + + if condition.Reason != expectedReason { + return false + } + + return true + } + + return false +} diff --git a/vendor/github.com/bwmarrin/snowflake/.travis.yml b/vendor/github.com/bwmarrin/snowflake/.travis.yml new file mode 100644 index 000000000..75d9f5c24 --- /dev/null +++ 
b/vendor/github.com/bwmarrin/snowflake/.travis.yml @@ -0,0 +1,12 @@ +language: go +go: + - 1.11.x + - 1.12.x +install: + - go get -v . + - go get -v golang.org/x/lint/golint +script: + - diff <(gofmt -d .) <(echo -n) + - go vet -x ./... + - golint -set_exit_status ./... + - go test -v -race ./... diff --git a/vendor/github.com/bwmarrin/snowflake/LICENSE b/vendor/github.com/bwmarrin/snowflake/LICENSE new file mode 100644 index 000000000..ef39145e5 --- /dev/null +++ b/vendor/github.com/bwmarrin/snowflake/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2016, Bruce +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/bwmarrin/snowflake/README.md b/vendor/github.com/bwmarrin/snowflake/README.md new file mode 100644 index 000000000..23af210b6 --- /dev/null +++ b/vendor/github.com/bwmarrin/snowflake/README.md @@ -0,0 +1,143 @@ +snowflake +==== +[![GoDoc](https://godoc.org/github.com/bwmarrin/snowflake?status.svg)](https://godoc.org/github.com/bwmarrin/snowflake) [![Go report](http://goreportcard.com/badge/bwmarrin/snowflake)](http://goreportcard.com/report/bwmarrin/snowflake) [![Coverage](http://gocover.io/_badge/github.com/bwmarrin/snowflake)](https://gocover.io/github.com/bwmarrin/snowflake) [![Build Status](https://travis-ci.org/bwmarrin/snowflake.svg?branch=master)](https://travis-ci.org/bwmarrin/snowflake) [![Discord Gophers](https://img.shields.io/badge/Discord%20Gophers-%23info-blue.svg)](https://discord.gg/0f1SbxBZjYq9jLBk) + +snowflake is a [Go](https://golang.org/) package that provides +* A very simple Twitter snowflake generator. +* Methods to parse existing snowflake IDs. +* Methods to convert a snowflake ID into several other data types and back. +* JSON Marshal/Unmarshal functions to easily use snowflake IDs within a JSON API. +* Monotonic Clock calculations protect from clock drift. + +**For help with this package or general Go discussion, please join the [Discord +Gophers](https://discord.gg/0f1SbxBZjYq9jLBk) chat server.** + +## Status +This package should be considered stable and completed. Any additions in the +future will strongly avoid API changes to existing functions. + +### ID Format +By default, the ID format follows the original Twitter snowflake format. +* The ID as a whole is a 63 bit integer stored in an int64 +* 41 bits are used to store a timestamp with millisecond precision, using a custom epoch. +* 10 bits are used to store a node id - a range from 0 through 1023. +* 12 bits are used to store a sequence number - a range from 0 through 4095. 
+ +### Custom Format +You can alter the number of bits used for the node id and step number (sequence) +by setting the snowflake.NodeBits and snowflake.StepBits values. Remember that +There is a maximum of 22 bits available that can be shared between these two +values. You do not have to use all 22 bits. + +### Custom Epoch +By default this package uses the Twitter Epoch of 1288834974657 or Nov 04 2010 01:42:54. +You can set your own epoch value by setting snowflake.Epoch to a time in milliseconds +to use as the epoch. + +### Custom Notes +When setting custom epoch or bit values you need to set them prior to calling +any functions on the snowflake package, including NewNode(). Otherwise the +custom values you set will not be applied correctly. + +### How it Works. +Each time you generate an ID, it works, like this. +* A timestamp with millisecond precision is stored using 41 bits of the ID. +* Then the NodeID is added in subsequent bits. +* Then the Sequence Number is added, starting at 0 and incrementing for each ID generated in the same millisecond. If you generate enough IDs in the same millisecond that the sequence would roll over or overfill then the generate function will pause until the next millisecond. + +The default Twitter format shown below. +``` ++--------------------------------------------------------------------------+ +| 1 Bit Unused | 41 Bit Timestamp | 10 Bit NodeID | 12 Bit Sequence ID | ++--------------------------------------------------------------------------+ +``` + +Using the default settings, this allows for 4096 unique IDs to be generated every millisecond, per Node ID. +## Getting Started + +### Installing + +This assumes you already have a working Go environment, if not please see +[this page](https://golang.org/doc/install) first. + +```sh +go get github.com/bwmarrin/snowflake +``` + + +### Usage + +Import the package into your project then construct a new snowflake Node using a +unique node number. 
The default settings permit a node number range from 0 to 1023. +If you have set a custom NodeBits value, you will need to calculate what your +node number range will be. With the node object call the Generate() method to +generate and return a unique snowflake ID. + +Keep in mind that each node you create must have a unique node number, even +across multiple servers. If you do not keep node numbers unique the generator +cannot guarantee unique IDs across all nodes. + + +**Example Program:** + +```go +package main + +import ( + "fmt" + + "github.com/bwmarrin/snowflake" +) + +func main() { + + // Create a new Node with a Node number of 1 + node, err := snowflake.NewNode(1) + if err != nil { + fmt.Println(err) + return + } + + // Generate a snowflake ID. + id := node.Generate() + + // Print out the ID in a few different ways. + fmt.Printf("Int64 ID: %d\n", id) + fmt.Printf("String ID: %s\n", id) + fmt.Printf("Base2 ID: %s\n", id.Base2()) + fmt.Printf("Base64 ID: %s\n", id.Base64()) + + // Print out the ID's timestamp + fmt.Printf("ID Time : %d\n", id.Time()) + + // Print out the ID's node number + fmt.Printf("ID Node : %d\n", id.Node()) + + // Print out the ID's sequence number + fmt.Printf("ID Step : %d\n", id.Step()) + + // Generate and print, all in one. + fmt.Printf("ID : %d\n", node.Generate().Int64()) +} +``` + +### Performance + +With default settings, this snowflake generator should be sufficiently fast +enough on most systems to generate 4096 unique ID's per millisecond. This is +the maximum that the snowflake ID format supports. That is, around 243-244 +nanoseconds per operation. + +Since the snowflake generator is single threaded the primary limitation will be +the maximum speed of a single processor on your system. + +To benchmark the generator on your system run the following command inside the +snowflake package directory. + +```sh +go test -run=^$ -bench=. 
+``` + +If your curious, check out this commit that shows benchmarks that compare a few +different ways of implementing a snowflake generator in Go. +* https://github.com/bwmarrin/snowflake/tree/9befef8908df13f4102ed21f42b083dd862b5036 diff --git a/vendor/github.com/bwmarrin/snowflake/snowflake.go b/vendor/github.com/bwmarrin/snowflake/snowflake.go new file mode 100644 index 000000000..3ea0856d3 --- /dev/null +++ b/vendor/github.com/bwmarrin/snowflake/snowflake.go @@ -0,0 +1,365 @@ +// Package snowflake provides a very simple Twitter snowflake generator and parser. +package snowflake + +import ( + "encoding/base64" + "encoding/binary" + "errors" + "fmt" + "strconv" + "sync" + "time" +) + +var ( + // Epoch is set to the twitter snowflake epoch of Nov 04 2010 01:42:54 UTC in milliseconds + // You may customize this to set a different epoch for your application. + Epoch int64 = 1288834974657 + + // NodeBits holds the number of bits to use for Node + // Remember, you have a total 22 bits to share between Node/Step + NodeBits uint8 = 10 + + // StepBits holds the number of bits to use for Step + // Remember, you have a total 22 bits to share between Node/Step + StepBits uint8 = 12 + + // DEPRECATED: the below four variables will be removed in a future release. + mu sync.Mutex + nodeMax int64 = -1 ^ (-1 << NodeBits) + nodeMask = nodeMax << StepBits + stepMask int64 = -1 ^ (-1 << StepBits) + timeShift = NodeBits + StepBits + nodeShift = StepBits +) + +const encodeBase32Map = "ybndrfg8ejkmcpqxot1uwisza345h769" + +var decodeBase32Map [256]byte + +const encodeBase58Map = "123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ" + +var decodeBase58Map [256]byte + +// A JSONSyntaxError is returned from UnmarshalJSON if an invalid ID is provided. 
+type JSONSyntaxError struct{ original []byte } + +func (j JSONSyntaxError) Error() string { + return fmt.Sprintf("invalid snowflake ID %q", string(j.original)) +} + +// ErrInvalidBase58 is returned by ParseBase58 when given an invalid []byte +var ErrInvalidBase58 = errors.New("invalid base58") + +// ErrInvalidBase32 is returned by ParseBase32 when given an invalid []byte +var ErrInvalidBase32 = errors.New("invalid base32") + +// Create maps for decoding Base58/Base32. +// This speeds up the process tremendously. +func init() { + + for i := 0; i < len(encodeBase58Map); i++ { + decodeBase58Map[i] = 0xFF + } + + for i := 0; i < len(encodeBase58Map); i++ { + decodeBase58Map[encodeBase58Map[i]] = byte(i) + } + + for i := 0; i < len(encodeBase32Map); i++ { + decodeBase32Map[i] = 0xFF + } + + for i := 0; i < len(encodeBase32Map); i++ { + decodeBase32Map[encodeBase32Map[i]] = byte(i) + } +} + +// A Node struct holds the basic information needed for a snowflake generator +// node +type Node struct { + mu sync.Mutex + epoch time.Time + time int64 + node int64 + step int64 + + nodeMax int64 + nodeMask int64 + stepMask int64 + timeShift uint8 + nodeShift uint8 +} + +// An ID is a custom type used for a snowflake ID. This is used so we can +// attach methods onto the ID. +type ID int64 + +// NewNode returns a new snowflake node that can be used to generate snowflake +// IDs +func NewNode(node int64) (*Node, error) { + + // re-calc in case custom NodeBits or StepBits were set + // DEPRECATED: the below block will be removed in a future release. 
+ mu.Lock() + nodeMax = -1 ^ (-1 << NodeBits) + nodeMask = nodeMax << StepBits + stepMask = -1 ^ (-1 << StepBits) + timeShift = NodeBits + StepBits + nodeShift = StepBits + mu.Unlock() + + n := Node{} + n.node = node + n.nodeMax = -1 ^ (-1 << NodeBits) + n.nodeMask = n.nodeMax << StepBits + n.stepMask = -1 ^ (-1 << StepBits) + n.timeShift = NodeBits + StepBits + n.nodeShift = StepBits + + if n.node < 0 || n.node > n.nodeMax { + return nil, errors.New("Node number must be between 0 and " + strconv.FormatInt(n.nodeMax, 10)) + } + + var curTime = time.Now() + // add time.Duration to curTime to make sure we use the monotonic clock if available + n.epoch = curTime.Add(time.Unix(Epoch/1000, (Epoch%1000)*1000000).Sub(curTime)) + + return &n, nil +} + +// Generate creates and returns a unique snowflake ID +// To help guarantee uniqueness +// - Make sure your system is keeping accurate system time +// - Make sure you never have multiple nodes running with the same node ID +func (n *Node) Generate() ID { + + n.mu.Lock() + + now := time.Since(n.epoch).Nanoseconds() / 1000000 + + if now == n.time { + n.step = (n.step + 1) & n.stepMask + + if n.step == 0 { + for now <= n.time { + now = time.Since(n.epoch).Nanoseconds() / 1000000 + } + } + } else { + n.step = 0 + } + + n.time = now + + r := ID((now)<= 32 { + b = append(b, encodeBase32Map[f%32]) + f /= 32 + } + b = append(b, encodeBase32Map[f]) + + for x, y := 0, len(b)-1; x < y; x, y = x+1, y-1 { + b[x], b[y] = b[y], b[x] + } + + return string(b) +} + +// ParseBase32 parses a base32 []byte into a snowflake ID +// NOTE: There are many different base32 implementations so becareful when +// doing any interoperation. 
+func ParseBase32(b []byte) (ID, error) { + + var id int64 + + for i := range b { + if decodeBase32Map[b[i]] == 0xFF { + return -1, ErrInvalidBase32 + } + id = id*32 + int64(decodeBase32Map[b[i]]) + } + + return ID(id), nil +} + +// Base36 returns a base36 string of the snowflake ID +func (f ID) Base36() string { + return strconv.FormatInt(int64(f), 36) +} + +// ParseBase36 converts a Base36 string into a snowflake ID +func ParseBase36(id string) (ID, error) { + i, err := strconv.ParseInt(id, 36, 64) + return ID(i), err +} + +// Base58 returns a base58 string of the snowflake ID +func (f ID) Base58() string { + + if f < 58 { + return string(encodeBase58Map[f]) + } + + b := make([]byte, 0, 11) + for f >= 58 { + b = append(b, encodeBase58Map[f%58]) + f /= 58 + } + b = append(b, encodeBase58Map[f]) + + for x, y := 0, len(b)-1; x < y; x, y = x+1, y-1 { + b[x], b[y] = b[y], b[x] + } + + return string(b) +} + +// ParseBase58 parses a base58 []byte into a snowflake ID +func ParseBase58(b []byte) (ID, error) { + + var id int64 + + for i := range b { + if decodeBase58Map[b[i]] == 0xFF { + return -1, ErrInvalidBase58 + } + id = id*58 + int64(decodeBase58Map[b[i]]) + } + + return ID(id), nil +} + +// Base64 returns a base64 string of the snowflake ID +func (f ID) Base64() string { + return base64.StdEncoding.EncodeToString(f.Bytes()) +} + +// ParseBase64 converts a base64 string into a snowflake ID +func ParseBase64(id string) (ID, error) { + b, err := base64.StdEncoding.DecodeString(id) + if err != nil { + return -1, err + } + return ParseBytes(b) + +} + +// Bytes returns a byte slice of the snowflake ID +func (f ID) Bytes() []byte { + return []byte(f.String()) +} + +// ParseBytes converts a byte slice into a snowflake ID +func ParseBytes(id []byte) (ID, error) { + i, err := strconv.ParseInt(string(id), 10, 64) + return ID(i), err +} + +// IntBytes returns an array of bytes of the snowflake ID, encoded as a +// big endian integer. 
+func (f ID) IntBytes() [8]byte { + var b [8]byte + binary.BigEndian.PutUint64(b[:], uint64(f)) + return b +} + +// ParseIntBytes converts an array of bytes encoded as big endian integer as +// a snowflake ID +func ParseIntBytes(id [8]byte) ID { + return ID(int64(binary.BigEndian.Uint64(id[:]))) +} + +// Time returns an int64 unix timestamp in milliseconds of the snowflake ID time +// DEPRECATED: the below function will be removed in a future release. +func (f ID) Time() int64 { + return (int64(f) >> timeShift) + Epoch +} + +// Node returns an int64 of the snowflake ID node number +// DEPRECATED: the below function will be removed in a future release. +func (f ID) Node() int64 { + return int64(f) & nodeMask >> nodeShift +} + +// Step returns an int64 of the snowflake step (or sequence) number +// DEPRECATED: the below function will be removed in a future release. +func (f ID) Step() int64 { + return int64(f) & stepMask +} + +// MarshalJSON returns a json byte array string of the snowflake ID. +func (f ID) MarshalJSON() ([]byte, error) { + buff := make([]byte, 0, 22) + buff = append(buff, '"') + buff = strconv.AppendInt(buff, int64(f), 10) + buff = append(buff, '"') + return buff, nil +} + +// UnmarshalJSON converts a json byte array of a snowflake ID into an ID type. 
+func (f *ID) UnmarshalJSON(b []byte) error { + if len(b) < 3 || b[0] != '"' || b[len(b)-1] != '"' { + return JSONSyntaxError{b} + } + + i, err := strconv.ParseInt(string(b[1:len(b)-1]), 10, 64) + if err != nil { + return err + } + + *f = ID(i) + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/LICENSE b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/message.go b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/message.go new file mode 100644 index 000000000..8dd938545 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/message.go @@ -0,0 +1,119 @@ +/* + Copyright 2023 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package mqtt_paho + +import ( + "bytes" + "context" + "strings" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/eclipse/paho.golang/paho" +) + +const ( + prefix = "ce-" + contentType = "Content-Type" +) + +var specs = spec.WithPrefix(prefix) + +// Message represents a MQTT message. 
+// This message *can* be read several times safely +type Message struct { + internal *paho.Publish + version spec.Version + format format.Format +} + +// Check if Message implements binding.Message +var ( + _ binding.Message = (*Message)(nil) + _ binding.MessageMetadataReader = (*Message)(nil) +) + +func NewMessage(msg *paho.Publish) *Message { + var f format.Format + var v spec.Version + if msg.Properties != nil { + // Use properties.User["Content-type"] to determine if message is structured + if s := msg.Properties.User.Get(contentType); format.IsFormat(s) { + f = format.Lookup(s) + } else if s := msg.Properties.User.Get(specs.PrefixedSpecVersionName()); s != "" { + v = specs.Version(s) + } + } + return &Message{ + internal: msg, + version: v, + format: f, + } +} + +func (m *Message) ReadEncoding() binding.Encoding { + if m.version != nil { + return binding.EncodingBinary + } + if m.format != nil { + return binding.EncodingStructured + } + return binding.EncodingUnknown +} + +func (m *Message) ReadStructured(ctx context.Context, encoder binding.StructuredWriter) error { + if m.version != nil { + return binding.ErrNotStructured + } + if m.format == nil { + return binding.ErrNotStructured + } + return encoder.SetStructuredEvent(ctx, m.format, bytes.NewReader(m.internal.Payload)) +} + +func (m *Message) ReadBinary(ctx context.Context, encoder binding.BinaryWriter) (err error) { + if m.format != nil { + return binding.ErrNotBinary + } + + for _, userProperty := range m.internal.Properties.User { + if strings.HasPrefix(userProperty.Key, prefix) { + attr := m.version.Attribute(userProperty.Key) + if attr != nil { + err = encoder.SetAttribute(attr, userProperty.Value) + } else { + err = encoder.SetExtension(strings.TrimPrefix(userProperty.Key, prefix), userProperty.Value) + } + } else if userProperty.Key == contentType { + err = encoder.SetAttribute(m.version.AttributeFromKind(spec.DataContentType), string(userProperty.Value)) + } + if err != nil { + return + } + } + + 
if m.internal.Payload != nil { + return encoder.SetData(bytes.NewBuffer(m.internal.Payload)) + } + return nil +} + +func (m *Message) Finish(error) error { + return nil +} + +func (m *Message) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) { + attr := m.version.AttributeFromKind(k) + if attr != nil { + return attr, m.internal.Properties.User.Get(prefix + attr.Name()) + } + return nil, nil +} + +func (m *Message) GetExtension(name string) interface{} { + return m.internal.Properties.User.Get(prefix + name) +} diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/option.go b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/option.go new file mode 100644 index 000000000..955a16219 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/option.go @@ -0,0 +1,48 @@ +/* + Copyright 2023 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package mqtt_paho + +import ( + "fmt" + + "github.com/eclipse/paho.golang/paho" +) + +// Option is the function signature required to be considered an mqtt_paho.Option. +type Option func(*Protocol) error + +// WithConnect sets the paho.Connect configuration for the client. This option is not required. +func WithConnect(connOpt *paho.Connect) Option { + return func(p *Protocol) error { + if connOpt == nil { + return fmt.Errorf("the paho.Connect option must not be nil") + } + p.connOption = connOpt + return nil + } +} + +// WithPublish sets the paho.Publish configuration for the client. This option is required if you want to send messages. +func WithPublish(publishOpt *paho.Publish) Option { + return func(p *Protocol) error { + if publishOpt == nil { + return fmt.Errorf("the paho.Publish option must not be nil") + } + p.publishOption = publishOpt + return nil + } +} + +// WithSubscribe sets the paho.Subscribe configuration for the client. This option is required if you want to receive messages. 
+func WithSubscribe(subscribeOpt *paho.Subscribe) Option { + return func(p *Protocol) error { + if subscribeOpt == nil { + return fmt.Errorf("the paho.Subscribe option must not be nil") + } + p.subscribeOption = subscribeOpt + return nil + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/protocol.go b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/protocol.go new file mode 100644 index 000000000..261fc6c37 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/protocol.go @@ -0,0 +1,155 @@ +/* + Copyright 2023 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package mqtt_paho + +import ( + "context" + "fmt" + "io" + "sync" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/protocol" + "github.com/eclipse/paho.golang/paho" + + cecontext "github.com/cloudevents/sdk-go/v2/context" +) + +type Protocol struct { + client *paho.Client + config *paho.ClientConfig + connOption *paho.Connect + publishOption *paho.Publish + subscribeOption *paho.Subscribe + + // receiver + incoming chan *paho.Publish + // inOpen + openerMutex sync.Mutex + + closeChan chan struct{} +} + +var ( + _ protocol.Sender = (*Protocol)(nil) + _ protocol.Opener = (*Protocol)(nil) + _ protocol.Receiver = (*Protocol)(nil) + _ protocol.Closer = (*Protocol)(nil) +) + +func New(ctx context.Context, config *paho.ClientConfig, opts ...Option) (*Protocol, error) { + if config == nil { + return nil, fmt.Errorf("the paho.ClientConfig must not be nil") + } + + p := &Protocol{ + client: paho.NewClient(*config), + // default connect option + connOption: &paho.Connect{ + KeepAlive: 30, + CleanStart: true, + }, + incoming: make(chan *paho.Publish), + closeChan: make(chan struct{}), + } + if err := p.applyOptions(opts...); err != nil { + return nil, err + } + + // Connect to the MQTT broker + connAck, err := p.client.Connect(ctx, p.connOption) + if err != nil { + return nil, err + } + if 
connAck.ReasonCode != 0 { + return nil, fmt.Errorf("failed to connect to %q : %d - %q", p.client.Conn.RemoteAddr(), connAck.ReasonCode, + connAck.Properties.ReasonString) + } + + return p, nil +} + +func (p *Protocol) applyOptions(opts ...Option) error { + for _, fn := range opts { + if err := fn(p); err != nil { + return err + } + } + return nil +} + +func (p *Protocol) Send(ctx context.Context, m binding.Message, transformers ...binding.Transformer) error { + if p.publishOption == nil { + return fmt.Errorf("the paho.Publish option must not be nil") + } + + var err error + defer m.Finish(err) + + msg := p.publishOption + if cecontext.TopicFrom(ctx) != "" { + msg.Topic = cecontext.TopicFrom(ctx) + cecontext.WithTopic(ctx, "") + } + + err = WritePubMessage(ctx, m, msg, transformers...) + if err != nil { + return err + } + + _, err = p.client.Publish(ctx, msg) + if err != nil { + return err + } + return err +} + +func (p *Protocol) OpenInbound(ctx context.Context) error { + if p.subscribeOption == nil { + return fmt.Errorf("the paho.Subscribe option must not be nil") + } + + p.openerMutex.Lock() + defer p.openerMutex.Unlock() + + logger := cecontext.LoggerFrom(ctx) + + p.client.Router = paho.NewSingleHandlerRouter(func(m *paho.Publish) { + p.incoming <- m + }) + + logger.Infof("subscribing to topics: %v", p.subscribeOption.Subscriptions) + _, err := p.client.Subscribe(ctx, p.subscribeOption) + if err != nil { + return err + } + + // Wait until external or internal context done + select { + case <-ctx.Done(): + case <-p.closeChan: + } + return p.client.Disconnect(&paho.Disconnect{ReasonCode: 0}) +} + +// Receive implements Receiver.Receive +func (p *Protocol) Receive(ctx context.Context) (binding.Message, error) { + select { + case m, ok := <-p.incoming: + if !ok { + return nil, io.EOF + } + msg := NewMessage(m) + return msg, nil + case <-ctx.Done(): + return nil, io.EOF + } +} + +func (p *Protocol) Close(ctx context.Context) error { + close(p.closeChan) + return nil 
+} diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/write_message.go b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/write_message.go new file mode 100644 index 000000000..a4b87f4aa --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/write_message.go @@ -0,0 +1,133 @@ +/* +Copyright 2023 The CloudEvents Authors +SPDX-License-Identifier: Apache-2.0 +*/ + +package mqtt_paho + +import ( + "bytes" + "context" + "io" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/types" + "github.com/eclipse/paho.golang/paho" +) + +// WritePubMessage fills the provided pubMessage with the message m. +// Using context you can tweak the encoding processing (more details on binding.Write documentation). +func WritePubMessage(ctx context.Context, m binding.Message, pubMessage *paho.Publish, transformers ...binding.Transformer) error { + structuredWriter := (*pubMessageWriter)(pubMessage) + binaryWriter := (*pubMessageWriter)(pubMessage) + + _, err := binding.Write( + ctx, + m, + structuredWriter, + binaryWriter, + transformers..., + ) + return err +} + +type pubMessageWriter paho.Publish + +var ( + _ binding.StructuredWriter = (*pubMessageWriter)(nil) + _ binding.BinaryWriter = (*pubMessageWriter)(nil) +) + +func (b *pubMessageWriter) SetStructuredEvent(ctx context.Context, f format.Format, event io.Reader) error { + if b.Properties == nil { + b.Properties = &paho.PublishProperties{ + User: make([]paho.UserProperty, 0), + } + } + b.Properties.User.Add(contentType, f.MediaType()) + var buf bytes.Buffer + _, err := io.Copy(&buf, event) + if err != nil { + return err + } + b.Payload = buf.Bytes() + return nil +} + +func (b *pubMessageWriter) Start(ctx context.Context) error { + if b.Properties == nil { + b.Properties = &paho.PublishProperties{} + } + // the UserProperties of publish message is 
used to load event extensions + b.Properties.User = make([]paho.UserProperty, 0) + return nil +} + +func (b *pubMessageWriter) End(ctx context.Context) error { + return nil +} + +func (b *pubMessageWriter) SetData(reader io.Reader) error { + buf, ok := reader.(*bytes.Buffer) + if !ok { + buf = new(bytes.Buffer) + _, err := io.Copy(buf, reader) + if err != nil { + return err + } + } + b.Payload = buf.Bytes() + return nil +} + +func (b *pubMessageWriter) SetAttribute(attribute spec.Attribute, value interface{}) error { + if attribute.Kind() == spec.DataContentType { + if value == nil { + b.removeProperty(contentType) + } + s, err := types.Format(value) + if err != nil { + return err + } + if err := b.addProperty(contentType, s); err != nil { + return err + } + } else { + if value == nil { + b.removeProperty(prefix + attribute.Name()) + } + return b.addProperty(prefix+attribute.Name(), value) + } + return nil +} + +func (b *pubMessageWriter) SetExtension(name string, value interface{}) error { + if value == nil { + b.removeProperty(prefix + name) + } + return b.addProperty(prefix+name, value) +} + +func (b *pubMessageWriter) removeProperty(key string) { + for i, v := range b.Properties.User { + if v.Key == key { + b.Properties.User = append(b.Properties.User[:i], b.Properties.User[i+1:]...) + break + } + } +} + +func (b *pubMessageWriter) addProperty(key string, value interface{}) error { + s, err := types.Format(value) + if err != nil { + return err + } + + b.Properties.User = append(b.Properties.User, paho.UserProperty{ + Key: key, + Value: s, + }) + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/LICENSE b/vendor/github.com/cloudevents/sdk-go/v2/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/cloudevents/sdk-go/v2/alias.go b/vendor/github.com/cloudevents/sdk-go/v2/alias.go new file mode 100644 index 000000000..2fbfaa9a7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/alias.go @@ -0,0 +1,187 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +// Package v2 reexports a subset of the SDK v2 API. +package v2 + +// Package cloudevents alias' common functions and types to improve discoverability and reduce +// the number of imports for simple HTTP clients. 
+ +import ( + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/client" + "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" + "github.com/cloudevents/sdk-go/v2/protocol/http" + "github.com/cloudevents/sdk-go/v2/types" +) + +// Client + +type ClientOption = client.Option +type Client = client.Client + +// Event + +type Event = event.Event +type Result = protocol.Result + +// Context + +type EventContext = event.EventContext +type EventContextV1 = event.EventContextV1 +type EventContextV03 = event.EventContextV03 + +// Custom Types + +type Timestamp = types.Timestamp +type URIRef = types.URIRef + +// HTTP Protocol + +type HTTPOption = http.Option + +type HTTPProtocol = http.Protocol + +// Encoding + +type Encoding = binding.Encoding + +// Message + +type Message = binding.Message + +const ( + // ReadEncoding + + ApplicationXML = event.ApplicationXML + ApplicationJSON = event.ApplicationJSON + TextPlain = event.TextPlain + ApplicationCloudEventsJSON = event.ApplicationCloudEventsJSON + ApplicationCloudEventsBatchJSON = event.ApplicationCloudEventsBatchJSON + Base64 = event.Base64 + + // Event Versions + + VersionV1 = event.CloudEventsVersionV1 + VersionV03 = event.CloudEventsVersionV03 + + // Encoding + + EncodingBinary = binding.EncodingBinary + EncodingStructured = binding.EncodingStructured +) + +var ( + + // ContentType Helpers + + StringOfApplicationJSON = event.StringOfApplicationJSON + StringOfApplicationXML = event.StringOfApplicationXML + StringOfTextPlain = event.StringOfTextPlain + StringOfApplicationCloudEventsJSON = event.StringOfApplicationCloudEventsJSON + StringOfApplicationCloudEventsBatchJSON = event.StringOfApplicationCloudEventsBatchJSON + StringOfBase64 = event.StringOfBase64 + + // Client Creation + + NewClient = client.New + NewClientHTTP = client.NewHTTP + // Deprecated: please use New with the observability options. 
+ NewClientObserved = client.NewObserved + // Deprecated: Please use NewClientHTTP with the observability options. + NewDefaultClient = client.NewDefault + NewHTTPReceiveHandler = client.NewHTTPReceiveHandler + + // Client Options + + WithEventDefaulter = client.WithEventDefaulter + WithUUIDs = client.WithUUIDs + WithTimeNow = client.WithTimeNow + // Deprecated: this is now noop and will be removed in future releases. + WithTracePropagation = client.WithTracePropagation() + + // Event Creation + + NewEvent = event.New + + // Results + + NewResult = protocol.NewResult + ResultIs = protocol.ResultIs + ResultAs = protocol.ResultAs + + // Receipt helpers + + NewReceipt = protocol.NewReceipt + + ResultACK = protocol.ResultACK + ResultNACK = protocol.ResultNACK + + IsACK = protocol.IsACK + IsNACK = protocol.IsNACK + IsUndelivered = protocol.IsUndelivered + + // HTTP Results + + NewHTTPResult = http.NewResult + NewHTTPRetriesResult = http.NewRetriesResult + + // Message Creation + + ToMessage = binding.ToMessage + + // Event Creation + + NewEventFromHTTPRequest = http.NewEventFromHTTPRequest + NewEventFromHTTPResponse = http.NewEventFromHTTPResponse + NewEventsFromHTTPRequest = http.NewEventsFromHTTPRequest + NewEventsFromHTTPResponse = http.NewEventsFromHTTPResponse + NewHTTPRequestFromEvent = http.NewHTTPRequestFromEvent + NewHTTPRequestFromEvents = http.NewHTTPRequestFromEvents + IsHTTPBatch = http.IsHTTPBatch + + // HTTP Messages + + WriteHTTPRequest = http.WriteRequest + + // Context + + ContextWithTarget = context.WithTarget + TargetFromContext = context.TargetFrom + ContextWithRetriesConstantBackoff = context.WithRetriesConstantBackoff + ContextWithRetriesLinearBackoff = context.WithRetriesLinearBackoff + ContextWithRetriesExponentialBackoff = context.WithRetriesExponentialBackoff + + WithEncodingBinary = binding.WithForceBinary + WithEncodingStructured = binding.WithForceStructured + + // Custom Types + + ParseTimestamp = types.ParseTimestamp + ParseURIRef = 
types.ParseURIRef + ParseURI = types.ParseURI + + // HTTP Protocol + + NewHTTP = http.New + + // HTTP Protocol Options + + WithTarget = http.WithTarget + WithHeader = http.WithHeader + WithShutdownTimeout = http.WithShutdownTimeout + //WithEncoding = http.WithEncoding + //WithStructuredEncoding = http.WithStructuredEncoding // TODO: expose new way + WithPort = http.WithPort + WithPath = http.WithPath + WithMiddleware = http.WithMiddleware + WithListener = http.WithListener + WithRoundTripper = http.WithRoundTripper + WithGetHandlerFunc = http.WithGetHandlerFunc + WithOptionsHandlerFunc = http.WithOptionsHandlerFunc + WithDefaultOptionsHandlerFunc = http.WithDefaultOptionsHandlerFunc +) diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go new file mode 100644 index 000000000..97f2c4dd7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go @@ -0,0 +1,52 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "context" + "io" + + "github.com/cloudevents/sdk-go/v2/binding/spec" +) + +// MessageMetadataWriter is used to set metadata when a binary Message is visited. +type MessageMetadataWriter interface { + // Set a standard attribute. + // + // The value can either be the correct golang type for the attribute, or a canonical + // string encoding, or nil. If value is nil, then the attribute should be deleted. + // See package types to perform the needed conversions. + SetAttribute(attribute spec.Attribute, value interface{}) error + + // Set an extension attribute. + // + // The value can either be the correct golang type for the attribute, or a canonical + // string encoding, or nil. If value is nil, then the extension should be deleted. + // See package types to perform the needed conversions. 
+ SetExtension(name string, value interface{}) error +} + +// BinaryWriter is used to visit a binary Message and generate a new representation. +// +// Protocols that supports binary encoding should implement this interface to implement direct +// binary to binary encoding and event to binary encoding. +// +// Start() and End() methods must be invoked by the caller of Message.ReadBinary() every time +// the BinaryWriter implementation is used to visit a Message. +type BinaryWriter interface { + MessageMetadataWriter + + // Method invoked at the beginning of the visit. Useful to perform initial memory allocations + Start(ctx context.Context) error + + // SetData receives an io.Reader for the data attribute. + // io.Reader is not invoked when the data attribute is empty + SetData(data io.Reader) error + + // End method is invoked only after the whole encoding process ends successfully. + // If it fails, it's never invoked. It can be used to finalize the message. + End(ctx context.Context) error +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go new file mode 100644 index 000000000..ff92f6836 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go @@ -0,0 +1,66 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package binding defines interfaces for protocol bindings. + +NOTE: Most applications that emit or consume events should use the ../client +package, which provides a simpler API to the underlying binding. + +The interfaces in this package provide extra encoding and protocol information +to allow efficient forwarding and end-to-end reliable delivery between a +Receiver and a Sender belonging to different bindings. This is useful for +intermediary applications that route or forward events, but not necessary for +most "endpoint" applications that emit or consume events. 
+ +# Protocol Bindings + +A protocol binding usually implements a Message, a Sender and Receiver, a StructuredWriter and a BinaryWriter (depending on the supported encodings of the protocol) and an Write[ProtocolMessage] method. + +# Read and write events + +The core of this package is the binding.Message interface. +Through binding.MessageReader It defines how to read a protocol specific message for an +encoded event in structured mode or binary mode. +The entity who receives a protocol specific data structure representing a message +(e.g. an HttpRequest) encapsulates it in a binding.Message implementation using a NewMessage method (e.g. http.NewMessage). +Then the entity that wants to send the binding.Message back on the wire, +translates it back to the protocol specific data structure (e.g. a Kafka ConsumerMessage), using +the writers BinaryWriter and StructuredWriter specific to that protocol. +Binding implementations exposes their writers +through a specific Write[ProtocolMessage] function (e.g. kafka.EncodeProducerMessage), +in order to simplify the encoding process. + +The encoding process can be customized in order to mutate the final result with binding.TransformerFactory. +A bunch of these are provided directly by the binding/transformer module. + +Usually binding.Message implementations can be encoded only one time, because the encoding process drain the message itself. +In order to consume a message several times, the binding/buffering package provides several APIs to buffer the Message. + +A message can be converted to an event.Event using binding.ToEvent() method. +An event.Event can be used as Message casting it to binding.EventMessage. + +In order to simplify the encoding process for each protocol, this package provide several utility methods like binding.Write and binding.DirectWrite. +The binding.Write method tries to preserve the structured/binary encoding, in order to be as much efficient as possible. 
+ +Messages can be eventually wrapped to change their behaviours and binding their lifecycle, like the binding.FinishMessage. +Every Message wrapper implements the MessageWrapper interface + +# Sender and Receiver + +A Receiver receives protocol specific messages and wraps them to into binding.Message implementations. + +A Sender converts arbitrary Message implementations to a protocol-specific form using the protocol specific Write method +and sends them. + +Message and ExactlyOnceMessage provide methods to allow acknowledgments to +propagate when a reliable messages is forwarded from a Receiver to a Sender. +QoS 0 (unreliable), 1 (at-least-once) and 2 (exactly-once) are supported. + +# Transport + +A binding implementation providing Sender and Receiver implementations can be used as a Transport through the BindingTransport adapter. +*/ +package binding diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go new file mode 100644 index 000000000..bb8f91424 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go @@ -0,0 +1,50 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import "errors" + +// Encoding enum specifies the type of encodings supported by binding interfaces +type Encoding int + +const ( + // Binary encoding as specified in https://github.com/cloudevents/spec/blob/main/cloudevents/spec.md#message + EncodingBinary Encoding = iota + // Structured encoding as specified in https://github.com/cloudevents/spec/blob/main/cloudevents/spec.md#message + EncodingStructured + // Message is an instance of EventMessage or it contains EventMessage nested (through MessageWrapper) + EncodingEvent + // When the encoding is unknown (which means that the message is a non-event) + EncodingUnknown + + // EncodingBatch is an instance of JSON Batched Events + EncodingBatch +) + +func (e Encoding) String() string 
{ + switch e { + case EncodingBinary: + return "binary" + case EncodingStructured: + return "structured" + case EncodingEvent: + return "event" + case EncodingBatch: + return "batch" + case EncodingUnknown: + return "unknown" + } + return "" +} + +// ErrUnknownEncoding specifies that the Message is not an event or it is encoded with an unknown encoding +var ErrUnknownEncoding = errors.New("unknown Message encoding") + +// ErrNotStructured returned by Message.Structured for non-structured messages. +var ErrNotStructured = errors.New("message is not in structured mode") + +// ErrNotBinary returned by Message.Binary for non-binary messages. +var ErrNotBinary = errors.New("message is not in binary mode") diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go new file mode 100644 index 000000000..83d613af4 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go @@ -0,0 +1,110 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "bytes" + "context" + + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/event" +) + +type eventFormatKey int + +const ( + formatEventStructured eventFormatKey = iota +) + +// EventMessage type-converts a event.Event object to implement Message. +// This allows local event.Event objects to be sent directly via Sender.Send() +// +// s.Send(ctx, binding.EventMessage(e)) +// +// When an event is wrapped into a EventMessage, the original event could be +// potentially mutated. 
If you need to use the Event again, after wrapping it into +// an Event message, you should copy it before +type EventMessage event.Event + +func ToMessage(e *event.Event) Message { + return (*EventMessage)(e) +} + +func (m *EventMessage) ReadEncoding() Encoding { + return EncodingEvent +} + +func (m *EventMessage) ReadStructured(ctx context.Context, builder StructuredWriter) error { + f := GetOrDefaultFromCtx(ctx, formatEventStructured, format.JSON).(format.Format) + b, err := f.Marshal((*event.Event)(m)) + if err != nil { + return err + } + return builder.SetStructuredEvent(ctx, f, bytes.NewReader(b)) +} + +func (m *EventMessage) ReadBinary(ctx context.Context, b BinaryWriter) (err error) { + err = eventContextToBinaryWriter(m.Context, b) + if err != nil { + return err + } + // Pass the body + body := (*event.Event)(m).Data() + if len(body) > 0 { + err = b.SetData(bytes.NewBuffer(body)) + if err != nil { + return err + } + } + return nil +} + +func (m *EventMessage) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) { + sv := spec.VS.Version(m.Context.GetSpecVersion()) + a := sv.AttributeFromKind(k) + if a != nil { + return a, a.Get(m.Context) + } + return nil, nil +} + +func (m *EventMessage) GetExtension(name string) interface{} { + ext, _ := m.Context.GetExtension(name) + return ext +} + +func eventContextToBinaryWriter(c event.EventContext, b BinaryWriter) (err error) { + // Pass all attributes + sv := spec.VS.Version(c.GetSpecVersion()) + for _, a := range sv.Attributes() { + value := a.Get(c) + if value != nil { + err = b.SetAttribute(a, value) + } + if err != nil { + return err + } + } + // Pass all extensions + for k, v := range c.GetExtensions() { + err = b.SetExtension(k, v) + if err != nil { + return err + } + } + return nil +} + +func (*EventMessage) Finish(error) error { return nil } + +var _ Message = (*EventMessage)(nil) // Test it conforms to the interface +var _ MessageMetadataReader = (*EventMessage)(nil) // Test it conforms to the 
interface + +// UseFormatForEvent configures which format to use when marshalling the event to structured mode +func UseFormatForEvent(ctx context.Context, f format.Format) context.Context { + return context.WithValue(ctx, formatEventStructured, f) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go new file mode 100644 index 000000000..8b51c4c61 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go @@ -0,0 +1,42 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import "github.com/cloudevents/sdk-go/v2/binding/spec" + +type finishMessage struct { + Message + finish func(error) +} + +func (m *finishMessage) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) { + return m.Message.(MessageMetadataReader).GetAttribute(k) +} + +func (m *finishMessage) GetExtension(s string) interface{} { + return m.Message.(MessageMetadataReader).GetExtension(s) +} + +func (m *finishMessage) GetWrappedMessage() Message { + return m.Message +} + +func (m *finishMessage) Finish(err error) error { + err2 := m.Message.Finish(err) // Finish original message first + if m.finish != nil { + m.finish(err) // Notify callback + } + return err2 +} + +var _ MessageWrapper = (*finishMessage)(nil) + +// WithFinish returns a wrapper for m that calls finish() and +// m.Finish() in its Finish(). +// Allows code to be notified when a message is Finished. 
+func WithFinish(m Message, finish func(error)) Message { + return &finishMessage{Message: m, finish: finish} +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go new file mode 100644 index 000000000..54c3f1a8c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go @@ -0,0 +1,12 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package format formats structured events. + +The "application/cloudevents+json" format is built-in and always +available. Other formats may be added. +*/ +package format diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go new file mode 100644 index 000000000..6bdd1842b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go @@ -0,0 +1,105 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package format + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + + "github.com/cloudevents/sdk-go/v2/event" +) + +// Format marshals and unmarshals structured events to bytes. +type Format interface { + // MediaType identifies the format + MediaType() string + // Marshal event to bytes + Marshal(*event.Event) ([]byte, error) + // Unmarshal bytes to event + Unmarshal([]byte, *event.Event) error +} + +// Prefix for event-format media types. +const Prefix = "application/cloudevents" + +// IsFormat returns true if mediaType begins with "application/cloudevents" +func IsFormat(mediaType string) bool { return strings.HasPrefix(mediaType, Prefix) } + +// JSON is the built-in "application/cloudevents+json" format. 
+var JSON = jsonFmt{} + +type jsonFmt struct{} + +func (jsonFmt) MediaType() string { return event.ApplicationCloudEventsJSON } + +func (jsonFmt) Marshal(e *event.Event) ([]byte, error) { return json.Marshal(e) } +func (jsonFmt) Unmarshal(b []byte, e *event.Event) error { + return json.Unmarshal(b, e) +} + +// JSONBatch is the built-in "application/cloudevents-batch+json" format. +var JSONBatch = jsonBatchFmt{} + +type jsonBatchFmt struct{} + +func (jb jsonBatchFmt) MediaType() string { + return event.ApplicationCloudEventsBatchJSON +} + +// Marshal will return an error for jsonBatchFmt since the Format interface doesn't support batch Marshalling, and we +// know it's structured batch json, we'll go direct to the json.UnMarshall() (see `ToEvents()`) since that is the best +// way to support batch operations for now. +func (jb jsonBatchFmt) Marshal(e *event.Event) ([]byte, error) { + return nil, errors.New("not supported for batch events") +} + +func (jb jsonBatchFmt) Unmarshal(b []byte, e *event.Event) error { + return errors.New("not supported for batch events") +} + +// built-in formats +var formats map[string]Format + +func init() { + formats = map[string]Format{} + Add(JSON) + Add(JSONBatch) +} + +// Lookup returns the format for contentType, or nil if not found. +func Lookup(contentType string) Format { + i := strings.IndexRune(contentType, ';') + if i == -1 { + i = len(contentType) + } + contentType = strings.TrimSpace(strings.ToLower(contentType[0:i])) + return formats[contentType] +} + +func unknown(mediaType string) error { + return fmt.Errorf("unknown event format media-type %#v", mediaType) +} + +// Add a new Format. It can be retrieved by Lookup(f.MediaType()) +func Add(f Format) { formats[f.MediaType()] = f } + +// Marshal an event to bytes using the mediaType event format. 
+func Marshal(mediaType string, e *event.Event) ([]byte, error) { + if f := formats[mediaType]; f != nil { + return f.Marshal(e) + } + return nil, unknown(mediaType) +} + +// Unmarshal bytes to an event using the mediaType event format. +func Unmarshal(mediaType string, b []byte, e *event.Event) error { + if f := formats[mediaType]; f != nil { + return f.Unmarshal(b, e) + } + return unknown(mediaType) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go new file mode 100644 index 000000000..2fb136c62 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go @@ -0,0 +1,153 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/binding/spec" +) + +// MessageReader defines the read-related portion of the Message interface. +// +// The ReadStructured and ReadBinary methods allows to perform an optimized encoding of a Message to a specific data structure. +// +// If MessageReader.ReadEncoding() can be equal to EncodingBinary, then the implementation of MessageReader +// MUST also implement MessageMetadataReader. +// +// A Sender should try each method of interest and fall back to binding.ToEvent() if none are supported. +// An out of the box algorithm is provided for writing a message: binding.Write(). +type MessageReader interface { + // Return the type of the message Encoding. + // The encoding should be preferably computed when the message is constructed. + ReadEncoding() Encoding + + // ReadStructured transfers a structured-mode event to a StructuredWriter. + // It must return ErrNotStructured if message is not in structured mode. + // + // Returns a different err if something wrong happened while trying to read the structured event. + // In this case, the caller must Finish the message with appropriate error. 
+ // + // This allows Senders to avoid re-encoding messages that are + // already in suitable structured form. + ReadStructured(context.Context, StructuredWriter) error + + // ReadBinary transfers a binary-mode event to an BinaryWriter. + // It must return ErrNotBinary if message is not in binary mode. + // + // The implementation of ReadBinary must not control the lifecycle with BinaryWriter.Start() and BinaryWriter.End(), + // because the caller must control the lifecycle. + // + // Returns a different err if something wrong happened while trying to read the binary event + // In this case, the caller must Finish the message with appropriate error + // + // This allows Senders to avoid re-encoding messages that are + // already in suitable binary form. + ReadBinary(context.Context, BinaryWriter) error +} + +// MessageMetadataReader defines how to read metadata from a binary/event message +// +// If a message implementing MessageReader is encoded as binary (MessageReader.ReadEncoding() == EncodingBinary) +// or it's an EventMessage, then it's safe to assume that it also implements this interface +type MessageMetadataReader interface { + // GetAttribute returns: + // + // * attribute, value: if the message contains an attribute of that attribute kind + // * attribute, nil: if the message spec version supports the attribute kind, but doesn't have any value + // * nil, nil: if the message spec version doesn't support the attribute kind + GetAttribute(attributeKind spec.Kind) (spec.Attribute, interface{}) + // GetExtension returns the value of that extension, if any. + GetExtension(name string) interface{} +} + +// Message is the interface to a binding-specific message containing an event. +// +// # Reliable Delivery +// +// There are 3 reliable qualities of service for messages: +// +// 0/at-most-once/unreliable: messages can be dropped silently. 
+// +// 1/at-least-once: messages are not dropped without signaling an error +// to the sender, but they may be duplicated in the event of a re-send. +// +// 2/exactly-once: messages are never dropped (without error) or +// duplicated, as long as both sending and receiving ends maintain +// some binding-specific delivery state. Whether this is persisted +// depends on the configuration of the binding implementations. +// +// The Message interface supports QoS 0 and 1, the ExactlyOnceMessage interface +// supports QoS 2 +// +// Message includes the MessageReader interface to read messages. Every binding.Message implementation *must* specify if the message can be accessed one or more times. +// +// When a Message can be forgotten by the entity who produced the message, Message.Finish() *must* be invoked. +type Message interface { + MessageReader + + // Finish *must* be called when message from a Receiver can be forgotten by + // the receiver. A QoS 1 sender should not call Finish() until it gets an acknowledgment of + // receipt on the underlying transport. For QoS 2 see ExactlyOnceMessage. + // + // Note that, depending on the Message implementation, forgetting to Finish the message + // could produce memory/resources leaks! + // + // Passing a non-nil err indicates sending or processing failed. + // A non-nil return indicates that the message was not accepted + // by the receivers peer. + Finish(error) error +} + +// ExactlyOnceMessage is implemented by received Messages +// that support QoS 2. Only transports that support QoS 2 need to +// implement or use this interface. +type ExactlyOnceMessage interface { + Message + + // Received is called by a forwarding QoS2 Sender when it gets + // acknowledgment of receipt (e.g. AMQP 'accept' or MQTT PUBREC) + // + // The receiver must call settle(nil) when it get's the ack-of-ack + // (e.g. AMQP 'settle' or MQTT PUBCOMP) or settle(err) if the + // transfer fails. 
+ // + // Finally the Sender calls Finish() to indicate the message can be + // discarded. + // + // If sending fails, or if the sender does not support QoS 2, then + // Finish() may be called without any call to Received() + Received(settle func(error)) +} + +// MessageContext interface exposes the internal context that a message might contain +// Only some Message implementations implement this interface. +type MessageContext interface { + // Get the context associated with this message + Context() context.Context +} + +// MessageWrapper interface is used to walk through a decorated Message and unwrap it. +type MessageWrapper interface { + Message + MessageMetadataReader + + // Method to get the wrapped message + GetWrappedMessage() Message +} + +func UnwrapMessage(message Message) Message { + m := message + for m != nil { + switch mt := m.(type) { + case MessageWrapper: + m = mt.GetWrappedMessage() + default: + return m + } + } + return m +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go new file mode 100644 index 000000000..3c3021d46 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go @@ -0,0 +1,141 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package spec + +import ( + "fmt" + "time" + + "github.com/cloudevents/sdk-go/v2/event" + + "github.com/cloudevents/sdk-go/v2/types" +) + +// Kind is a version-independent identifier for a CloudEvent context attribute. 
+type Kind uint8 + +const ( + // Required cloudevents attributes + ID Kind = iota + Source + SpecVersion + Type + // Optional cloudevents attributes + DataContentType + DataSchema + Subject + Time +) +const nAttrs = int(Time) + 1 + +var kindNames = [nAttrs]string{ + "id", + "source", + "specversion", + "type", + "datacontenttype", + "dataschema", + "subject", + "time", +} + +// String is a human-readable string, for a valid attribute name use Attribute.Name +func (k Kind) String() string { return kindNames[k] } + +// IsRequired returns true for attributes defined as "required" by the CE spec. +func (k Kind) IsRequired() bool { return k < DataContentType } + +// Attribute is a named attribute accessor. +// The attribute name is specific to a Version. +type Attribute interface { + Kind() Kind + // Name of the attribute with respect to the current spec Version() with prefix + PrefixedName() string + // Name of the attribute with respect to the current spec Version() + Name() string + // Version of the spec that this attribute belongs to + Version() Version + // Get the value of this attribute from an event context + Get(event.EventContextReader) interface{} + // Set the value of this attribute on an event context + Set(event.EventContextWriter, interface{}) error + // Delete this attribute from and event context, when possible + Delete(event.EventContextWriter) error +} + +// accessor provides Kind, Get, Set. 
+type accessor interface { + Kind() Kind + Get(event.EventContextReader) interface{} + Set(event.EventContextWriter, interface{}) error + Delete(event.EventContextWriter) error +} + +var acc = [nAttrs]accessor{ + &aStr{aKind(ID), event.EventContextReader.GetID, event.EventContextWriter.SetID}, + &aStr{aKind(Source), event.EventContextReader.GetSource, event.EventContextWriter.SetSource}, + &aStr{aKind(SpecVersion), event.EventContextReader.GetSpecVersion, func(writer event.EventContextWriter, s string) error { return nil }}, + &aStr{aKind(Type), event.EventContextReader.GetType, event.EventContextWriter.SetType}, + &aStr{aKind(DataContentType), event.EventContextReader.GetDataContentType, event.EventContextWriter.SetDataContentType}, + &aStr{aKind(DataSchema), event.EventContextReader.GetDataSchema, event.EventContextWriter.SetDataSchema}, + &aStr{aKind(Subject), event.EventContextReader.GetSubject, event.EventContextWriter.SetSubject}, + &aTime{aKind(Time), event.EventContextReader.GetTime, event.EventContextWriter.SetTime}, +} + +// aKind implements Kind() +type aKind Kind + +func (kind aKind) Kind() Kind { return Kind(kind) } + +type aStr struct { + aKind + get func(event.EventContextReader) string + set func(event.EventContextWriter, string) error +} + +func (a *aStr) Get(c event.EventContextReader) interface{} { + if s := a.get(c); s != "" { + return s + } + return nil // Treat blank as missing +} + +func (a *aStr) Set(c event.EventContextWriter, v interface{}) error { + s, err := types.ToString(v) + if err != nil { + return fmt.Errorf("invalid value for %s: %#v", a.Kind(), v) + } + return a.set(c, s) +} + +func (a *aStr) Delete(c event.EventContextWriter) error { + return a.set(c, "") +} + +type aTime struct { + aKind + get func(event.EventContextReader) time.Time + set func(event.EventContextWriter, time.Time) error +} + +func (a *aTime) Get(c event.EventContextReader) interface{} { + if v := a.get(c); !v.IsZero() { + return v + } + return nil // Treat zero 
time as missing. +} + +func (a *aTime) Set(c event.EventContextWriter, v interface{}) error { + t, err := types.ToTime(v) + if err != nil { + return fmt.Errorf("invalid value for %s: %#v", a.Kind(), v) + } + return a.set(c, t) +} + +func (a *aTime) Delete(c event.EventContextWriter) error { + return a.set(c, time.Time{}) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go new file mode 100644 index 000000000..da5bc9f85 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go @@ -0,0 +1,12 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package spec provides spec-version metadata. + +For use by code that maps events using (prefixed) attribute name strings. +Supports handling multiple spec versions uniformly. +*/ +package spec diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go new file mode 100644 index 000000000..110787ddc --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go @@ -0,0 +1,81 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package spec + +import ( + "github.com/cloudevents/sdk-go/v2/event" +) + +type matchExactVersion struct { + version +} + +func (v *matchExactVersion) Attribute(name string) Attribute { return v.attrMap[name] } + +var _ Version = (*matchExactVersion)(nil) + +func newMatchExactVersionVersion( + prefix string, + attributeNameMatchMapper func(string) string, + context event.EventContext, + convert func(event.EventContextConverter) event.EventContext, + attrs ...*attribute, +) *matchExactVersion { + v := &matchExactVersion{ + version: version{ + prefix: prefix, + context: context, + convert: convert, + attrMap: map[string]Attribute{}, + attrs: make([]Attribute, len(attrs)), + }, + } + 
for i, a := range attrs { + a.version = v + v.attrs[i] = a + v.attrMap[attributeNameMatchMapper(a.name)] = a + } + return v +} + +// WithPrefixMatchExact returns a set of versions with prefix added to all attribute names. +func WithPrefixMatchExact(attributeNameMatchMapper func(string) string, prefix string) *Versions { + attr := func(name string, kind Kind) *attribute { + return &attribute{accessor: acc[kind], name: name} + } + vs := &Versions{ + m: map[string]Version{}, + prefix: prefix, + all: []Version{ + newMatchExactVersionVersion(prefix, attributeNameMatchMapper, event.EventContextV1{}.AsV1(), + func(c event.EventContextConverter) event.EventContext { return c.AsV1() }, + attr("id", ID), + attr("source", Source), + attr("specversion", SpecVersion), + attr("type", Type), + attr("datacontenttype", DataContentType), + attr("dataschema", DataSchema), + attr("subject", Subject), + attr("time", Time), + ), + newMatchExactVersionVersion(prefix, attributeNameMatchMapper, event.EventContextV03{}.AsV03(), + func(c event.EventContextConverter) event.EventContext { return c.AsV03() }, + attr("specversion", SpecVersion), + attr("type", Type), + attr("source", Source), + attr("schemaurl", DataSchema), + attr("subject", Subject), + attr("id", ID), + attr("time", Time), + attr("datacontenttype", DataContentType), + ), + }, + } + for _, v := range vs.all { + vs.m[v.String()] = v + } + return vs +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go new file mode 100644 index 000000000..7fa0f5840 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go @@ -0,0 +1,189 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package spec + +import ( + "strings" + + "github.com/cloudevents/sdk-go/v2/event" +) + +// Version provides meta-data for a single spec-version. +type Version interface { + // String name of the version, e.g. 
"1.0" + String() string + // Prefix for attribute names. + Prefix() string + // Attribute looks up a prefixed attribute name (case insensitive). + // Returns nil if not found. + Attribute(prefixedName string) Attribute + // Attribute looks up the attribute from kind. + // Returns nil if not found. + AttributeFromKind(kind Kind) Attribute + // Attributes returns all the context attributes for this version. + Attributes() []Attribute + // Convert translates a context to this version. + Convert(event.EventContextConverter) event.EventContext + // NewContext returns a new context for this version. + NewContext() event.EventContext + // SetAttribute sets named attribute to value. + // + // Name is case insensitive. + // Does nothing if name does not start with prefix. + SetAttribute(context event.EventContextWriter, name string, value interface{}) error +} + +// Versions contains all known versions with the same attribute prefix. +type Versions struct { + prefix string + all []Version + m map[string]Version +} + +// Versions returns the list of all known versions, most recent first. +func (vs *Versions) Versions() []Version { return vs.all } + +// Version returns the named version. +func (vs *Versions) Version(name string) Version { + return vs.m[name] +} + +// Latest returns the latest Version +func (vs *Versions) Latest() Version { return vs.all[0] } + +// PrefixedSpecVersionName returns the specversion attribute PrefixedName +func (vs *Versions) PrefixedSpecVersionName() string { return vs.prefix + "specversion" } + +// Prefix is the lowercase attribute name prefix. 
+func (vs *Versions) Prefix() string { return vs.prefix } + +type attribute struct { + accessor + name string + version Version +} + +func (a *attribute) PrefixedName() string { return a.version.Prefix() + a.name } +func (a *attribute) Name() string { return a.name } +func (a *attribute) Version() Version { return a.version } + +type version struct { + prefix string + context event.EventContext + convert func(event.EventContextConverter) event.EventContext + attrMap map[string]Attribute + attrs []Attribute +} + +func (v *version) Attribute(name string) Attribute { return v.attrMap[strings.ToLower(name)] } +func (v *version) Attributes() []Attribute { return v.attrs } +func (v *version) String() string { return v.context.GetSpecVersion() } +func (v *version) Prefix() string { return v.prefix } +func (v *version) NewContext() event.EventContext { return v.context.Clone() } + +// HasPrefix is a case-insensitive prefix check. +func (v *version) HasPrefix(name string) bool { + return strings.HasPrefix(strings.ToLower(name), v.prefix) +} + +func (v *version) Convert(c event.EventContextConverter) event.EventContext { return v.convert(c) } + +func (v *version) SetAttribute(c event.EventContextWriter, name string, value interface{}) error { + if a := v.Attribute(name); a != nil { // Standard attribute + return a.Set(c, value) + } + name = strings.ToLower(name) + var err error + if v.HasPrefix(name) { // Extension attribute + return c.SetExtension(strings.TrimPrefix(name, v.prefix), value) + } + return err +} + +func (v *version) AttributeFromKind(kind Kind) Attribute { + for _, a := range v.Attributes() { + if a.Kind() == kind { + return a + } + } + return nil +} + +func newVersion( + prefix string, + context event.EventContext, + convert func(event.EventContextConverter) event.EventContext, + attrs ...*attribute, +) *version { + v := &version{ + prefix: strings.ToLower(prefix), + context: context, + convert: convert, + attrMap: map[string]Attribute{}, + attrs: 
make([]Attribute, len(attrs)), + } + for i, a := range attrs { + a.version = v + v.attrs[i] = a + v.attrMap[strings.ToLower(a.PrefixedName())] = a + } + return v +} + +// WithPrefix returns a set of versions with prefix added to all attribute names. +func WithPrefix(prefix string) *Versions { + attr := func(name string, kind Kind) *attribute { + return &attribute{accessor: acc[kind], name: name} + } + vs := &Versions{ + m: map[string]Version{}, + prefix: prefix, + all: []Version{ + newVersion(prefix, event.EventContextV1{}.AsV1(), + func(c event.EventContextConverter) event.EventContext { return c.AsV1() }, + attr("id", ID), + attr("source", Source), + attr("specversion", SpecVersion), + attr("type", Type), + attr("datacontenttype", DataContentType), + attr("dataschema", DataSchema), + attr("subject", Subject), + attr("time", Time), + ), + newVersion(prefix, event.EventContextV03{}.AsV03(), + func(c event.EventContextConverter) event.EventContext { return c.AsV03() }, + attr("specversion", SpecVersion), + attr("type", Type), + attr("source", Source), + attr("schemaurl", DataSchema), + attr("subject", Subject), + attr("id", ID), + attr("time", Time), + attr("datacontenttype", DataContentType), + ), + }, + } + for _, v := range vs.all { + vs.m[v.String()] = v + } + return vs +} + +// New returns a set of versions +func New() *Versions { return WithPrefix("") } + +// Built-in un-prefixed versions. 
+var ( + VS *Versions + V03 Version + V1 Version +) + +func init() { + VS = New() + V03 = VS.Version("0.3") + V1 = VS.Version("1.0") +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go new file mode 100644 index 000000000..60256f2b3 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go @@ -0,0 +1,22 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "context" + "io" + + "github.com/cloudevents/sdk-go/v2/binding/format" +) + +// StructuredWriter is used to visit a structured Message and generate a new representation. +// +// Protocols that supports structured encoding should implement this interface to implement direct +// structured to structured encoding and event to structured encoding. +type StructuredWriter interface { + // Event receives an io.Reader for the whole event. + SetStructuredEvent(ctx context.Context, format format.Format, event io.Reader) error +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go new file mode 100644 index 000000000..d3332c158 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go @@ -0,0 +1,153 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/types" +) + +// ErrCannotConvertToEvent is a generic error when a conversion of a Message to an Event fails +var ErrCannotConvertToEvent = errors.New("cannot convert message to event") + +// ErrCannotConvertToEvents is a generic error when a conversion of a 
Message to a Batched Event fails +var ErrCannotConvertToEvents = errors.New("cannot convert message to batched events") + +// ToEvent translates a Message with a valid Structured or Binary representation to an Event. +// This function returns the Event generated from the Message and the original encoding of the message or +// an error that points the conversion error. +// transformers can be nil and this function guarantees that they are invoked only once during the encoding process. +func ToEvent(ctx context.Context, message MessageReader, transformers ...Transformer) (*event.Event, error) { + if message == nil { + return nil, nil + } + + messageEncoding := message.ReadEncoding() + if messageEncoding == EncodingEvent { + m := message + for m != nil { + switch mt := m.(type) { + case *EventMessage: + e := (*event.Event)(mt) + return e, Transformers(transformers).Transform(mt, (*messageToEventBuilder)(e)) + case MessageWrapper: + m = mt.GetWrappedMessage() + default: + break + } + } + return nil, ErrCannotConvertToEvent + } + + e := event.New() + encoder := (*messageToEventBuilder)(&e) + _, err := DirectWrite( + context.Background(), + message, + encoder, + encoder, + ) + if err != nil { + return nil, err + } + return &e, Transformers(transformers).Transform((*EventMessage)(&e), encoder) +} + +// ToEvents translates a Batch Message and corresponding Reader data to a slice of Events. +// This function returns the Events generated from the body data, or an error that points +// to the conversion issue. +func ToEvents(ctx context.Context, message MessageReader, body io.Reader) ([]event.Event, error) { + messageEncoding := message.ReadEncoding() + if messageEncoding != EncodingBatch { + return nil, ErrCannotConvertToEvents + } + + // Since Format doesn't support batch Marshalling, and we know it's structured batch json, we'll go direct to the + // json.UnMarshall(), since that is the best way to support batch operations for now. 
+ var events []event.Event + return events, json.NewDecoder(body).Decode(&events) +} + +type messageToEventBuilder event.Event + +var _ StructuredWriter = (*messageToEventBuilder)(nil) +var _ BinaryWriter = (*messageToEventBuilder)(nil) + +func (b *messageToEventBuilder) SetStructuredEvent(ctx context.Context, format format.Format, ev io.Reader) error { + var buf bytes.Buffer + _, err := io.Copy(&buf, ev) + if err != nil { + return err + } + return format.Unmarshal(buf.Bytes(), (*event.Event)(b)) +} + +func (b *messageToEventBuilder) Start(ctx context.Context) error { + return nil +} + +func (b *messageToEventBuilder) End(ctx context.Context) error { + return nil +} + +func (b *messageToEventBuilder) SetData(data io.Reader) error { + buf, ok := data.(*bytes.Buffer) + if !ok { + buf = new(bytes.Buffer) + _, err := io.Copy(buf, data) + if err != nil { + return err + } + } + if buf.Len() > 0 { + b.DataEncoded = buf.Bytes() + } + return nil +} + +func (b *messageToEventBuilder) SetAttribute(attribute spec.Attribute, value interface{}) error { + if value == nil { + _ = attribute.Delete(b.Context) + return nil + } + // If spec version we need to change to right context struct + if attribute.Kind() == spec.SpecVersion { + str, err := types.ToString(value) + if err != nil { + return err + } + switch str { + case event.CloudEventsVersionV03: + b.Context = b.Context.AsV03() + case event.CloudEventsVersionV1: + b.Context = b.Context.AsV1() + default: + return fmt.Errorf("unrecognized event version %s", str) + } + return nil + } + return attribute.Set(b.Context, value) +} + +func (b *messageToEventBuilder) SetExtension(name string, value interface{}) error { + if value == nil { + return b.Context.SetExtension(name, nil) + } + value, err := types.Validate(value) + if err != nil { + return err + } + return b.Context.SetExtension(name, value) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go 
b/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go new file mode 100644 index 000000000..de3bec44f --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go @@ -0,0 +1,42 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +// Transformer is an interface that implements a transformation +// process while transferring the event from the Message +// implementation to the provided encoder +// +// When a write function (binding.Write, binding.ToEvent, buffering.CopyMessage, etc.) +// takes Transformer(s) as parameter, it eventually converts the message to a form +// which correctly implements MessageMetadataReader, in order to guarantee that transformation +// is applied +type Transformer interface { + Transform(MessageMetadataReader, MessageMetadataWriter) error +} + +// TransformerFunc is a type alias to implement a Transformer through a function pointer +type TransformerFunc func(MessageMetadataReader, MessageMetadataWriter) error + +func (t TransformerFunc) Transform(r MessageMetadataReader, w MessageMetadataWriter) error { + return t(r, w) +} + +var _ Transformer = (TransformerFunc)(nil) + +// Transformers is a utility alias to run several Transformer +type Transformers []Transformer + +func (t Transformers) Transform(r MessageMetadataReader, w MessageMetadataWriter) error { + for _, transformer := range t { + err := transformer.Transform(r, w) + if err != nil { + return err + } + } + return nil +} + +var _ Transformer = (Transformers)(nil) diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go new file mode 100644 index 000000000..cb498e62d --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go @@ -0,0 +1,179 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "context" + + 
"github.com/cloudevents/sdk-go/v2/event" +) + +type eventEncodingKey int + +const ( + skipDirectStructuredEncoding eventEncodingKey = iota + skipDirectBinaryEncoding + preferredEventEncoding +) + +// DirectWrite invokes the encoders. structuredWriter and binaryWriter could be nil if the protocol doesn't support it. +// transformers can be nil and this function guarantees that they are invoked only once during the encoding process. +// This function MUST be invoked only if message.ReadEncoding() == EncodingBinary or message.ReadEncoding() == EncodingStructured +// +// Returns: +// * EncodingStructured, nil if message is correctly encoded in structured encoding +// * EncodingBinary, nil if message is correctly encoded in binary encoding +// * EncodingStructured, err if message was structured but error happened during the encoding +// * EncodingBinary, err if message was binary but error happened during the encoding +// * EncodingUnknown, ErrUnknownEncoding if message is not a structured or a binary Message +func DirectWrite( + ctx context.Context, + message MessageReader, + structuredWriter StructuredWriter, + binaryWriter BinaryWriter, + transformers ...Transformer, +) (Encoding, error) { + if structuredWriter != nil && len(transformers) == 0 && !GetOrDefaultFromCtx(ctx, skipDirectStructuredEncoding, false).(bool) { + if err := message.ReadStructured(ctx, structuredWriter); err == nil { + return EncodingStructured, nil + } else if err != ErrNotStructured { + return EncodingStructured, err + } + } + + if binaryWriter != nil && !GetOrDefaultFromCtx(ctx, skipDirectBinaryEncoding, false).(bool) && message.ReadEncoding() == EncodingBinary { + return EncodingBinary, writeBinaryWithTransformer(ctx, message, binaryWriter, transformers) + } + + return EncodingUnknown, ErrUnknownEncoding +} + +// Write executes the full algorithm to encode a Message using transformers: +// 1. It first tries direct encoding using DirectWrite +// 2. 
If no direct encoding is possible, it uses ToEvent to generate an Event representation +// 3. From the Event, the message is encoded back to the provided structured or binary encoders +// You can tweak the encoding process using the context decorators WithForceStructured, WithForceStructured, etc. +// transformers can be nil and this function guarantees that they are invoked only once during the encoding process. +// Returns: +// * EncodingStructured, nil if message is correctly encoded in structured encoding +// * EncodingBinary, nil if message is correctly encoded in binary encoding +// * EncodingUnknown, ErrUnknownEncoding if message.ReadEncoding() == EncodingUnknown +// * _, err if error happened during the encoding +func Write( + ctx context.Context, + message MessageReader, + structuredWriter StructuredWriter, + binaryWriter BinaryWriter, + transformers ...Transformer, +) (Encoding, error) { + enc := message.ReadEncoding() + var err error + // Skip direct encoding if the event is an event message + if enc != EncodingEvent { + enc, err = DirectWrite(ctx, message, structuredWriter, binaryWriter, transformers...) + if enc != EncodingUnknown { + // Message directly encoded, nothing else to do here + return enc, err + } + } + + var e *event.Event + e, err = ToEvent(ctx, message, transformers...) 
+ if err != nil { + return enc, err + } + + message = (*EventMessage)(e) + + if GetOrDefaultFromCtx(ctx, preferredEventEncoding, EncodingBinary).(Encoding) == EncodingStructured { + if structuredWriter != nil { + return EncodingStructured, message.ReadStructured(ctx, structuredWriter) + } + if binaryWriter != nil { + return EncodingBinary, writeBinary(ctx, message, binaryWriter) + } + } else { + if binaryWriter != nil { + return EncodingBinary, writeBinary(ctx, message, binaryWriter) + } + if structuredWriter != nil { + return EncodingStructured, message.ReadStructured(ctx, structuredWriter) + } + } + + return EncodingUnknown, ErrUnknownEncoding +} + +// WithSkipDirectStructuredEncoding skips direct structured to structured encoding during the encoding process +func WithSkipDirectStructuredEncoding(ctx context.Context, skip bool) context.Context { + return context.WithValue(ctx, skipDirectStructuredEncoding, skip) +} + +// WithSkipDirectBinaryEncoding skips direct binary to binary encoding during the encoding process +func WithSkipDirectBinaryEncoding(ctx context.Context, skip bool) context.Context { + return context.WithValue(ctx, skipDirectBinaryEncoding, skip) +} + +// WithPreferredEventEncoding defines the preferred encoding from event to message during the encoding process +func WithPreferredEventEncoding(ctx context.Context, enc Encoding) context.Context { + return context.WithValue(ctx, preferredEventEncoding, enc) +} + +// WithForceStructured forces structured encoding during the encoding process +func WithForceStructured(ctx context.Context) context.Context { + return context.WithValue(context.WithValue(ctx, preferredEventEncoding, EncodingStructured), skipDirectBinaryEncoding, true) +} + +// WithForceBinary forces binary encoding during the encoding process +func WithForceBinary(ctx context.Context) context.Context { + return context.WithValue(context.WithValue(ctx, preferredEventEncoding, EncodingBinary), skipDirectStructuredEncoding, true) +} + +// 
GetOrDefaultFromCtx gets a configuration value from the provided context +func GetOrDefaultFromCtx(ctx context.Context, key interface{}, def interface{}) interface{} { + if val := ctx.Value(key); val != nil { + return val + } else { + return def + } +} + +func writeBinaryWithTransformer( + ctx context.Context, + message MessageReader, + binaryWriter BinaryWriter, + transformers Transformers, +) error { + err := binaryWriter.Start(ctx) + if err != nil { + return err + } + err = message.ReadBinary(ctx, binaryWriter) + if err != nil { + return err + } + err = transformers.Transform(message.(MessageMetadataReader), binaryWriter) + if err != nil { + return err + } + return binaryWriter.End(ctx) +} + +func writeBinary( + ctx context.Context, + message MessageReader, + binaryWriter BinaryWriter, +) error { + err := binaryWriter.Start(ctx) + if err != nil { + return err + } + err = message.ReadBinary(ctx, binaryWriter) + if err != nil { + return err + } + return binaryWriter.End(ctx) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client.go new file mode 100644 index 000000000..452304ffd --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client.go @@ -0,0 +1,295 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "errors" + "fmt" + "io" + "runtime" + "sync" + + "go.uber.org/zap" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// Client interface defines the runtime contract the CloudEvents client supports. +type Client interface { + // Send will transmit the given event over the client's configured transport. 
+ Send(ctx context.Context, event event.Event) protocol.Result + + // Request will transmit the given event over the client's configured + // transport and return any response event. + Request(ctx context.Context, event event.Event) (*event.Event, protocol.Result) + + // StartReceiver will register the provided function for callback on receipt + // of a cloudevent. It will also start the underlying protocol as it has + // been configured. + // This call is blocking. + // Valid fn signatures are: + // * func() + // * func() error + // * func(context.Context) + // * func(context.Context) protocol.Result + // * func(event.Event) + // * func(event.Event) protocol.Result + // * func(context.Context, event.Event) + // * func(context.Context, event.Event) protocol.Result + // * func(event.Event) *event.Event + // * func(event.Event) (*event.Event, protocol.Result) + // * func(context.Context, event.Event) *event.Event + // * func(context.Context, event.Event) (*event.Event, protocol.Result) + StartReceiver(ctx context.Context, fn interface{}) error +} + +// New produces a new client with the provided transport object and applied +// client options. +func New(obj interface{}, opts ...Option) (Client, error) { + c := &ceClient{ + // Running runtime.GOMAXPROCS(0) doesn't update the value, just returns the current one + pollGoroutines: runtime.GOMAXPROCS(0), + observabilityService: noopObservabilityService{}, + } + + if p, ok := obj.(protocol.Sender); ok { + c.sender = p + } + if p, ok := obj.(protocol.Requester); ok { + c.requester = p + } + if p, ok := obj.(protocol.Responder); ok { + c.responder = p + } + if p, ok := obj.(protocol.Receiver); ok { + c.receiver = p + } + if p, ok := obj.(protocol.Opener); ok { + c.opener = p + } + + if err := c.applyOptions(opts...); err != nil { + return nil, err + } + return c, nil +} + +type ceClient struct { + sender protocol.Sender + requester protocol.Requester + receiver protocol.Receiver + responder protocol.Responder + // Optional. 
+ opener protocol.Opener + + observabilityService ObservabilityService + + inboundContextDecorators []func(context.Context, binding.Message) context.Context + outboundContextDecorators []func(context.Context) context.Context + invoker Invoker + receiverMu sync.Mutex + eventDefaulterFns []EventDefaulter + pollGoroutines int + blockingCallback bool + ackMalformedEvent bool +} + +func (c *ceClient) applyOptions(opts ...Option) error { + for _, fn := range opts { + if err := fn(c); err != nil { + return err + } + } + return nil +} + +func (c *ceClient) Send(ctx context.Context, e event.Event) protocol.Result { + var err error + if c.sender == nil { + err = errors.New("sender not set") + return err + } + + for _, f := range c.outboundContextDecorators { + ctx = f(ctx) + } + + if len(c.eventDefaulterFns) > 0 { + for _, fn := range c.eventDefaulterFns { + e = fn(ctx, e) + } + } + if err = e.Validate(); err != nil { + return err + } + + // Event has been defaulted and validated, record we are going to perform send. + ctx, cb := c.observabilityService.RecordSendingEvent(ctx, e) + err = c.sender.Send(ctx, (*binding.EventMessage)(&e)) + defer cb(err) + return err +} + +func (c *ceClient) Request(ctx context.Context, e event.Event) (*event.Event, protocol.Result) { + var resp *event.Event + var err error + + if c.requester == nil { + err = errors.New("requester not set") + return nil, err + } + for _, f := range c.outboundContextDecorators { + ctx = f(ctx) + } + + if len(c.eventDefaulterFns) > 0 { + for _, fn := range c.eventDefaulterFns { + e = fn(ctx, e) + } + } + + if err = e.Validate(); err != nil { + return nil, err + } + + // Event has been defaulted and validated, record we are going to perform request. + ctx, cb := c.observabilityService.RecordRequestEvent(ctx, e) + + // If provided a requester, use it to do request/response. 
+ var msg binding.Message + msg, err = c.requester.Request(ctx, (*binding.EventMessage)(&e)) + if msg != nil { + defer func() { + if err := msg.Finish(err); err != nil { + cecontext.LoggerFrom(ctx).Warnw("failed calling message.Finish", zap.Error(err)) + } + }() + } + if protocol.IsUndelivered(err) { + return nil, err + } + + // try to turn msg into an event, it might not work and that is ok. + if rs, rserr := binding.ToEvent(ctx, msg); rserr != nil { + cecontext.LoggerFrom(ctx).Debugw("response: failed calling ToEvent", zap.Error(rserr), zap.Any("resp", msg)) + // If the protocol returns no error, it is an ACK on the request, but we had + // issues turning the response into an event, so make an ACK Result and pass + // down the ToEvent error as well. + err = protocol.NewReceipt(true, "failed to convert response into event: %v\n%w", rserr, err) + } else { + resp = rs + } + defer cb(err, resp) + return resp, err +} + +// StartReceiver sets up the given fn to handle Receive. +// See Client.StartReceiver for details. This is a blocking call. 
+func (c *ceClient) StartReceiver(ctx context.Context, fn interface{}) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + c.receiverMu.Lock() + defer c.receiverMu.Unlock() + + if c.invoker != nil { + return fmt.Errorf("client already has a receiver") + } + + invoker, err := newReceiveInvoker( + fn, + c.observabilityService, + c.inboundContextDecorators, + c.eventDefaulterFns, + c.ackMalformedEvent, + ) + if err != nil { + return err + } + if invoker.IsReceiver() && c.receiver == nil { + return fmt.Errorf("mismatched receiver callback without protocol.Receiver supported by protocol") + } + if invoker.IsResponder() && c.responder == nil { + return fmt.Errorf("mismatched receiver callback without protocol.Responder supported by protocol") + } + c.invoker = invoker + + if c.responder == nil && c.receiver == nil { + return errors.New("responder nor receiver set") + } + + defer func() { + c.invoker = nil + }() + + // Start Polling. + wg := sync.WaitGroup{} + for i := 0; i < c.pollGoroutines; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for { + var msg binding.Message + var respFn protocol.ResponseFn + var err error + + if c.responder != nil { + msg, respFn, err = c.responder.Respond(ctx) + } else if c.receiver != nil { + msg, err = c.receiver.Receive(ctx) + respFn = noRespFn + } + + if err == io.EOF { // Normal close + return + } + + if err != nil { + cecontext.LoggerFrom(ctx).Warn("Error while receiving a message: ", err) + continue + } + + callback := func() { + if err := c.invoker.Invoke(ctx, msg, respFn); err != nil { + cecontext.LoggerFrom(ctx).Warn("Error while handling a message: ", err) + } + } + + if c.blockingCallback { + callback() + } else { + // Do not block on the invoker. + wg.Add(1) + go func() { + defer wg.Done() + callback() + }() + } + } + }() + } + + // Start the opener, if set. 
+ if c.opener != nil { + if err = c.opener.OpenInbound(ctx); err != nil { + err = fmt.Errorf("error while opening the inbound connection: %w", err) + cancel() + } + } + + wg.Wait() + + return err +} + +// noRespFn is used to simply forward the protocol.Result for receivers that aren't responders +func noRespFn(_ context.Context, _ binding.Message, r protocol.Result, _ ...binding.Transformer) error { + return r +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go new file mode 100644 index 000000000..d48cc2042 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go @@ -0,0 +1,35 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "github.com/cloudevents/sdk-go/v2/protocol/http" +) + +// NewHTTP provides the good defaults for the common case using an HTTP +// Protocol client. +// The WithTimeNow, and WithUUIDs client options are also applied to the +// client, all outbound events will have a time and id set if not already +// present. +func NewHTTP(opts ...http.Option) (Client, error) { + p, err := http.New(opts...) + if err != nil { + return nil, err + } + + c, err := New(p, WithTimeNow(), WithUUIDs()) + if err != nil { + return nil, err + } + + return c, nil +} + +// NewDefault has been replaced by NewHTTP +// Deprecated. 
To get the same as NewDefault provided, please use NewHTTP with +// the observability service passed as an option, or client.NewClientHTTP from +// package github.com/cloudevents/sdk-go/observability/opencensus/v2/client +var NewDefault = NewHTTP diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go new file mode 100644 index 000000000..82985b8a7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go @@ -0,0 +1,12 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +// NewObserved produces a new client with the provided transport object and applied +// client options. +// Deprecated: This now has the same behaviour of New, and will be removed in future releases. +// As New, you must provide the observability service to use. +var NewObserved = New diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go b/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go new file mode 100644 index 000000000..7bfebf35c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go @@ -0,0 +1,57 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "time" + + "github.com/cloudevents/sdk-go/v2/event" + + "github.com/google/uuid" +) + +// EventDefaulter is the function signature for extensions that are able +// to perform event defaulting. +type EventDefaulter func(ctx context.Context, event event.Event) event.Event + +// DefaultIDToUUIDIfNotSet will inspect the provided event and assign a UUID to +// context.ID if it is found to be empty. 
+func DefaultIDToUUIDIfNotSet(ctx context.Context, event event.Event) event.Event { + if event.Context != nil { + if event.ID() == "" { + event.Context = event.Context.Clone() + event.SetID(uuid.New().String()) + } + } + return event +} + +// DefaultTimeToNowIfNotSet will inspect the provided event and assign a new +// Timestamp to context.Time if it is found to be nil or zero. +func DefaultTimeToNowIfNotSet(ctx context.Context, event event.Event) event.Event { + if event.Context != nil { + if event.Time().IsZero() { + event.Context = event.Context.Clone() + event.SetTime(time.Now()) + } + } + return event +} + +// NewDefaultDataContentTypeIfNotSet returns a defaulter that will inspect the +// provided event and set the provided content type if content type is found +// to be empty. +func NewDefaultDataContentTypeIfNotSet(contentType string) EventDefaulter { + return func(ctx context.Context, event event.Event) event.Event { + if event.Context != nil { + if event.DataContentType() == "" { + event.SetDataContentType(contentType) + } + } + return event + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go new file mode 100644 index 000000000..e09962ce6 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go @@ -0,0 +1,11 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package client holds the recommended entry points for interacting with the CloudEvents Golang SDK. The client wraps +a selected transport. The client adds validation and defaulting for sending events, and flexible receiver method +registration. For full details, read the `client.Client` documentation. 
+*/ +package client diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go b/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go new file mode 100644 index 000000000..672581b58 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go @@ -0,0 +1,45 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + cecontext "github.com/cloudevents/sdk-go/v2/context" + thttp "github.com/cloudevents/sdk-go/v2/protocol/http" + "go.uber.org/zap" + "net/http" +) + +func NewHTTPReceiveHandler(ctx context.Context, p *thttp.Protocol, fn interface{}) (*EventReceiver, error) { + invoker, err := newReceiveInvoker(fn, noopObservabilityService{}, nil, nil, false) //TODO(slinkydeveloper) maybe not nil? + if err != nil { + return nil, err + } + + return &EventReceiver{ + p: p, + invoker: invoker, + }, nil +} + +type EventReceiver struct { + p *thttp.Protocol + invoker Invoker +} + +func (r *EventReceiver) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + // Prepare to handle the message if there's one (context cancellation will ensure this closes) + go func() { + ctx := req.Context() + msg, respFn, err := r.p.Respond(ctx) + if err != nil { + cecontext.LoggerFrom(context.TODO()).Debugw("failed to call Respond", zap.Error(err)) + } else if err := r.invoker.Invoke(ctx, msg, respFn); err != nil { + cecontext.LoggerFrom(context.TODO()).Debugw("failed to call Invoke", zap.Error(err)) + } + }() + r.p.ServeHTTP(rw, req) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go b/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go new file mode 100644 index 000000000..a3080b007 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go @@ -0,0 +1,145 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "fmt" + + 
"github.com/cloudevents/sdk-go/v2/binding" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +type Invoker interface { + Invoke(context.Context, binding.Message, protocol.ResponseFn) error + IsReceiver() bool + IsResponder() bool +} + +var _ Invoker = (*receiveInvoker)(nil) + +func newReceiveInvoker( + fn interface{}, + observabilityService ObservabilityService, + inboundContextDecorators []func(context.Context, binding.Message) context.Context, + fns []EventDefaulter, + ackMalformedEvent bool, +) (Invoker, error) { + r := &receiveInvoker{ + eventDefaulterFns: fns, + observabilityService: observabilityService, + inboundContextDecorators: inboundContextDecorators, + ackMalformedEvent: ackMalformedEvent, + } + + if fn, err := receiver(fn); err != nil { + return nil, err + } else { + r.fn = fn + } + + return r, nil +} + +type receiveInvoker struct { + fn *receiverFn + observabilityService ObservabilityService + eventDefaulterFns []EventDefaulter + inboundContextDecorators []func(context.Context, binding.Message) context.Context + ackMalformedEvent bool +} + +func (r *receiveInvoker) Invoke(ctx context.Context, m binding.Message, respFn protocol.ResponseFn) (err error) { + defer func() { + err = m.Finish(err) + }() + + var respMsg binding.Message + var result protocol.Result + + e, eventErr := binding.ToEvent(ctx, m) + switch { + case eventErr != nil && r.fn.hasEventIn: + r.observabilityService.RecordReceivedMalformedEvent(ctx, eventErr) + return respFn(ctx, nil, protocol.NewReceipt(r.ackMalformedEvent, "failed to convert Message to Event: %w", eventErr)) + case r.fn != nil: + // Check if event is valid before invoking the receiver function + if e != nil { + if validationErr := e.Validate(); validationErr != nil { + r.observabilityService.RecordReceivedMalformedEvent(ctx, validationErr) + return respFn(ctx, nil, protocol.NewReceipt(r.ackMalformedEvent, "validation error 
in incoming event: %w", validationErr)) + } + } + + // Let's invoke the receiver fn + var resp *event.Event + resp, result = func() (resp *event.Event, result protocol.Result) { + defer func() { + if r := recover(); r != nil { + result = fmt.Errorf("call to Invoker.Invoke(...) has panicked: %v", r) + cecontext.LoggerFrom(ctx).Error(result) + } + }() + ctx = computeInboundContext(m, ctx, r.inboundContextDecorators) + + var cb func(error) + ctx, cb = r.observabilityService.RecordCallingInvoker(ctx, e) + + resp, result = r.fn.invoke(ctx, e) + defer cb(result) + return + }() + + if respFn == nil { + break + } + + // Apply the defaulter chain to the outgoing event. + if resp != nil && len(r.eventDefaulterFns) > 0 { + for _, fn := range r.eventDefaulterFns { + *resp = fn(ctx, *resp) + } + // Validate the event conforms to the CloudEvents Spec. + if vErr := resp.Validate(); vErr != nil { + cecontext.LoggerFrom(ctx).Errorf("cloudevent validation failed on response event: %v", vErr) + } + } + + // because binding.Message is an interface, casting a nil resp + // here would make future comparisons to nil false + if resp != nil { + respMsg = (*binding.EventMessage)(resp) + } + } + + if respFn == nil { + // let the protocol ACK based on the result + return result + } + + return respFn(ctx, respMsg, result) +} + +func (r *receiveInvoker) IsReceiver() bool { + return !r.fn.hasEventOut +} + +func (r *receiveInvoker) IsResponder() bool { + return r.fn.hasEventOut +} + +func computeInboundContext(message binding.Message, fallback context.Context, inboundContextDecorators []func(context.Context, binding.Message) context.Context) context.Context { + result := fallback + if mctx, ok := message.(binding.MessageContext); ok { + result = cecontext.ValuesDelegating(mctx.Context(), fallback) + } + for _, f := range inboundContextDecorators { + result = f(result, message) + } + return result +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go 
b/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go new file mode 100644 index 000000000..75005d3bb --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go @@ -0,0 +1,54 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/event" +) + +// ObservabilityService is an interface users can implement to record metrics, create tracing spans, and plug other observability tools in the Client +type ObservabilityService interface { + // InboundContextDecorators is a method that returns the InboundContextDecorators that must be mounted in the Client to properly propagate some tracing informations. + InboundContextDecorators() []func(context.Context, binding.Message) context.Context + + // RecordReceivedMalformedEvent is invoked when an event was received but it's malformed or invalid. + RecordReceivedMalformedEvent(ctx context.Context, err error) + // RecordCallingInvoker is invoked before the user function is invoked. + // The returned callback will be invoked after the user finishes to process the event with the eventual processing error + // The error provided to the callback could be both a processing error, or a result + RecordCallingInvoker(ctx context.Context, event *event.Event) (context.Context, func(errOrResult error)) + // RecordSendingEvent is invoked before the event is sent. + // The returned callback will be invoked when the response is received + // The error provided to the callback could be both a processing error, or a result + RecordSendingEvent(ctx context.Context, event event.Event) (context.Context, func(errOrResult error)) + + // RecordRequestEvent is invoked before the event is requested. 
+ // The returned callback will be invoked when the response is received + RecordRequestEvent(ctx context.Context, event event.Event) (context.Context, func(errOrResult error, event *event.Event)) +} + +type noopObservabilityService struct{} + +func (n noopObservabilityService) InboundContextDecorators() []func(context.Context, binding.Message) context.Context { + return nil +} + +func (n noopObservabilityService) RecordReceivedMalformedEvent(ctx context.Context, err error) {} + +func (n noopObservabilityService) RecordCallingInvoker(ctx context.Context, event *event.Event) (context.Context, func(errOrResult error)) { + return ctx, func(errOrResult error) {} +} + +func (n noopObservabilityService) RecordSendingEvent(ctx context.Context, event event.Event) (context.Context, func(errOrResult error)) { + return ctx, func(errOrResult error) {} +} + +func (n noopObservabilityService) RecordRequestEvent(ctx context.Context, e event.Event) (context.Context, func(errOrResult error, event *event.Event)) { + return ctx, func(errOrResult error, event *event.Event) {} +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/options.go b/vendor/github.com/cloudevents/sdk-go/v2/client/options.go new file mode 100644 index 000000000..44394be34 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/options.go @@ -0,0 +1,141 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "fmt" + + "github.com/cloudevents/sdk-go/v2/binding" +) + +// Option is the function signature required to be considered an client.Option. +type Option func(interface{}) error + +// WithEventDefaulter adds an event defaulter to the end of the defaulter chain. 
+func WithEventDefaulter(fn EventDefaulter) Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + if fn == nil { + return fmt.Errorf("client option was given an nil event defaulter") + } + c.eventDefaulterFns = append(c.eventDefaulterFns, fn) + } + return nil + } +} + +func WithForceBinary() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.outboundContextDecorators = append(c.outboundContextDecorators, binding.WithForceBinary) + } + return nil + } +} + +func WithForceStructured() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.outboundContextDecorators = append(c.outboundContextDecorators, binding.WithForceStructured) + } + return nil + } +} + +// WithUUIDs adds DefaultIDToUUIDIfNotSet event defaulter to the end of the +// defaulter chain. +func WithUUIDs() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.eventDefaulterFns = append(c.eventDefaulterFns, DefaultIDToUUIDIfNotSet) + } + return nil + } +} + +// WithTimeNow adds DefaultTimeToNowIfNotSet event defaulter to the end of the +// defaulter chain. +func WithTimeNow() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.eventDefaulterFns = append(c.eventDefaulterFns, DefaultTimeToNowIfNotSet) + } + return nil + } +} + +// WithTracePropagation enables trace propagation via the distributed tracing +// extension. +// Deprecated: this is now noop and will be removed in future releases. +// Don't use distributed tracing extension to propagate traces: +// https://github.com/cloudevents/spec/blob/v1.0.1/extensions/distributed-tracing.md#using-the-distributed-tracing-extension +func WithTracePropagation() Option { + return func(i interface{}) error { + return nil + } +} + +// WithPollGoroutines configures how much goroutines should be used to +// poll the Receiver/Responder/Protocol implementations. 
+// Default value is GOMAXPROCS +func WithPollGoroutines(pollGoroutines int) Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.pollGoroutines = pollGoroutines + } + return nil + } +} + +// WithObservabilityService configures the observability service to use +// to record traces and metrics +func WithObservabilityService(service ObservabilityService) Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.observabilityService = service + c.inboundContextDecorators = append(c.inboundContextDecorators, service.InboundContextDecorators()...) + } + return nil + } +} + +// WithInboundContextDecorator configures a new inbound context decorator. +// Inbound context decorators are invoked to wrap additional informations from the binding.Message +// and propagate these informations in the context passed to the event receiver. +func WithInboundContextDecorator(dec func(context.Context, binding.Message) context.Context) Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.inboundContextDecorators = append(c.inboundContextDecorators, dec) + } + return nil + } +} + +// WithBlockingCallback makes the callback passed into StartReceiver is executed as a blocking call, +// i.e. in each poll go routine, the next event will not be received until the callback on current event completes. +// To make event processing serialized (no concurrency), use this option along with WithPollGoroutines(1) +func WithBlockingCallback() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.blockingCallback = true + } + return nil + } +} + +// WithAckMalformedevents causes malformed events received within StartReceiver to be acknowledged +// rather than being permanently not-acknowledged. This can be useful when a protocol does not +// provide a responder implementation and would otherwise cause the receiver to be partially or +// fully stuck. 
+func WithAckMalformedEvent() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.ackMalformedEvent = true + } + return nil + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go b/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go new file mode 100644 index 000000000..2cc0e6497 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go @@ -0,0 +1,193 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "errors" + "fmt" + "reflect" + + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// ReceiveFull is the signature of a fn to be invoked for incoming cloudevents. +type ReceiveFull func(context.Context, event.Event) protocol.Result + +type receiverFn struct { + numIn int + numOut int + fnValue reflect.Value + + hasContextIn bool + hasEventIn bool + + hasEventOut bool + hasResultOut bool +} + +const ( + inParamUsage = "expected a function taking either no parameters, one or more of (context.Context, event.Event) ordered" + outParamUsage = "expected a function returning one or mode of (*event.Event, protocol.Result) ordered" +) + +var ( + contextType = reflect.TypeOf((*context.Context)(nil)).Elem() + eventType = reflect.TypeOf((*event.Event)(nil)).Elem() + eventPtrType = reflect.TypeOf((*event.Event)(nil)) // want the ptr type + resultType = reflect.TypeOf((*protocol.Result)(nil)).Elem() +) + +// receiver creates a receiverFn wrapper class that is used by the client to +// validate and invoke the provided function. 
+// Valid fn signatures are: +// * func() +// * func() protocol.Result +// * func(context.Context) +// * func(context.Context) protocol.Result +// * func(event.Event) +// * func(event.Event) transport.Result +// * func(context.Context, event.Event) +// * func(context.Context, event.Event) protocol.Result +// * func(event.Event) *event.Event +// * func(event.Event) (*event.Event, protocol.Result) +// * func(context.Context, event.Event) *event.Event +// * func(context.Context, event.Event) (*event.Event, protocol.Result) +func receiver(fn interface{}) (*receiverFn, error) { + fnType := reflect.TypeOf(fn) + if fnType.Kind() != reflect.Func { + return nil, errors.New("must pass a function to handle events") + } + + r := &receiverFn{ + fnValue: reflect.ValueOf(fn), + numIn: fnType.NumIn(), + numOut: fnType.NumOut(), + } + + if err := r.validate(fnType); err != nil { + return nil, err + } + + return r, nil +} + +func (r *receiverFn) invoke(ctx context.Context, e *event.Event) (*event.Event, protocol.Result) { + args := make([]reflect.Value, 0, r.numIn) + + if r.numIn > 0 { + if r.hasContextIn { + args = append(args, reflect.ValueOf(ctx)) + } + if r.hasEventIn { + args = append(args, reflect.ValueOf(*e)) + } + } + v := r.fnValue.Call(args) + var respOut protocol.Result + var eOut *event.Event + if r.numOut > 0 { + i := 0 + if r.hasEventOut { + if eo, ok := v[i].Interface().(*event.Event); ok { + eOut = eo + } + i++ // <-- note, need to inc i. + } + if r.hasResultOut { + if resp, ok := v[i].Interface().(protocol.Result); ok { + respOut = resp + } + } + } + return eOut, respOut +} + +// Verifies that the inputs to a function have a valid signature +// Valid input is to be [0, all] of +// context.Context, event.Event in this order. 
+func (r *receiverFn) validateInParamSignature(fnType reflect.Type) error { + r.hasContextIn = false + r.hasEventIn = false + + switch fnType.NumIn() { + case 2: + // has to be (context.Context, event.Event) + if !eventType.ConvertibleTo(fnType.In(1)) { + return fmt.Errorf("%s; cannot convert parameter 2 to %s from event.Event", inParamUsage, fnType.In(1)) + } else { + r.hasEventIn = true + } + fallthrough + case 1: + if !contextType.ConvertibleTo(fnType.In(0)) { + if !eventType.ConvertibleTo(fnType.In(0)) { + return fmt.Errorf("%s; cannot convert parameter 1 to %s from context.Context or event.Event", inParamUsage, fnType.In(0)) + } else if r.hasEventIn { + return fmt.Errorf("%s; duplicate parameter of type event.Event", inParamUsage) + } else { + r.hasEventIn = true + } + } else { + r.hasContextIn = true + } + fallthrough + case 0: + return nil + + default: + return fmt.Errorf("%s; function has too many parameters (%d)", inParamUsage, fnType.NumIn()) + } +} + +// Verifies that the outputs of a function have a valid signature +// Valid output signatures to be [0, all] of +// *event.Event, transport.Result in this order +func (r *receiverFn) validateOutParamSignature(fnType reflect.Type) error { + r.hasEventOut = false + r.hasResultOut = false + + switch fnType.NumOut() { + case 2: + // has to be (*event.Event, transport.Result) + if !fnType.Out(1).ConvertibleTo(resultType) { + return fmt.Errorf("%s; cannot convert parameter 2 from %s to event.Response", outParamUsage, fnType.Out(1)) + } else { + r.hasResultOut = true + } + fallthrough + case 1: + if !fnType.Out(0).ConvertibleTo(resultType) { + if !fnType.Out(0).ConvertibleTo(eventPtrType) { + return fmt.Errorf("%s; cannot convert parameter 1 from %s to *event.Event or transport.Result", outParamUsage, fnType.Out(0)) + } else { + r.hasEventOut = true + } + } else if r.hasResultOut { + return fmt.Errorf("%s; duplicate parameter of type event.Response", outParamUsage) + } else { + r.hasResultOut = true + } + 
fallthrough + case 0: + return nil + default: + return fmt.Errorf("%s; function has too many return types (%d)", outParamUsage, fnType.NumOut()) + } +} + +// validateReceiverFn validates that a function has the right number of in and +// out params and that they are of allowed types. +func (r *receiverFn) validate(fnType reflect.Type) error { + if err := r.validateInParamSignature(fnType); err != nil { + return err + } + if err := r.validateOutParamSignature(fnType); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/context.go b/vendor/github.com/cloudevents/sdk-go/v2/context/context.go new file mode 100644 index 000000000..fc9ef0315 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/context.go @@ -0,0 +1,110 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package context + +import ( + "context" + "net/url" + "time" +) + +// Opaque key type used to store target +type targetKeyType struct{} + +var targetKey = targetKeyType{} + +// WithTarget returns back a new context with the given target. Target is intended to be transport dependent. +// For http transport, `target` should be a full URL and will be injected into the outbound http request. +func WithTarget(ctx context.Context, target string) context.Context { + return context.WithValue(ctx, targetKey, target) +} + +// TargetFrom looks in the given context and returns `target` as a parsed url if found and valid, otherwise nil. +func TargetFrom(ctx context.Context) *url.URL { + c := ctx.Value(targetKey) + if c != nil { + if s, ok := c.(string); ok && s != "" { + if target, err := url.Parse(s); err == nil { + return target + } + } + } + return nil +} + +// Opaque key type used to store topic +type topicKeyType struct{} + +var topicKey = topicKeyType{} + +// WithTopic returns back a new context with the given topic. Topic is intended to be transport dependent. 
+// For pubsub transport, `topic` should be a Pub/Sub Topic ID. +func WithTopic(ctx context.Context, topic string) context.Context { + return context.WithValue(ctx, topicKey, topic) +} + +// TopicFrom looks in the given context and returns `topic` as a string if found and valid, otherwise "". +func TopicFrom(ctx context.Context) string { + c := ctx.Value(topicKey) + if c != nil { + if s, ok := c.(string); ok { + return s + } + } + return "" +} + +// Opaque key type used to store retry parameters +type retriesKeyType struct{} + +var retriesKey = retriesKeyType{} + +// WithRetriesConstantBackoff returns back a new context with retries parameters using constant backoff strategy. +// MaxTries is the maximum number for retries and delay is the time interval between retries +func WithRetriesConstantBackoff(ctx context.Context, delay time.Duration, maxTries int) context.Context { + return WithRetryParams(ctx, &RetryParams{ + Strategy: BackoffStrategyConstant, + Period: delay, + MaxTries: maxTries, + }) +} + +// WithRetriesLinearBackoff returns back a new context with retries parameters using linear backoff strategy. +// MaxTries is the maximum number for retries and delay*tries is the time interval between retries +func WithRetriesLinearBackoff(ctx context.Context, delay time.Duration, maxTries int) context.Context { + return WithRetryParams(ctx, &RetryParams{ + Strategy: BackoffStrategyLinear, + Period: delay, + MaxTries: maxTries, + }) +} + +// WithRetriesExponentialBackoff returns back a new context with retries parameters using exponential backoff strategy. +// MaxTries is the maximum number for retries and period is the amount of time to wait, used as `period * 2^retries`. 
+func WithRetriesExponentialBackoff(ctx context.Context, period time.Duration, maxTries int) context.Context {
+	return WithRetryParams(ctx, &RetryParams{
+		Strategy: BackoffStrategyExponential,
+		Period:   period,
+		MaxTries: maxTries,
+	})
+}
+
+// WithRetryParams returns back a new context with retries parameters.
+func WithRetryParams(ctx context.Context, rp *RetryParams) context.Context {
+	return context.WithValue(ctx, retriesKey, rp)
+}
+
+// RetriesFrom looks in the given context and returns the retries parameters if found.
+// Otherwise returns the default retries configuration (ie. no retries).
+func RetriesFrom(ctx context.Context) *RetryParams {
+	c := ctx.Value(retriesKey)
+	if c != nil {
+		if s, ok := c.(*RetryParams); ok {
+			return s
+		}
+	}
+	return &DefaultRetryParams
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go b/vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go
new file mode 100644
index 000000000..434a4da7a
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go
@@ -0,0 +1,25 @@
+package context
+
+import "context"
+
+type valuesDelegating struct {
+	context.Context
+	parent context.Context
+}
+
+// ValuesDelegating wraps a child and parent context. It will perform Value()
+// lookups first on the child, and then fall back to the parent. All other calls
+// go solely to the child context.
+func ValuesDelegating(child, parent context.Context) context.Context { + return &valuesDelegating{ + Context: child, + parent: parent, + } +} + +func (c *valuesDelegating) Value(key interface{}) interface{} { + if val := c.Context.Value(key); val != nil { + return val + } + return c.parent.Value(key) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go new file mode 100644 index 000000000..0b2dcaf70 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go @@ -0,0 +1,10 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package context holds the last resort overrides and fyi objects that can be passed to clients and transports added to +context.Context objects. +*/ +package context diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go b/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go new file mode 100644 index 000000000..b3087a79f --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go @@ -0,0 +1,48 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package context + +import ( + "context" + + "go.uber.org/zap" +) + +// Opaque key type used to store logger +type loggerKeyType struct{} + +var loggerKey = loggerKeyType{} + +// fallbackLogger is the logger is used when there is no logger attached to the context. +var fallbackLogger *zap.SugaredLogger + +func init() { + if logger, err := zap.NewProduction(); err != nil { + // We failed to create a fallback logger. + fallbackLogger = zap.NewNop().Sugar() + } else { + fallbackLogger = logger.Named("fallback").Sugar() + } +} + +// WithLogger returns a new context with the logger injected into the given context. 
+func WithLogger(ctx context.Context, logger *zap.SugaredLogger) context.Context { + if logger == nil { + return context.WithValue(ctx, loggerKey, fallbackLogger) + } + return context.WithValue(ctx, loggerKey, logger) +} + +// LoggerFrom returns the logger stored in context. +func LoggerFrom(ctx context.Context) *zap.SugaredLogger { + l := ctx.Value(loggerKey) + if l != nil { + if logger, ok := l.(*zap.SugaredLogger); ok { + return logger + } + } + return fallbackLogger +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go b/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go new file mode 100644 index 000000000..ec17df72e --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go @@ -0,0 +1,76 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package context + +import ( + "context" + "errors" + "math" + "time" +) + +type BackoffStrategy string + +const ( + BackoffStrategyNone = "none" + BackoffStrategyConstant = "constant" + BackoffStrategyLinear = "linear" + BackoffStrategyExponential = "exponential" +) + +var DefaultRetryParams = RetryParams{Strategy: BackoffStrategyNone} + +// RetryParams holds parameters applied to retries +type RetryParams struct { + // Strategy is the backoff strategy to applies between retries + Strategy BackoffStrategy + + // MaxTries is the maximum number of times to retry request before giving up + MaxTries int + + // Period is + // - for none strategy: no delay + // - for constant strategy: the delay interval between retries + // - for linear strategy: interval between retries = Period * retries + // - for exponential strategy: interval between retries = Period * retries^2 + Period time.Duration +} + +// BackoffFor tries will return the time duration that should be used for this +// current try count. +// `tries` is assumed to be the number of times the caller has already retried. 
+func (r *RetryParams) BackoffFor(tries int) time.Duration { + switch r.Strategy { + case BackoffStrategyConstant: + return r.Period + case BackoffStrategyLinear: + return r.Period * time.Duration(tries) + case BackoffStrategyExponential: + exp := math.Exp2(float64(tries)) + return r.Period * time.Duration(exp) + case BackoffStrategyNone: + fallthrough // default + default: + return r.Period + } +} + +// Backoff is a blocking call to wait for the correct amount of time for the retry. +// `tries` is assumed to be the number of times the caller has already retried. +func (r *RetryParams) Backoff(ctx context.Context, tries int) error { + if tries > r.MaxTries { + return errors.New("too many retries") + } + ticker := time.NewTicker(r.BackoffFor(tries)) + select { + case <-ctx.Done(): + ticker.Stop() + return errors.New("context has been cancelled") + case <-ticker.C: + ticker.Stop() + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go b/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go new file mode 100644 index 000000000..a49522f82 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go @@ -0,0 +1,47 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +const ( + TextPlain = "text/plain" + TextJSON = "text/json" + ApplicationJSON = "application/json" + ApplicationXML = "application/xml" + ApplicationCloudEventsJSON = "application/cloudevents+json" + ApplicationCloudEventsBatchJSON = "application/cloudevents-batch+json" +) + +// StringOfApplicationJSON returns a string pointer to "application/json" +func StringOfApplicationJSON() *string { + a := ApplicationJSON + return &a +} + +// StringOfApplicationXML returns a string pointer to "application/xml" +func StringOfApplicationXML() *string { + a := ApplicationXML + return &a +} + +// StringOfTextPlain returns a string pointer to "text/plain" +func StringOfTextPlain() *string { + a := 
TextPlain + return &a +} + +// StringOfApplicationCloudEventsJSON returns a string pointer to +// "application/cloudevents+json" +func StringOfApplicationCloudEventsJSON() *string { + a := ApplicationCloudEventsJSON + return &a +} + +// StringOfApplicationCloudEventsBatchJSON returns a string pointer to +// "application/cloudevents-batch+json" +func StringOfApplicationCloudEventsBatchJSON() *string { + a := ApplicationCloudEventsBatchJSON + return &a +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go b/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go new file mode 100644 index 000000000..cf2152693 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go @@ -0,0 +1,16 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +const ( + Base64 = "base64" +) + +// StringOfBase64 returns a string pointer to "Base64" +func StringOfBase64() *string { + a := Base64 + return &a +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go new file mode 100644 index 000000000..3e077740b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go @@ -0,0 +1,78 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package datacodec + +import ( + "context" + "fmt" + + "github.com/cloudevents/sdk-go/v2/event/datacodec/json" + "github.com/cloudevents/sdk-go/v2/event/datacodec/text" + "github.com/cloudevents/sdk-go/v2/event/datacodec/xml" +) + +// Decoder is the expected function signature for decoding `in` to `out`. +// If Event sent the payload as base64, Decoder assumes that `in` is the +// decoded base64 byte array. +type Decoder func(ctx context.Context, in []byte, out interface{}) error + +// Encoder is the expected function signature for encoding `in` to bytes. 
+// Returns an error if the encoder has an issue encoding `in`. +type Encoder func(ctx context.Context, in interface{}) ([]byte, error) + +var decoder map[string]Decoder +var encoder map[string]Encoder + +func init() { + decoder = make(map[string]Decoder, 10) + encoder = make(map[string]Encoder, 10) + + AddDecoder("", json.Decode) + AddDecoder("application/json", json.Decode) + AddDecoder("text/json", json.Decode) + AddDecoder("application/xml", xml.Decode) + AddDecoder("text/xml", xml.Decode) + AddDecoder("text/plain", text.Decode) + + AddEncoder("", json.Encode) + AddEncoder("application/json", json.Encode) + AddEncoder("text/json", json.Encode) + AddEncoder("application/xml", xml.Encode) + AddEncoder("text/xml", xml.Encode) + AddEncoder("text/plain", text.Encode) +} + +// AddDecoder registers a decoder for a given content type. The codecs will use +// these to decode the data payload from a cloudevent.Event object. +func AddDecoder(contentType string, fn Decoder) { + decoder[contentType] = fn +} + +// AddEncoder registers an encoder for a given content type. The codecs will +// use these to encode the data payload for a cloudevent.Event object. +func AddEncoder(contentType string, fn Encoder) { + encoder[contentType] = fn +} + +// Decode looks up and invokes the decoder registered for the given content +// type. An error is returned if no decoder is registered for the given +// content type. +func Decode(ctx context.Context, contentType string, in []byte, out interface{}) error { + if fn, ok := decoder[contentType]; ok { + return fn(ctx, in, out) + } + return fmt.Errorf("[decode] unsupported content type: %q", contentType) +} + +// Encode looks up and invokes the encoder registered for the given content +// type. An error is returned if no encoder is registered for the given +// content type. 
+func Encode(ctx context.Context, contentType string, in interface{}) ([]byte, error) { + if fn, ok := encoder[contentType]; ok { + return fn(ctx, in) + } + return nil, fmt.Errorf("[encode] unsupported content type: %q", contentType) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go new file mode 100644 index 000000000..b681af887 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go @@ -0,0 +1,10 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package datacodec holds the data codec registry and adds known encoders and decoders supporting media types such as +`application/json` and `application/xml`. +*/ +package datacodec diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go new file mode 100644 index 000000000..734ade59f --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go @@ -0,0 +1,56 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package json + +import ( + "context" + "encoding/json" + "fmt" + "reflect" +) + +// Decode takes `in` as []byte. +// If Event sent the payload as base64, Decoder assumes that `in` is the +// decoded base64 byte array. +func Decode(ctx context.Context, in []byte, out interface{}) error { + if in == nil { + return nil + } + if out == nil { + return fmt.Errorf("out is nil") + } + + if err := json.Unmarshal(in, out); err != nil { + return fmt.Errorf("[json] found bytes \"%s\", but failed to unmarshal: %s", string(in), err.Error()) + } + return nil +} + +// Encode attempts to json.Marshal `in` into bytes. Encode will inspect `in` +// and returns `in` unmodified if it is detected that `in` is already a []byte; +// Or json.Marshal errors. 
+func Encode(ctx context.Context, in interface{}) ([]byte, error) { + if in == nil { + return nil, nil + } + + it := reflect.TypeOf(in) + switch it.Kind() { + case reflect.Slice: + if it.Elem().Kind() == reflect.Uint8 { + + if b, ok := in.([]byte); ok && len(b) > 0 { + // check to see if it is a pre-encoded byte string. + if b[0] == byte('"') || b[0] == byte('{') || b[0] == byte('[') { + return b, nil + } + } + + } + } + + return json.Marshal(in) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go new file mode 100644 index 000000000..33e1323c7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package json holds the encoder/decoder implementation for `application/json`. +*/ +package json diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go new file mode 100644 index 000000000..761a10113 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go @@ -0,0 +1,30 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package text + +import ( + "context" + "fmt" +) + +// Text codec converts []byte or string to string and vice-versa. 
+ +func Decode(_ context.Context, in []byte, out interface{}) error { + p, _ := out.(*string) + if p == nil { + return fmt.Errorf("text.Decode out: want *string, got %T", out) + } + *p = string(in) + return nil +} + +func Encode(_ context.Context, in interface{}) ([]byte, error) { + s, ok := in.(string) + if !ok { + return nil, fmt.Errorf("text.Encode in: want string, got %T", in) + } + return []byte(s), nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go new file mode 100644 index 000000000..af10577aa --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package text holds the encoder/decoder implementation for `text/plain`. +*/ +package text diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go new file mode 100644 index 000000000..de68ec3dc --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go @@ -0,0 +1,40 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package xml + +import ( + "context" + "encoding/xml" + "fmt" +) + +// Decode takes `in` as []byte. +// If Event sent the payload as base64, Decoder assumes that `in` is the +// decoded base64 byte array. +func Decode(ctx context.Context, in []byte, out interface{}) error { + if in == nil { + return nil + } + + if err := xml.Unmarshal(in, out); err != nil { + return fmt.Errorf("[xml] found bytes, but failed to unmarshal: %s %s", err.Error(), string(in)) + } + return nil +} + +// Encode attempts to xml.Marshal `in` into bytes. Encode will inspect `in` +// and returns `in` unmodified if it is detected that `in` is already a []byte; +// Or xml.Marshal errors. 
// Encode attempts to xml.Marshal `in` into bytes. Encode will inspect `in`
// and returns `in` unmodified if it is detected that `in` is already a []byte;
// Or xml.Marshal errors.
// NOTE(review): only a leading '"' marks a []byte as pre-encoded here, while
// the json codec also accepts '{' and '['; a []byte holding an XML document
// (leading '<') is re-marshalled. Matches the code as written — confirm
// against upstream before changing.
func Encode(ctx context.Context, in interface{}) ([]byte, error) {
	if b, ok := in.([]byte); ok {
		// check to see if it is a pre-encoded byte string.
		if len(b) > 0 && b[0] == byte('"') {
			return b, nil
		}
	}

	return xml.Marshal(in)
}

// ---- vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go ----

/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

/*
Package xml holds the encoder/decoder implementation for `application/xml`.
*/
package xml

// ---- vendor/github.com/cloudevents/sdk-go/v2/event/doc.go ----

/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

/*
Package event provides primitives to work with CloudEvents specification: https://github.com/cloudevents/spec.
*/
package event

// ---- vendor/github.com/cloudevents/sdk-go/v2/event/event.go ----

/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package event

import (
	"bytes"
	"encoding/json"
	"strings"
)

// Event represents the canonical representation of a CloudEvent.
type Event struct {
	// Context carries the version-specific context attributes.
	Context EventContext
	// DataEncoded is the payload exactly as it will be serialized.
	DataEncoded []byte
	// DataBase64 indicates if the event, when serialized, represents
	// the data field using the base64 encoding.
	// In v0.3, this field is superseded by DataContentEncoding
	DataBase64 bool
	// FieldErrors holds per-field errors recorded via fieldError/fieldOK.
	FieldErrors map[string]error
}

const (
	defaultEventVersion = CloudEventsVersionV1
)

// fieldError records err against the named field, allocating the map lazily.
func (e *Event) fieldError(field string, err error) {
	if e.FieldErrors == nil {
		e.FieldErrors = make(map[string]error)
	}
	e.FieldErrors[field] = err
}

// fieldOK clears any previously recorded error for the named field.
func (e *Event) fieldOK(field string) {
	if e.FieldErrors != nil {
		delete(e.FieldErrors, field)
	}
}

// New returns a new Event, an optional version can be passed to change the
// default spec version from 1.0 to the provided version.
func New(version ...string) Event {
	specVersion := defaultEventVersion
	if len(version) >= 1 {
		specVersion = version[0]
	}
	e := &Event{}
	e.SetSpecVersion(specVersion)
	return *e
}

// ExtensionAs is deprecated: access extensions directly via the e.Extensions() map.
// Use functions in the types package to convert extension values.
// For example replace this:
//
//	var i int
//	err := e.ExtensionAs("foo", &i)
//
// With this:
//
//	i, err := types.ToInteger(e.Extensions["foo"])
func (e Event) ExtensionAs(name string, obj interface{}) error {
	return e.Context.ExtensionAs(name, obj)
}

// String returns a pretty-printed representation of the Event.
+func (e Event) String() string { + b := strings.Builder{} + + b.WriteString(e.Context.String()) + + if e.DataEncoded != nil { + if e.DataBase64 { + b.WriteString("Data (binary),\n ") + } else { + b.WriteString("Data,\n ") + } + switch e.DataMediaType() { + case ApplicationJSON: + var prettyJSON bytes.Buffer + err := json.Indent(&prettyJSON, e.DataEncoded, " ", " ") + if err != nil { + b.Write(e.DataEncoded) + } else { + b.Write(prettyJSON.Bytes()) + } + default: + b.Write(e.DataEncoded) + } + b.WriteString("\n") + } + + return b.String() +} + +func (e Event) Clone() Event { + out := Event{} + out.Context = e.Context.Clone() + out.DataEncoded = cloneBytes(e.DataEncoded) + out.DataBase64 = e.DataBase64 + out.FieldErrors = e.cloneFieldErrors() + return out +} + +func cloneBytes(in []byte) []byte { + if in == nil { + return nil + } + out := make([]byte, len(in)) + copy(out, in) + return out +} + +func (e Event) cloneFieldErrors() map[string]error { + if e.FieldErrors == nil { + return nil + } + newFE := make(map[string]error, len(e.FieldErrors)) + for k, v := range e.FieldErrors { + newFE[k] = v + } + return newFE +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go new file mode 100644 index 000000000..8fc449ed9 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go @@ -0,0 +1,118 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "context" + "encoding/base64" + "fmt" + "strconv" + + "github.com/cloudevents/sdk-go/v2/event/datacodec" +) + +// SetData encodes the given payload with the given content type. +// If the provided payload is a byte array, when marshalled to json it will be encoded as base64. +// If the provided payload is different from byte array, datacodec.Encode is invoked to attempt a +// marshalling to byte array. 
+func (e *Event) SetData(contentType string, obj interface{}) error { + e.SetDataContentType(contentType) + + if e.SpecVersion() != CloudEventsVersionV1 { + return e.legacySetData(obj) + } + + // Version 1.0 and above. + switch obj := obj.(type) { + case []byte: + e.DataEncoded = obj + e.DataBase64 = true + default: + data, err := datacodec.Encode(context.Background(), e.DataMediaType(), obj) + if err != nil { + return err + } + e.DataEncoded = data + e.DataBase64 = false + } + + return nil +} + +// Deprecated: Delete when we do not have to support Spec v0.3. +func (e *Event) legacySetData(obj interface{}) error { + data, err := datacodec.Encode(context.Background(), e.DataMediaType(), obj) + if err != nil { + return err + } + if e.DeprecatedDataContentEncoding() == Base64 { + buf := make([]byte, base64.StdEncoding.EncodedLen(len(data))) + base64.StdEncoding.Encode(buf, data) + e.DataEncoded = buf + e.DataBase64 = false + } else { + data, err := datacodec.Encode(context.Background(), e.DataMediaType(), obj) + if err != nil { + return err + } + e.DataEncoded = data + e.DataBase64 = false + } + return nil +} + +const ( + quotes = `"'` +) + +func (e Event) Data() []byte { + return e.DataEncoded +} + +// DataAs attempts to populate the provided data object with the event payload. +// obj should be a pointer type. +func (e Event) DataAs(obj interface{}) error { + data := e.Data() + + if len(data) == 0 { + // No data. + return nil + } + + if e.SpecVersion() != CloudEventsVersionV1 { + var err error + if data, err = e.legacyConvertData(data); err != nil { + return err + } + } + + return datacodec.Decode(context.Background(), e.DataMediaType(), data, obj) +} + +func (e Event) legacyConvertData(data []byte) ([]byte, error) { + if e.Context.DeprecatedGetDataContentEncoding() == Base64 { + var bs []byte + // test to see if we need to unquote the data. 
+ if data[0] == quotes[0] || data[0] == quotes[1] { + str, err := strconv.Unquote(string(data)) + if err != nil { + return nil, err + } + bs = []byte(str) + } else { + bs = data + } + + buf := make([]byte, base64.StdEncoding.DecodedLen(len(bs))) + n, err := base64.StdEncoding.Decode(buf, bs) + if err != nil { + return nil, fmt.Errorf("failed to decode data from base64: %s", err.Error()) + } + data = buf[:n] + } + + return data, nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go new file mode 100644 index 000000000..2809fed57 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go @@ -0,0 +1,102 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "time" +) + +// EventReader is the interface for reading through an event from attributes. +type EventReader interface { + // SpecVersion returns event.Context.GetSpecVersion(). + SpecVersion() string + // Type returns event.Context.GetType(). + Type() string + // Source returns event.Context.GetSource(). + Source() string + // Subject returns event.Context.GetSubject(). + Subject() string + // ID returns event.Context.GetID(). + ID() string + // Time returns event.Context.GetTime(). + Time() time.Time + // DataSchema returns event.Context.GetDataSchema(). + DataSchema() string + // DataContentType returns event.Context.GetDataContentType(). + DataContentType() string + // DataMediaType returns event.Context.GetDataMediaType(). + DataMediaType() string + // DeprecatedDataContentEncoding returns event.Context.DeprecatedGetDataContentEncoding(). + DeprecatedDataContentEncoding() string + + // Extension Attributes + + // Extensions returns the event.Context.GetExtensions(). + // Extensions use the CloudEvents type system, details in package cloudevents/types. 
+ Extensions() map[string]interface{} + + // ExtensionAs returns event.Context.ExtensionAs(name, obj). + // + // DEPRECATED: Access extensions directly via the e.Extensions() map. + // Use functions in the types package to convert extension values. + // For example replace this: + // + // var i int + // err := e.ExtensionAs("foo", &i) + // + // With this: + // + // i, err := types.ToInteger(e.Extensions["foo"]) + // + ExtensionAs(string, interface{}) error + + // Data Attribute + + // Data returns the raw data buffer + // If the event was encoded with base64 encoding, Data returns the already decoded + // byte array + Data() []byte + + // DataAs attempts to populate the provided data object with the event payload. + DataAs(interface{}) error +} + +// EventWriter is the interface for writing through an event onto attributes. +// If an error is thrown by a sub-component, EventWriter caches the error +// internally and exposes errors with a call to event.Validate(). +type EventWriter interface { + // Context Attributes + + // SetSpecVersion performs event.Context.SetSpecVersion. + SetSpecVersion(string) + // SetType performs event.Context.SetType. + SetType(string) + // SetSource performs event.Context.SetSource. + SetSource(string) + // SetSubject( performs event.Context.SetSubject. + SetSubject(string) + // SetID performs event.Context.SetID. + SetID(string) + // SetTime performs event.Context.SetTime. + SetTime(time.Time) + // SetDataSchema performs event.Context.SetDataSchema. + SetDataSchema(string) + // SetDataContentType performs event.Context.SetDataContentType. + SetDataContentType(string) + // DeprecatedSetDataContentEncoding performs event.Context.DeprecatedSetDataContentEncoding. + SetDataContentEncoding(string) + + // Extension Attributes + + // SetExtension performs event.Context.SetExtension. + SetExtension(string, interface{}) + + // SetData encodes the given payload with the given content type. 
+ // If the provided payload is a byte array, when marshalled to json it will be encoded as base64. + // If the provided payload is different from byte array, datacodec.Encode is invoked to attempt a + // marshalling to byte array. + SetData(string, interface{}) error +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go new file mode 100644 index 000000000..c5f2dc03c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go @@ -0,0 +1,203 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "strings" + + jsoniter "github.com/json-iterator/go" +) + +// WriteJson writes the in event in the provided writer. +// Note: this function assumes the input event is valid. +func WriteJson(in *Event, writer io.Writer) error { + stream := jsoniter.ConfigFastest.BorrowStream(writer) + defer jsoniter.ConfigFastest.ReturnStream(stream) + stream.WriteObjectStart() + + var ext map[string]interface{} + var dct *string + var isBase64 bool + + // Write the context (without the extensions) + switch eventContext := in.Context.(type) { + case *EventContextV03: + // Set a bunch of variables we need later + ext = eventContext.Extensions + dct = eventContext.DataContentType + + stream.WriteObjectField("specversion") + stream.WriteString(CloudEventsVersionV03) + stream.WriteMore() + + stream.WriteObjectField("id") + stream.WriteString(eventContext.ID) + stream.WriteMore() + + stream.WriteObjectField("source") + stream.WriteString(eventContext.Source.String()) + stream.WriteMore() + + stream.WriteObjectField("type") + stream.WriteString(eventContext.Type) + + if eventContext.Subject != nil { + stream.WriteMore() + stream.WriteObjectField("subject") + stream.WriteString(*eventContext.Subject) + } + + if eventContext.DataContentEncoding != nil { + isBase64 = true + 
stream.WriteMore() + stream.WriteObjectField("datacontentencoding") + stream.WriteString(*eventContext.DataContentEncoding) + } + + if eventContext.DataContentType != nil { + stream.WriteMore() + stream.WriteObjectField("datacontenttype") + stream.WriteString(*eventContext.DataContentType) + } + + if eventContext.SchemaURL != nil { + stream.WriteMore() + stream.WriteObjectField("schemaurl") + stream.WriteString(eventContext.SchemaURL.String()) + } + + if eventContext.Time != nil { + stream.WriteMore() + stream.WriteObjectField("time") + stream.WriteString(eventContext.Time.String()) + } + case *EventContextV1: + // Set a bunch of variables we need later + ext = eventContext.Extensions + dct = eventContext.DataContentType + isBase64 = in.DataBase64 + + stream.WriteObjectField("specversion") + stream.WriteString(CloudEventsVersionV1) + stream.WriteMore() + + stream.WriteObjectField("id") + stream.WriteString(eventContext.ID) + stream.WriteMore() + + stream.WriteObjectField("source") + stream.WriteString(eventContext.Source.String()) + stream.WriteMore() + + stream.WriteObjectField("type") + stream.WriteString(eventContext.Type) + + if eventContext.Subject != nil { + stream.WriteMore() + stream.WriteObjectField("subject") + stream.WriteString(*eventContext.Subject) + } + + if eventContext.DataContentType != nil { + stream.WriteMore() + stream.WriteObjectField("datacontenttype") + stream.WriteString(*eventContext.DataContentType) + } + + if eventContext.DataSchema != nil { + stream.WriteMore() + stream.WriteObjectField("dataschema") + stream.WriteString(eventContext.DataSchema.String()) + } + + if eventContext.Time != nil { + stream.WriteMore() + stream.WriteObjectField("time") + stream.WriteString(eventContext.Time.String()) + } + default: + return fmt.Errorf("missing event context") + } + + // Let's do a check on the error + if stream.Error != nil { + return fmt.Errorf("error while writing the event attributes: %w", stream.Error) + } + + // Let's write the body + if 
in.DataEncoded != nil { + stream.WriteMore() + + // We need to figure out the media type first + var mediaType string + if dct == nil { + mediaType = ApplicationJSON + } else { + // This code is required to extract the media type from the full content type string (which might contain encoding and stuff) + contentType := *dct + i := strings.IndexRune(contentType, ';') + if i == -1 { + i = len(contentType) + } + mediaType = strings.TrimSpace(strings.ToLower(contentType[0:i])) + } + + isJson := mediaType == "" || mediaType == ApplicationJSON || mediaType == TextJSON + + // If isJson and no encoding to base64, we don't need to perform additional steps + if isJson && !isBase64 { + stream.WriteObjectField("data") + _, err := stream.Write(in.DataEncoded) + if err != nil { + return fmt.Errorf("error while writing data: %w", err) + } + } else { + if in.Context.GetSpecVersion() == CloudEventsVersionV1 && isBase64 { + stream.WriteObjectField("data_base64") + } else { + stream.WriteObjectField("data") + } + // At this point of we need to write to base 64 string, or we just need to write the plain string + if isBase64 { + stream.WriteString(base64.StdEncoding.EncodeToString(in.DataEncoded)) + } else { + stream.WriteString(string(in.DataEncoded)) + } + } + + } + + // Let's do a check on the error + if stream.Error != nil { + return fmt.Errorf("error while writing the event data: %w", stream.Error) + } + + for k, v := range ext { + stream.WriteMore() + stream.WriteObjectField(k) + stream.WriteVal(v) + } + + stream.WriteObjectEnd() + + // Let's do a check on the error + if stream.Error != nil { + return fmt.Errorf("error while writing the event extensions: %w", stream.Error) + } + return stream.Flush() +} + +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. 
+func (e Event) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + err := WriteJson(&e, &buf) + return buf.Bytes(), err +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go new file mode 100644 index 000000000..9d1aeeb65 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go @@ -0,0 +1,103 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "time" +) + +var _ EventReader = (*Event)(nil) + +// SpecVersion implements EventReader.SpecVersion +func (e Event) SpecVersion() string { + if e.Context != nil { + return e.Context.GetSpecVersion() + } + return "" +} + +// Type implements EventReader.Type +func (e Event) Type() string { + if e.Context != nil { + return e.Context.GetType() + } + return "" +} + +// Source implements EventReader.Source +func (e Event) Source() string { + if e.Context != nil { + return e.Context.GetSource() + } + return "" +} + +// Subject implements EventReader.Subject +func (e Event) Subject() string { + if e.Context != nil { + return e.Context.GetSubject() + } + return "" +} + +// ID implements EventReader.ID +func (e Event) ID() string { + if e.Context != nil { + return e.Context.GetID() + } + return "" +} + +// Time implements EventReader.Time +func (e Event) Time() time.Time { + if e.Context != nil { + return e.Context.GetTime() + } + return time.Time{} +} + +// DataSchema implements EventReader.DataSchema +func (e Event) DataSchema() string { + if e.Context != nil { + return e.Context.GetDataSchema() + } + return "" +} + +// DataContentType implements EventReader.DataContentType +func (e Event) DataContentType() string { + if e.Context != nil { + return e.Context.GetDataContentType() + } + return "" +} + +// DataMediaType returns the parsed DataMediaType of the event. If parsing +// fails, the empty string is returned. 
To retrieve the parsing error, use +// `Context.GetDataMediaType` instead. +func (e Event) DataMediaType() string { + if e.Context != nil { + mediaType, _ := e.Context.GetDataMediaType() + return mediaType + } + return "" +} + +// DeprecatedDataContentEncoding implements EventReader.DeprecatedDataContentEncoding +func (e Event) DeprecatedDataContentEncoding() string { + if e.Context != nil { + return e.Context.DeprecatedGetDataContentEncoding() + } + return "" +} + +// Extensions implements EventReader.Extensions +func (e Event) Extensions() map[string]interface{} { + if e.Context != nil { + return e.Context.GetExtensions() + } + return map[string]interface{}(nil) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go new file mode 100644 index 000000000..0dd88ae5a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go @@ -0,0 +1,480 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "encoding/base64" + "errors" + "fmt" + "io" + "sync" + + jsoniter "github.com/json-iterator/go" + + "github.com/cloudevents/sdk-go/v2/types" +) + +const specVersionV03Flag uint8 = 1 << 4 +const specVersionV1Flag uint8 = 1 << 5 +const dataBase64Flag uint8 = 1 << 6 +const dataContentTypeFlag uint8 = 1 << 7 + +func checkFlag(state uint8, flag uint8) bool { + return state&flag != 0 +} + +func appendFlag(state *uint8, flag uint8) { + *state = (*state) | flag +} + +var iterPool = sync.Pool{ + New: func() interface{} { + return jsoniter.Parse(jsoniter.ConfigFastest, nil, 1024) + }, +} + +func borrowIterator(reader io.Reader) *jsoniter.Iterator { + iter := iterPool.Get().(*jsoniter.Iterator) + iter.Reset(reader) + return iter +} + +func returnIterator(iter *jsoniter.Iterator) { + iter.Error = nil + iter.Attachment = nil + iterPool.Put(iter) +} + +func ReadJson(out *Event, reader io.Reader) error { + 
iterator := borrowIterator(reader) + defer returnIterator(iterator) + + return readJsonFromIterator(out, iterator) +} + +// ReadJson allows you to read the bytes reader as an event +func readJsonFromIterator(out *Event, iterator *jsoniter.Iterator) error { + // Parsing dependency graph: + // SpecVersion + // ^ ^ + // | +--------------+ + // + + + // All Attributes datacontenttype (and datacontentencoding for v0.3) + // (except datacontenttype) ^ + // | + // | + // + + // Data + + var state uint8 = 0 + var cachedData []byte + + var ( + // Universally parseable fields. + id string + typ string + source types.URIRef + subject *string + time *types.Timestamp + datacontenttype *string + extensions = make(map[string]interface{}) + + // These fields require knowledge about the specversion to be parsed. + schemaurl jsoniter.Any + datacontentencoding jsoniter.Any + dataschema jsoniter.Any + dataBase64 jsoniter.Any + ) + + for key := iterator.ReadObject(); key != ""; key = iterator.ReadObject() { + // Check if we have some error in our error cache + if iterator.Error != nil { + return iterator.Error + } + + // We have a key, now we need to figure out what to do + // depending on the parsing state + + // If it's a specversion, trigger state change + if key == "specversion" { + if checkFlag(state, specVersionV1Flag|specVersionV03Flag) { + return fmt.Errorf("specversion was already provided") + } + sv := iterator.ReadString() + + // Check proper specversion + switch sv { + case CloudEventsVersionV1: + con := &EventContextV1{ + ID: id, + Type: typ, + Source: source, + Subject: subject, + Time: time, + DataContentType: datacontenttype, + } + + // Add the fields relevant for the version ... 
+ if dataschema != nil { + var err error + con.DataSchema, err = toUriPtr(dataschema) + if err != nil { + return err + } + } + if dataBase64 != nil { + stream := jsoniter.ConfigFastest.BorrowStream(nil) + defer jsoniter.ConfigFastest.ReturnStream(stream) + dataBase64.WriteTo(stream) + cachedData = stream.Buffer() + if stream.Error != nil { + return stream.Error + } + appendFlag(&state, dataBase64Flag) + } + + // ... add all remaining fields as extensions. + if schemaurl != nil { + extensions["schemaurl"] = schemaurl.GetInterface() + } + if datacontentencoding != nil { + extensions["datacontentencoding"] = datacontentencoding.GetInterface() + } + + out.Context = con + appendFlag(&state, specVersionV1Flag) + case CloudEventsVersionV03: + con := &EventContextV03{ + ID: id, + Type: typ, + Source: source, + Subject: subject, + Time: time, + DataContentType: datacontenttype, + } + var err error + // Add the fields relevant for the version ... + if schemaurl != nil { + con.SchemaURL, err = toUriRefPtr(schemaurl) + if err != nil { + return err + } + } + if datacontentencoding != nil { + con.DataContentEncoding, err = toStrPtr(datacontentencoding) + if *con.DataContentEncoding != Base64 { + err = ValidationError{"datacontentencoding": errors.New("invalid datacontentencoding value, the only allowed value is 'base64'")} + } + if err != nil { + return err + } + appendFlag(&state, dataBase64Flag) + } + + // ... add all remaining fields as extensions. + if dataschema != nil { + extensions["dataschema"] = dataschema.GetInterface() + } + if dataBase64 != nil { + extensions["data_base64"] = dataBase64.GetInterface() + } + + out.Context = con + appendFlag(&state, specVersionV03Flag) + default: + return ValidationError{"specversion": errors.New("unknown value: " + sv)} + } + + // Apply all extensions to the context object. + for key, val := range extensions { + if err := out.Context.SetExtension(key, val); err != nil { + return err + } + } + continue + } + + // If no specversion ... 
+ if !checkFlag(state, specVersionV03Flag|specVersionV1Flag) { + switch key { + case "id": + id = iterator.ReadString() + case "type": + typ = iterator.ReadString() + case "source": + source = readUriRef(iterator) + case "subject": + subject = readStrPtr(iterator) + case "time": + time = readTimestamp(iterator) + case "datacontenttype": + datacontenttype = readStrPtr(iterator) + appendFlag(&state, dataContentTypeFlag) + case "data": + cachedData = iterator.SkipAndReturnBytes() + case "data_base64": + dataBase64 = iterator.ReadAny() + case "dataschema": + dataschema = iterator.ReadAny() + case "schemaurl": + schemaurl = iterator.ReadAny() + case "datacontentencoding": + datacontentencoding = iterator.ReadAny() + default: + extensions[key] = iterator.Read() + } + continue + } + + // From this point downward -> we can assume the event has a context pointer non nil + + // If it's a datacontenttype, trigger state change + if key == "datacontenttype" { + if checkFlag(state, dataContentTypeFlag) { + return fmt.Errorf("datacontenttype was already provided") + } + + dct := iterator.ReadString() + + switch ctx := out.Context.(type) { + case *EventContextV03: + ctx.DataContentType = &dct + case *EventContextV1: + ctx.DataContentType = &dct + } + appendFlag(&state, dataContentTypeFlag) + continue + } + + // If it's a datacontentencoding and it's v0.3, trigger state change + if checkFlag(state, specVersionV03Flag) && key == "datacontentencoding" { + if checkFlag(state, dataBase64Flag) { + return ValidationError{"datacontentencoding": errors.New("datacontentencoding was specified twice")} + } + + dce := iterator.ReadString() + + if dce != Base64 { + return ValidationError{"datacontentencoding": errors.New("invalid datacontentencoding value, the only allowed value is 'base64'")} + } + + out.Context.(*EventContextV03).DataContentEncoding = &dce + appendFlag(&state, dataBase64Flag) + continue + } + + // We can parse all attributes, except data. 
+ // If it's data or data_base64 and we don't have the attributes to process it, then we cache it + // The expanded form of this condition is: + // (checkFlag(state, specVersionV1Flag) && !checkFlag(state, dataContentTypeFlag) && (key == "data" || key == "data_base64")) || + // (checkFlag(state, specVersionV03Flag) && !(checkFlag(state, dataContentTypeFlag) && checkFlag(state, dataBase64Flag)) && key == "data") + if (state&(specVersionV1Flag|dataContentTypeFlag) == specVersionV1Flag && (key == "data" || key == "data_base64")) || + ((state&specVersionV03Flag == specVersionV03Flag) && (state&(dataContentTypeFlag|dataBase64Flag) != (dataContentTypeFlag | dataBase64Flag)) && key == "data") { + if key == "data_base64" { + appendFlag(&state, dataBase64Flag) + } + cachedData = iterator.SkipAndReturnBytes() + continue + } + + // At this point or this value is an attribute (excluding datacontenttype and datacontentencoding), or this value is data and this condition is valid: + // (specVersionV1Flag & dataContentTypeFlag) || (specVersionV03Flag & dataContentTypeFlag & dataBase64Flag) + switch eventContext := out.Context.(type) { + case *EventContextV03: + switch key { + case "id": + eventContext.ID = iterator.ReadString() + case "type": + eventContext.Type = iterator.ReadString() + case "source": + eventContext.Source = readUriRef(iterator) + case "subject": + eventContext.Subject = readStrPtr(iterator) + case "time": + eventContext.Time = readTimestamp(iterator) + case "schemaurl": + eventContext.SchemaURL = readUriRefPtr(iterator) + case "data": + iterator.Error = consumeData(out, checkFlag(state, dataBase64Flag), iterator) + default: + if eventContext.Extensions == nil { + eventContext.Extensions = make(map[string]interface{}, 1) + } + iterator.Error = eventContext.SetExtension(key, iterator.Read()) + } + case *EventContextV1: + switch key { + case "id": + eventContext.ID = iterator.ReadString() + case "type": + eventContext.Type = iterator.ReadString() + case "source": + 
eventContext.Source = readUriRef(iterator) + case "subject": + eventContext.Subject = readStrPtr(iterator) + case "time": + eventContext.Time = readTimestamp(iterator) + case "dataschema": + eventContext.DataSchema = readUriPtr(iterator) + case "data": + iterator.Error = consumeData(out, false, iterator) + case "data_base64": + iterator.Error = consumeData(out, true, iterator) + default: + if eventContext.Extensions == nil { + eventContext.Extensions = make(map[string]interface{}, 1) + } + iterator.Error = eventContext.SetExtension(key, iterator.Read()) + } + } + } + + if state&(specVersionV03Flag|specVersionV1Flag) == 0 { + return ValidationError{"specversion": errors.New("no specversion")} + } + + if iterator.Error != nil { + return iterator.Error + } + + // If there is a dataToken cached, we always defer at the end the processing + // because nor datacontenttype or datacontentencoding are mandatory. + if cachedData != nil { + return consumeDataAsBytes(out, checkFlag(state, dataBase64Flag), cachedData) + } + return nil +} + +func consumeDataAsBytes(e *Event, isBase64 bool, b []byte) error { + if isBase64 { + e.DataBase64 = true + + // Allocate payload byte buffer + base64Encoded := b[1 : len(b)-1] // remove quotes + e.DataEncoded = make([]byte, base64.StdEncoding.DecodedLen(len(base64Encoded))) + length, err := base64.StdEncoding.Decode(e.DataEncoded, base64Encoded) + if err != nil { + return err + } + e.DataEncoded = e.DataEncoded[0:length] + return nil + } + + mt, _ := e.Context.GetDataMediaType() + // Empty content type assumes json + if mt != "" && mt != ApplicationJSON && mt != TextJSON { + // If not json, then data is encoded as string + iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, b) + src := iter.ReadString() // handles escaping + e.DataEncoded = []byte(src) + if iter.Error != nil { + return fmt.Errorf("unexpected data payload for media type %q, expected a string: %w", mt, iter.Error) + } + return nil + } + + e.DataEncoded = b + return nil +} + 
+func consumeData(e *Event, isBase64 bool, iter *jsoniter.Iterator) error { + if isBase64 { + e.DataBase64 = true + + // Allocate payload byte buffer + base64Encoded := iter.ReadStringAsSlice() + e.DataEncoded = make([]byte, base64.StdEncoding.DecodedLen(len(base64Encoded))) + length, err := base64.StdEncoding.Decode(e.DataEncoded, base64Encoded) + if err != nil { + return err + } + e.DataEncoded = e.DataEncoded[0:length] + return nil + } + + mt, _ := e.Context.GetDataMediaType() + if mt != ApplicationJSON && mt != TextJSON { + // If not json, then data is encoded as string + src := iter.ReadString() // handles escaping + e.DataEncoded = []byte(src) + if iter.Error != nil { + return fmt.Errorf("unexpected data payload for media type %q, expected a string: %w", mt, iter.Error) + } + return nil + } + + e.DataEncoded = iter.SkipAndReturnBytes() + return nil +} + +func readUriRef(iter *jsoniter.Iterator) types.URIRef { + str := iter.ReadString() + uriRef := types.ParseURIRef(str) + if uriRef == nil { + iter.Error = fmt.Errorf("cannot parse uri ref: %v", str) + return types.URIRef{} + } + return *uriRef +} + +func readStrPtr(iter *jsoniter.Iterator) *string { + str := iter.ReadString() + if str == "" { + return nil + } + return &str +} + +func readUriRefPtr(iter *jsoniter.Iterator) *types.URIRef { + return types.ParseURIRef(iter.ReadString()) +} + +func readUriPtr(iter *jsoniter.Iterator) *types.URI { + return types.ParseURI(iter.ReadString()) +} + +func readTimestamp(iter *jsoniter.Iterator) *types.Timestamp { + t, err := types.ParseTimestamp(iter.ReadString()) + if err != nil { + iter.Error = err + } + return t +} + +func toStrPtr(val jsoniter.Any) (*string, error) { + str := val.ToString() + if val.LastError() != nil { + return nil, val.LastError() + } + if str == "" { + return nil, nil + } + return &str, nil +} + +func toUriRefPtr(val jsoniter.Any) (*types.URIRef, error) { + str := val.ToString() + if val.LastError() != nil { + return nil, val.LastError() + } + 
return types.ParseURIRef(str), nil +} + +func toUriPtr(val jsoniter.Any) (*types.URI, error) { + str := val.ToString() + if val.LastError() != nil { + return nil, val.LastError() + } + return types.ParseURI(str), nil +} + +// UnmarshalJSON implements the json unmarshal method used when this type is +// unmarshaled using json.Unmarshal. +func (e *Event) UnmarshalJSON(b []byte) error { + iterator := jsoniter.ConfigFastest.BorrowIterator(b) + defer jsoniter.ConfigFastest.ReturnIterator(iterator) + return readJsonFromIterator(e, iterator) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go new file mode 100644 index 000000000..958ecc47d --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go @@ -0,0 +1,50 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "fmt" + "strings" +) + +type ValidationError map[string]error + +func (e ValidationError) Error() string { + b := strings.Builder{} + for k, v := range e { + b.WriteString(k) + b.WriteString(": ") + b.WriteString(v.Error()) + b.WriteRune('\n') + } + return b.String() +} + +// Validate performs a spec based validation on this event. +// Validation is dependent on the spec version specified in the event context. 
+func (e Event) Validate() error { + if e.Context == nil { + return ValidationError{"specversion": fmt.Errorf("missing Event.Context")} + } + + errs := map[string]error{} + if e.FieldErrors != nil { + for k, v := range e.FieldErrors { + errs[k] = v + } + } + + if fieldErrors := e.Context.Validate(); fieldErrors != nil { + for k, v := range fieldErrors { + errs[k] = v + } + } + + if len(errs) > 0 { + return ValidationError(errs) + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go new file mode 100644 index 000000000..ddfb1be38 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go @@ -0,0 +1,117 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "fmt" + "time" +) + +var _ EventWriter = (*Event)(nil) + +// SetSpecVersion implements EventWriter.SetSpecVersion +func (e *Event) SetSpecVersion(v string) { + switch v { + case CloudEventsVersionV03: + if e.Context == nil { + e.Context = &EventContextV03{} + } else { + e.Context = e.Context.AsV03() + } + case CloudEventsVersionV1: + if e.Context == nil { + e.Context = &EventContextV1{} + } else { + e.Context = e.Context.AsV1() + } + default: + e.fieldError("specversion", fmt.Errorf("a valid spec version is required: [%s, %s]", + CloudEventsVersionV03, CloudEventsVersionV1)) + return + } + e.fieldOK("specversion") +} + +// SetType implements EventWriter.SetType +func (e *Event) SetType(t string) { + if err := e.Context.SetType(t); err != nil { + e.fieldError("type", err) + } else { + e.fieldOK("type") + } +} + +// SetSource implements EventWriter.SetSource +func (e *Event) SetSource(s string) { + if err := e.Context.SetSource(s); err != nil { + e.fieldError("source", err) + } else { + e.fieldOK("source") + } +} + +// SetSubject implements EventWriter.SetSubject +func (e *Event) SetSubject(s string) { + if err := 
e.Context.SetSubject(s); err != nil { + e.fieldError("subject", err) + } else { + e.fieldOK("subject") + } +} + +// SetID implements EventWriter.SetID +func (e *Event) SetID(id string) { + if err := e.Context.SetID(id); err != nil { + e.fieldError("id", err) + } else { + e.fieldOK("id") + } +} + +// SetTime implements EventWriter.SetTime +func (e *Event) SetTime(t time.Time) { + if err := e.Context.SetTime(t); err != nil { + e.fieldError("time", err) + } else { + e.fieldOK("time") + } +} + +// SetDataSchema implements EventWriter.SetDataSchema +func (e *Event) SetDataSchema(s string) { + if err := e.Context.SetDataSchema(s); err != nil { + e.fieldError("dataschema", err) + } else { + e.fieldOK("dataschema") + } +} + +// SetDataContentType implements EventWriter.SetDataContentType +func (e *Event) SetDataContentType(ct string) { + if err := e.Context.SetDataContentType(ct); err != nil { + e.fieldError("datacontenttype", err) + } else { + e.fieldOK("datacontenttype") + } +} + +// SetDataContentEncoding is deprecated. Implements EventWriter.SetDataContentEncoding. 
+func (e *Event) SetDataContentEncoding(enc string) { + if err := e.Context.DeprecatedSetDataContentEncoding(enc); err != nil { + e.fieldError("datacontentencoding", err) + } else { + e.fieldOK("datacontentencoding") + } +} + +// SetExtension implements EventWriter.SetExtension +func (e *Event) SetExtension(name string, obj interface{}) { + if err := e.Context.SetExtension(name, obj); err != nil { + e.fieldError("extension:"+name, err) + } else { + e.fieldOK("extension:" + name) + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go new file mode 100644 index 000000000..a39565afa --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go @@ -0,0 +1,125 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import "time" + +// EventContextReader are the methods required to be a reader of context +// attributes. +type EventContextReader interface { + // GetSpecVersion returns the native CloudEvents Spec version of the event + // context. + GetSpecVersion() string + // GetType returns the CloudEvents type from the context. + GetType() string + // GetSource returns the CloudEvents source from the context. + GetSource() string + // GetSubject returns the CloudEvents subject from the context. + GetSubject() string + // GetID returns the CloudEvents ID from the context. + GetID() string + // GetTime returns the CloudEvents creation time from the context. + GetTime() time.Time + // GetDataSchema returns the CloudEvents schema URL (if any) from the + // context. + GetDataSchema() string + // GetDataContentType returns content type on the context. + GetDataContentType() string + // DeprecatedGetDataContentEncoding returns content encoding on the context. 
+ DeprecatedGetDataContentEncoding() string + + // GetDataMediaType returns the MIME media type for encoded data, which is + // needed by both encoding and decoding. This is a processed form of + // GetDataContentType and it may return an error. + GetDataMediaType() (string, error) + + // DEPRECATED: Access extensions directly via the GetExtensions() + // For example replace this: + // + // var i int + // err := ec.ExtensionAs("foo", &i) + // + // With this: + // + // i, err := types.ToInteger(ec.GetExtensions["foo"]) + // + ExtensionAs(string, interface{}) error + + // GetExtensions returns the full extensions map. + // + // Extensions use the CloudEvents type system, details in package cloudevents/types. + GetExtensions() map[string]interface{} + + // GetExtension returns the extension associated with with the given key. + // The given key is case insensitive. If the extension can not be found, + // an error will be returned. + GetExtension(string) (interface{}, error) +} + +// EventContextWriter are the methods required to be a writer of context +// attributes. +type EventContextWriter interface { + // SetType sets the type of the context. + SetType(string) error + // SetSource sets the source of the context. + SetSource(string) error + // SetSubject sets the subject of the context. + SetSubject(string) error + // SetID sets the ID of the context. + SetID(string) error + // SetTime sets the time of the context. + SetTime(time time.Time) error + // SetDataSchema sets the schema url of the context. + SetDataSchema(string) error + // SetDataContentType sets the data content type of the context. + SetDataContentType(string) error + // DeprecatedSetDataContentEncoding sets the data context encoding of the context. + DeprecatedSetDataContentEncoding(string) error + + // SetExtension sets the given interface onto the extension attributes + // determined by the provided name. 
+ // + // This function fails in V1 if the name doesn't respect the regex ^[a-zA-Z0-9]+$ + // + // Package ./types documents the types that are allowed as extension values. + SetExtension(string, interface{}) error +} + +// EventContextConverter are the methods that allow for event version +// conversion. +type EventContextConverter interface { + // AsV03 provides a translation from whatever the "native" encoding of the + // CloudEvent was to the equivalent in v0.3 field names, moving fields to or + // from extensions as necessary. + AsV03() *EventContextV03 + + // AsV1 provides a translation from whatever the "native" encoding of the + // CloudEvent was to the equivalent in v1.0 field names, moving fields to or + // from extensions as necessary. + AsV1() *EventContextV1 +} + +// EventContext is conical interface for a CloudEvents Context. +type EventContext interface { + // EventContextConverter allows for conversion between versions. + EventContextConverter + + // EventContextReader adds methods for reading context. + EventContextReader + + // EventContextWriter adds methods for writing to context. + EventContextWriter + + // Validate the event based on the specifics of the CloudEvents spec version + // represented by this event context. + Validate() ValidationError + + // Clone clones the event context. + Clone() EventContext + + // String returns a pretty-printed representation of the EventContext. 
+ String() string +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go new file mode 100644 index 000000000..3f0505547 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go @@ -0,0 +1,330 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "encoding/json" + "fmt" + "mime" + "sort" + "strings" + + "github.com/cloudevents/sdk-go/v2/types" +) + +const ( + // CloudEventsVersionV03 represents the version 0.3 of the CloudEvents spec. + CloudEventsVersionV03 = "0.3" +) + +var specV03Attributes = map[string]struct{}{ + "type": {}, + "source": {}, + "subject": {}, + "id": {}, + "time": {}, + "schemaurl": {}, + "datacontenttype": {}, + "datacontentencoding": {}, +} + +// EventContextV03 represents the non-data attributes of a CloudEvents v0.3 +// event. +type EventContextV03 struct { + // Type - The type of the occurrence which has happened. + Type string `json:"type"` + // Source - A URI describing the event producer. + Source types.URIRef `json:"source"` + // Subject - The subject of the event in the context of the event producer + // (identified by `source`). + Subject *string `json:"subject,omitempty"` + // ID of the event; must be non-empty and unique within the scope of the producer. + ID string `json:"id"` + // Time - A Timestamp when the event happened. + Time *types.Timestamp `json:"time,omitempty"` + // DataSchema - A link to the schema that the `data` attribute adheres to. + SchemaURL *types.URIRef `json:"schemaurl,omitempty"` + // GetDataMediaType - A MIME (RFC2046) string describing the media type of `data`. + DataContentType *string `json:"datacontenttype,omitempty"` + // DeprecatedDataContentEncoding describes the content encoding for the `data` attribute. Valid: nil, `Base64`. 
+ DataContentEncoding *string `json:"datacontentencoding,omitempty"` + // Extensions - Additional extension metadata beyond the base spec. + Extensions map[string]interface{} `json:"-"` +} + +// Adhere to EventContext +var _ EventContext = (*EventContextV03)(nil) + +// ExtensionAs implements EventContext.ExtensionAs +func (ec EventContextV03) ExtensionAs(name string, obj interface{}) error { + value, ok := ec.Extensions[name] + if !ok { + return fmt.Errorf("extension %q does not exist", name) + } + + // Try to unmarshal extension if we find it as a RawMessage. + switch v := value.(type) { + case json.RawMessage: + if err := json.Unmarshal(v, obj); err == nil { + // if that worked, return with obj set. + return nil + } + } + // else try as a string ptr. + + // Only support *string for now. + switch v := obj.(type) { + case *string: + if valueAsString, ok := value.(string); ok { + *v = valueAsString + return nil + } else { + return fmt.Errorf("invalid type for extension %q", name) + } + default: + return fmt.Errorf("unknown extension type %T", obj) + } +} + +// SetExtension adds the extension 'name' with value 'value' to the CloudEvents +// context. This function fails if the name uses a reserved event context key. 
+func (ec *EventContextV03) SetExtension(name string, value interface{}) error { + if ec.Extensions == nil { + ec.Extensions = make(map[string]interface{}) + } + + if _, ok := specV03Attributes[strings.ToLower(name)]; ok { + return fmt.Errorf("bad key %q: CloudEvents spec attribute MUST NOT be overwritten by extension", name) + } + + if value == nil { + delete(ec.Extensions, name) + if len(ec.Extensions) == 0 { + ec.Extensions = nil + } + return nil + } else { + v, err := types.Validate(value) + if err == nil { + ec.Extensions[name] = v + } + return err + } +} + +// Clone implements EventContextConverter.Clone +func (ec EventContextV03) Clone() EventContext { + ec03 := ec.AsV03() + ec03.Source = types.Clone(ec.Source).(types.URIRef) + if ec.Time != nil { + ec03.Time = types.Clone(ec.Time).(*types.Timestamp) + } + if ec.SchemaURL != nil { + ec03.SchemaURL = types.Clone(ec.SchemaURL).(*types.URIRef) + } + ec03.Extensions = ec.cloneExtensions() + return ec03 +} + +func (ec *EventContextV03) cloneExtensions() map[string]interface{} { + old := ec.Extensions + if old == nil { + return nil + } + new := make(map[string]interface{}, len(ec.Extensions)) + for k, v := range old { + new[k] = types.Clone(v) + } + return new +} + +// AsV03 implements EventContextConverter.AsV03 +func (ec EventContextV03) AsV03() *EventContextV03 { + return &ec +} + +// AsV1 implements EventContextConverter.AsV1 +func (ec EventContextV03) AsV1() *EventContextV1 { + ret := EventContextV1{ + ID: ec.ID, + Time: ec.Time, + Type: ec.Type, + DataContentType: ec.DataContentType, + Source: types.URIRef{URL: ec.Source.URL}, + Subject: ec.Subject, + Extensions: make(map[string]interface{}), + } + if ec.SchemaURL != nil { + ret.DataSchema = &types.URI{URL: ec.SchemaURL.URL} + } + + // DataContentEncoding was removed in 1.0, so put it in an extension for 1.0. 
+ if ec.DataContentEncoding != nil { + _ = ret.SetExtension(DataContentEncodingKey, *ec.DataContentEncoding) + } + + if ec.Extensions != nil { + for k, v := range ec.Extensions { + k = strings.ToLower(k) + ret.Extensions[k] = v + } + } + if len(ret.Extensions) == 0 { + ret.Extensions = nil + } + return &ret +} + +// Validate returns errors based on requirements from the CloudEvents spec. +// For more details, see +// https://github.com/cloudevents/spec/blob/main/cloudevents/spec.md +// As of Feb 26, 2019, commit 17c32ea26baf7714ad027d9917d03d2fff79fc7e +// + https://github.com/cloudevents/spec/pull/387 -> datacontentencoding +// + https://github.com/cloudevents/spec/pull/406 -> subject +func (ec EventContextV03) Validate() ValidationError { + errors := map[string]error{} + + // type + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // SHOULD be prefixed with a reverse-DNS name. The prefixed domain dictates the organization which defines the semantics of this event type. 
+ eventType := strings.TrimSpace(ec.Type) + if eventType == "" { + errors["type"] = fmt.Errorf("MUST be a non-empty string") + } + + // source + // Type: URI-reference + // Constraints: + // REQUIRED + source := strings.TrimSpace(ec.Source.String()) + if source == "" { + errors["source"] = fmt.Errorf("REQUIRED") + } + + // subject + // Type: String + // Constraints: + // OPTIONAL + // MUST be a non-empty string + if ec.Subject != nil { + subject := strings.TrimSpace(*ec.Subject) + if subject == "" { + errors["subject"] = fmt.Errorf("if present, MUST be a non-empty string") + } + } + + // id + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // MUST be unique within the scope of the producer + id := strings.TrimSpace(ec.ID) + if id == "" { + errors["id"] = fmt.Errorf("MUST be a non-empty string") + + // no way to test "MUST be unique within the scope of the producer" + } + + // time + // Type: Timestamp + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3339 + // --> no need to test this, no way to set the time without it being valid. + + // schemaurl + // Type: URI + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3986 + if ec.SchemaURL != nil { + schemaURL := strings.TrimSpace(ec.SchemaURL.String()) + // empty string is not RFC 3986 compatible. 
+ if schemaURL == "" { + errors["schemaurl"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 3986") + } + } + + // datacontenttype + // Type: String per RFC 2046 + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 2046 + if ec.DataContentType != nil { + dataContentType := strings.TrimSpace(*ec.DataContentType) + if dataContentType == "" { + errors["datacontenttype"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 2046") + } else { + _, _, err := mime.ParseMediaType(dataContentType) + if err != nil { + errors["datacontenttype"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 2046") + } + } + } + + // datacontentencoding + // Type: String per RFC 2045 Section 6.1 + // Constraints: + // The attribute MUST be set if the data attribute contains string-encoded binary data. + // Otherwise the attribute MUST NOT be set. + // If present, MUST adhere to RFC 2045 Section 6.1 + if ec.DataContentEncoding != nil { + dataContentEncoding := strings.ToLower(strings.TrimSpace(*ec.DataContentEncoding)) + if dataContentEncoding != Base64 { + errors["datacontentencoding"] = fmt.Errorf("if present, MUST adhere to RFC 2045 Section 6.1") + } + } + + if len(errors) > 0 { + return errors + } + return nil +} + +// String returns a pretty-printed representation of the EventContext. 
+func (ec EventContextV03) String() string { + b := strings.Builder{} + + b.WriteString("Context Attributes,\n") + + b.WriteString(" specversion: " + CloudEventsVersionV03 + "\n") + b.WriteString(" type: " + ec.Type + "\n") + b.WriteString(" source: " + ec.Source.String() + "\n") + if ec.Subject != nil { + b.WriteString(" subject: " + *ec.Subject + "\n") + } + b.WriteString(" id: " + ec.ID + "\n") + if ec.Time != nil { + b.WriteString(" time: " + ec.Time.String() + "\n") + } + if ec.SchemaURL != nil { + b.WriteString(" schemaurl: " + ec.SchemaURL.String() + "\n") + } + if ec.DataContentType != nil { + b.WriteString(" datacontenttype: " + *ec.DataContentType + "\n") + } + if ec.DataContentEncoding != nil { + b.WriteString(" datacontentencoding: " + *ec.DataContentEncoding + "\n") + } + + if ec.Extensions != nil && len(ec.Extensions) > 0 { + b.WriteString("Extensions,\n") + keys := make([]string, 0, len(ec.Extensions)) + for k := range ec.Extensions { + keys = append(keys, k) + } + sort.Strings(keys) + for _, key := range keys { + b.WriteString(fmt.Sprintf(" %s: %v\n", key, ec.Extensions[key])) + } + } + + return b.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go new file mode 100644 index 000000000..2cd27a705 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go @@ -0,0 +1,99 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "fmt" + "strings" + "time" +) + +// GetSpecVersion implements EventContextReader.GetSpecVersion +func (ec EventContextV03) GetSpecVersion() string { + return CloudEventsVersionV03 +} + +// GetDataContentType implements EventContextReader.GetDataContentType +func (ec EventContextV03) GetDataContentType() string { + if ec.DataContentType != nil { + return *ec.DataContentType + } + return "" +} + +// GetDataMediaType 
implements EventContextReader.GetDataMediaType +func (ec EventContextV03) GetDataMediaType() (string, error) { + if ec.DataContentType != nil { + dct := *ec.DataContentType + i := strings.IndexRune(dct, ';') + if i == -1 { + return dct, nil + } + return strings.TrimSpace(dct[0:i]), nil + } + return "", nil +} + +// GetType implements EventContextReader.GetType +func (ec EventContextV03) GetType() string { + return ec.Type +} + +// GetSource implements EventContextReader.GetSource +func (ec EventContextV03) GetSource() string { + return ec.Source.String() +} + +// GetSubject implements EventContextReader.GetSubject +func (ec EventContextV03) GetSubject() string { + if ec.Subject != nil { + return *ec.Subject + } + return "" +} + +// GetTime implements EventContextReader.GetTime +func (ec EventContextV03) GetTime() time.Time { + if ec.Time != nil { + return ec.Time.Time + } + return time.Time{} +} + +// GetID implements EventContextReader.GetID +func (ec EventContextV03) GetID() string { + return ec.ID +} + +// GetDataSchema implements EventContextReader.GetDataSchema +func (ec EventContextV03) GetDataSchema() string { + if ec.SchemaURL != nil { + return ec.SchemaURL.String() + } + return "" +} + +// DeprecatedGetDataContentEncoding implements EventContextReader.DeprecatedGetDataContentEncoding +func (ec EventContextV03) DeprecatedGetDataContentEncoding() string { + if ec.DataContentEncoding != nil { + return *ec.DataContentEncoding + } + return "" +} + +// GetExtensions implements EventContextReader.GetExtensions +func (ec EventContextV03) GetExtensions() map[string]interface{} { + return ec.Extensions +} + +// GetExtension implements EventContextReader.GetExtension +func (ec EventContextV03) GetExtension(key string) (interface{}, error) { + v, ok := caseInsensitiveSearch(key, ec.Extensions) + if !ok { + return "", fmt.Errorf("%q not found", key) + } + return v, nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go 
b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go new file mode 100644 index 000000000..5d664635e --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go @@ -0,0 +1,103 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "errors" + "net/url" + "strings" + "time" + + "github.com/cloudevents/sdk-go/v2/types" +) + +// Adhere to EventContextWriter +var _ EventContextWriter = (*EventContextV03)(nil) + +// SetDataContentType implements EventContextWriter.SetDataContentType +func (ec *EventContextV03) SetDataContentType(ct string) error { + ct = strings.TrimSpace(ct) + if ct == "" { + ec.DataContentType = nil + } else { + ec.DataContentType = &ct + } + return nil +} + +// SetType implements EventContextWriter.SetType +func (ec *EventContextV03) SetType(t string) error { + t = strings.TrimSpace(t) + ec.Type = t + return nil +} + +// SetSource implements EventContextWriter.SetSource +func (ec *EventContextV03) SetSource(u string) error { + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.Source = types.URIRef{URL: *pu} + return nil +} + +// SetSubject implements EventContextWriter.SetSubject +func (ec *EventContextV03) SetSubject(s string) error { + s = strings.TrimSpace(s) + if s == "" { + ec.Subject = nil + } else { + ec.Subject = &s + } + return nil +} + +// SetID implements EventContextWriter.SetID +func (ec *EventContextV03) SetID(id string) error { + id = strings.TrimSpace(id) + if id == "" { + return errors.New("id is required to be a non-empty string") + } + ec.ID = id + return nil +} + +// SetTime implements EventContextWriter.SetTime +func (ec *EventContextV03) SetTime(t time.Time) error { + if t.IsZero() { + ec.Time = nil + } else { + ec.Time = &types.Timestamp{Time: t} + } + return nil +} + +// SetDataSchema implements EventContextWriter.SetDataSchema +func (ec *EventContextV03) SetDataSchema(u string) error { + u = 
strings.TrimSpace(u) + if u == "" { + ec.SchemaURL = nil + return nil + } + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.SchemaURL = &types.URIRef{URL: *pu} + return nil +} + +// DeprecatedSetDataContentEncoding implements EventContextWriter.DeprecatedSetDataContentEncoding +func (ec *EventContextV03) DeprecatedSetDataContentEncoding(e string) error { + e = strings.ToLower(strings.TrimSpace(e)) + if e == "" { + ec.DataContentEncoding = nil + } else { + ec.DataContentEncoding = &e + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go new file mode 100644 index 000000000..8f164502b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go @@ -0,0 +1,315 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "fmt" + "mime" + "sort" + "strings" + + "github.com/cloudevents/sdk-go/v2/types" +) + +// WIP: AS OF SEP 20, 2019 + +const ( + // CloudEventsVersionV1 represents the version 1.0 of the CloudEvents spec. + CloudEventsVersionV1 = "1.0" +) + +var specV1Attributes = map[string]struct{}{ + "id": {}, + "source": {}, + "type": {}, + "datacontenttype": {}, + "subject": {}, + "time": {}, + "specversion": {}, + "dataschema": {}, +} + +// EventContextV1 represents the non-data attributes of a CloudEvents v1.0 +// event. +type EventContextV1 struct { + // ID of the event; must be non-empty and unique within the scope of the producer. + // +required + ID string `json:"id"` + // Source - A URI describing the event producer. + // +required + Source types.URIRef `json:"source"` + // Type - The type of the occurrence which has happened. + // +required + Type string `json:"type"` + + // DataContentType - A MIME (RFC2046) string describing the media type of `data`. 
+ // +optional + DataContentType *string `json:"datacontenttype,omitempty"` + // Subject - The subject of the event in the context of the event producer + // (identified by `source`). + // +optional + Subject *string `json:"subject,omitempty"` + // Time - A Timestamp when the event happened. + // +optional + Time *types.Timestamp `json:"time,omitempty"` + // DataSchema - A link to the schema that the `data` attribute adheres to. + // +optional + DataSchema *types.URI `json:"dataschema,omitempty"` + + // Extensions - Additional extension metadata beyond the base spec. + // +optional + Extensions map[string]interface{} `json:"-"` +} + +// Adhere to EventContext +var _ EventContext = (*EventContextV1)(nil) + +// ExtensionAs implements EventContext.ExtensionAs +func (ec EventContextV1) ExtensionAs(name string, obj interface{}) error { + name = strings.ToLower(name) + value, ok := ec.Extensions[name] + if !ok { + return fmt.Errorf("extension %q does not exist", name) + } + + // Only support *string for now. + if v, ok := obj.(*string); ok { + if *v, ok = value.(string); ok { + return nil + } + } + return fmt.Errorf("unknown extension type %T", obj) +} + +// SetExtension adds the extension 'name' with value 'value' to the CloudEvents +// context. This function fails if the name doesn't respect the regex +// ^[a-zA-Z0-9]+$ or if the name uses a reserved event context key. 
+func (ec *EventContextV1) SetExtension(name string, value interface{}) error { + if err := validateExtensionName(name); err != nil { + return err + } + + if _, ok := specV1Attributes[strings.ToLower(name)]; ok { + return fmt.Errorf("bad key %q: CloudEvents spec attribute MUST NOT be overwritten by extension", name) + } + + name = strings.ToLower(name) + if ec.Extensions == nil { + ec.Extensions = make(map[string]interface{}) + } + if value == nil { + delete(ec.Extensions, name) + if len(ec.Extensions) == 0 { + ec.Extensions = nil + } + return nil + } else { + v, err := types.Validate(value) // Ensure it's a legal CE attribute value + if err == nil { + ec.Extensions[name] = v + } + return err + } +} + +// Clone implements EventContextConverter.Clone +func (ec EventContextV1) Clone() EventContext { + ec1 := ec.AsV1() + ec1.Source = types.Clone(ec.Source).(types.URIRef) + if ec.Time != nil { + ec1.Time = types.Clone(ec.Time).(*types.Timestamp) + } + if ec.DataSchema != nil { + ec1.DataSchema = types.Clone(ec.DataSchema).(*types.URI) + } + ec1.Extensions = ec.cloneExtensions() + return ec1 +} + +func (ec *EventContextV1) cloneExtensions() map[string]interface{} { + old := ec.Extensions + if old == nil { + return nil + } + new := make(map[string]interface{}, len(ec.Extensions)) + for k, v := range old { + new[k] = types.Clone(v) + } + return new +} + +// AsV03 implements EventContextConverter.AsV03 +func (ec EventContextV1) AsV03() *EventContextV03 { + ret := EventContextV03{ + ID: ec.ID, + Time: ec.Time, + Type: ec.Type, + DataContentType: ec.DataContentType, + Source: types.URIRef{URL: ec.Source.URL}, + Subject: ec.Subject, + Extensions: make(map[string]interface{}), + } + + if ec.DataSchema != nil { + ret.SchemaURL = &types.URIRef{URL: ec.DataSchema.URL} + } + + if ec.Extensions != nil { + for k, v := range ec.Extensions { + k = strings.ToLower(k) + // DeprecatedDataContentEncoding was introduced in 0.3, removed in 1.0 + if strings.EqualFold(k, 
DataContentEncodingKey) { + etv, ok := v.(string) + if ok && etv != "" { + ret.DataContentEncoding = &etv + } + continue + } + ret.Extensions[k] = v + } + } + if len(ret.Extensions) == 0 { + ret.Extensions = nil + } + return &ret +} + +// AsV1 implements EventContextConverter.AsV1 +func (ec EventContextV1) AsV1() *EventContextV1 { + return &ec +} + +// Validate returns errors based on requirements from the CloudEvents spec. +// For more details, see https://github.com/cloudevents/spec/blob/v1.0/spec.md. +func (ec EventContextV1) Validate() ValidationError { + errors := map[string]error{} + + // id + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // MUST be unique within the scope of the producer + id := strings.TrimSpace(ec.ID) + if id == "" { + errors["id"] = fmt.Errorf("MUST be a non-empty string") + // no way to test "MUST be unique within the scope of the producer" + } + + // source + // Type: URI-reference + // Constraints: + // REQUIRED + // MUST be a non-empty URI-reference + // An absolute URI is RECOMMENDED + source := strings.TrimSpace(ec.Source.String()) + if source == "" { + errors["source"] = fmt.Errorf("REQUIRED") + } + + // type + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // SHOULD be prefixed with a reverse-DNS name. The prefixed domain dictates the organization which defines the semantics of this event type. + eventType := strings.TrimSpace(ec.Type) + if eventType == "" { + errors["type"] = fmt.Errorf("MUST be a non-empty string") + } + + // The following attributes are optional but still have validation. 
+ + // datacontenttype + // Type: String per RFC 2046 + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 2046 + if ec.DataContentType != nil { + dataContentType := strings.TrimSpace(*ec.DataContentType) + if dataContentType == "" { + errors["datacontenttype"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 2046") + } else { + _, _, err := mime.ParseMediaType(dataContentType) + if err != nil { + errors["datacontenttype"] = fmt.Errorf("failed to parse RFC 2046 media type %w", err) + } + } + } + + // dataschema + // Type: URI + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3986 + if ec.DataSchema != nil { + if !ec.DataSchema.Validate() { + errors["dataschema"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 3986, Section 4.3. Absolute URI") + } + } + + // subject + // Type: String + // Constraints: + // OPTIONAL + // MUST be a non-empty string + if ec.Subject != nil { + subject := strings.TrimSpace(*ec.Subject) + if subject == "" { + errors["subject"] = fmt.Errorf("if present, MUST be a non-empty string") + } + } + + // time + // Type: Timestamp + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3339 + // --> no need to test this, no way to set the time without it being valid. + + if len(errors) > 0 { + return errors + } + return nil +} + +// String returns a pretty-printed representation of the EventContext. 
+func (ec EventContextV1) String() string { + b := strings.Builder{} + + b.WriteString("Context Attributes,\n") + + b.WriteString(" specversion: " + CloudEventsVersionV1 + "\n") + b.WriteString(" type: " + ec.Type + "\n") + b.WriteString(" source: " + ec.Source.String() + "\n") + if ec.Subject != nil { + b.WriteString(" subject: " + *ec.Subject + "\n") + } + b.WriteString(" id: " + ec.ID + "\n") + if ec.Time != nil { + b.WriteString(" time: " + ec.Time.String() + "\n") + } + if ec.DataSchema != nil { + b.WriteString(" dataschema: " + ec.DataSchema.String() + "\n") + } + if ec.DataContentType != nil { + b.WriteString(" datacontenttype: " + *ec.DataContentType + "\n") + } + + if ec.Extensions != nil && len(ec.Extensions) > 0 { + b.WriteString("Extensions,\n") + keys := make([]string, 0, len(ec.Extensions)) + for k := range ec.Extensions { + keys = append(keys, k) + } + sort.Strings(keys) + for _, key := range keys { + b.WriteString(fmt.Sprintf(" %s: %v\n", key, ec.Extensions[key])) + } + } + + return b.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go new file mode 100644 index 000000000..74f73b029 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go @@ -0,0 +1,104 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "fmt" + "strings" + "time" +) + +// GetSpecVersion implements EventContextReader.GetSpecVersion +func (ec EventContextV1) GetSpecVersion() string { + return CloudEventsVersionV1 +} + +// GetDataContentType implements EventContextReader.GetDataContentType +func (ec EventContextV1) GetDataContentType() string { + if ec.DataContentType != nil { + return *ec.DataContentType + } + return "" +} + +// GetDataMediaType implements EventContextReader.GetDataMediaType +func (ec EventContextV1) GetDataMediaType() (string, error) { + if 
ec.DataContentType != nil { + dct := *ec.DataContentType + i := strings.IndexRune(dct, ';') + if i == -1 { + return dct, nil + } + return strings.TrimSpace(dct[0:i]), nil + } + return "", nil +} + +// GetType implements EventContextReader.GetType +func (ec EventContextV1) GetType() string { + return ec.Type +} + +// GetSource implements EventContextReader.GetSource +func (ec EventContextV1) GetSource() string { + return ec.Source.String() +} + +// GetSubject implements EventContextReader.GetSubject +func (ec EventContextV1) GetSubject() string { + if ec.Subject != nil { + return *ec.Subject + } + return "" +} + +// GetTime implements EventContextReader.GetTime +func (ec EventContextV1) GetTime() time.Time { + if ec.Time != nil { + return ec.Time.Time + } + return time.Time{} +} + +// GetID implements EventContextReader.GetID +func (ec EventContextV1) GetID() string { + return ec.ID +} + +// GetDataSchema implements EventContextReader.GetDataSchema +func (ec EventContextV1) GetDataSchema() string { + if ec.DataSchema != nil { + return ec.DataSchema.String() + } + return "" +} + +// DeprecatedGetDataContentEncoding implements EventContextReader.DeprecatedGetDataContentEncoding +func (ec EventContextV1) DeprecatedGetDataContentEncoding() string { + return "" +} + +// GetExtensions implements EventContextReader.GetExtensions +func (ec EventContextV1) GetExtensions() map[string]interface{} { + if len(ec.Extensions) == 0 { + return nil + } + // For now, convert the extensions of v1.0 to the pre-v1.0 style. 
+ ext := make(map[string]interface{}, len(ec.Extensions)) + for k, v := range ec.Extensions { + ext[k] = v + } + return ext +} + +// GetExtension implements EventContextReader.GetExtension +func (ec EventContextV1) GetExtension(key string) (interface{}, error) { + v, ok := caseInsensitiveSearch(key, ec.Extensions) + if !ok { + return "", fmt.Errorf("%q not found", key) + } + return v, nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go new file mode 100644 index 000000000..5f2aca763 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go @@ -0,0 +1,97 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "errors" + "net/url" + "strings" + "time" + + "github.com/cloudevents/sdk-go/v2/types" +) + +// Adhere to EventContextWriter +var _ EventContextWriter = (*EventContextV1)(nil) + +// SetDataContentType implements EventContextWriter.SetDataContentType +func (ec *EventContextV1) SetDataContentType(ct string) error { + ct = strings.TrimSpace(ct) + if ct == "" { + ec.DataContentType = nil + } else { + ec.DataContentType = &ct + } + return nil +} + +// SetType implements EventContextWriter.SetType +func (ec *EventContextV1) SetType(t string) error { + t = strings.TrimSpace(t) + ec.Type = t + return nil +} + +// SetSource implements EventContextWriter.SetSource +func (ec *EventContextV1) SetSource(u string) error { + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.Source = types.URIRef{URL: *pu} + return nil +} + +// SetSubject implements EventContextWriter.SetSubject +func (ec *EventContextV1) SetSubject(s string) error { + s = strings.TrimSpace(s) + if s == "" { + ec.Subject = nil + } else { + ec.Subject = &s + } + return nil +} + +// SetID implements EventContextWriter.SetID +func (ec *EventContextV1) SetID(id string) error { + id = 
strings.TrimSpace(id) + if id == "" { + return errors.New("id is required to be a non-empty string") + } + ec.ID = id + return nil +} + +// SetTime implements EventContextWriter.SetTime +func (ec *EventContextV1) SetTime(t time.Time) error { + if t.IsZero() { + ec.Time = nil + } else { + ec.Time = &types.Timestamp{Time: t} + } + return nil +} + +// SetDataSchema implements EventContextWriter.SetDataSchema +func (ec *EventContextV1) SetDataSchema(u string) error { + u = strings.TrimSpace(u) + if u == "" { + ec.DataSchema = nil + return nil + } + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.DataSchema = &types.URI{URL: *pu} + return nil +} + +// DeprecatedSetDataContentEncoding implements EventContextWriter.DeprecatedSetDataContentEncoding +func (ec *EventContextV1) DeprecatedSetDataContentEncoding(e string) error { + return errors.New("deprecated: SetDataContentEncoding is not supported in v1.0 of CloudEvents") +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go b/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go new file mode 100644 index 000000000..72d0e757a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go @@ -0,0 +1,57 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "errors" + "fmt" + "strings" +) + +const ( + // DataContentEncodingKey is the key to DeprecatedDataContentEncoding for versions that do not support data content encoding + // directly. + DataContentEncodingKey = "datacontentencoding" +) + +var ( + // This determines the behavior of validateExtensionName(). 
For MaxExtensionNameLength > 0, an error will be returned, + // if len(key) > MaxExtensionNameLength + MaxExtensionNameLength = 0 +) + +func caseInsensitiveSearch(key string, space map[string]interface{}) (interface{}, bool) { + lkey := strings.ToLower(key) + for k, v := range space { + if strings.EqualFold(lkey, strings.ToLower(k)) { + return v, true + } + } + return nil, false +} + +func IsExtensionNameValid(key string) bool { + if err := validateExtensionName(key); err != nil { + return false + } + return true +} + +func validateExtensionName(key string) error { + if len(key) < 1 { + return errors.New("bad key, CloudEvents attribute names MUST NOT be empty") + } + if MaxExtensionNameLength > 0 && len(key) > MaxExtensionNameLength { + return fmt.Errorf("bad key, CloudEvents attribute name '%s' is longer than %d characters", key, MaxExtensionNameLength) + } + + for _, c := range key { + if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9')) { + return errors.New("bad key, CloudEvents attribute names MUST consist of lower-case letters ('a' to 'z'), upper-case letters ('A' to 'Z') or digits ('0' to '9') from the ASCII character set") + } + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go new file mode 100644 index 000000000..3c771fc5c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go @@ -0,0 +1,25 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package protocol defines interfaces to decouple the client package +from protocol implementations. + +Most event sender and receiver applications should not use this +package, they should use the client package. This package is for +infrastructure developers implementing new transports, or intermediary +components like importers, channels or brokers. 
+ +Available protocols: + +* HTTP (using net/http) +* Kafka (using github.com/Shopify/sarama) +* AMQP (using pack.ag/amqp) +* Go Channels +* Nats +* Nats Streaming (stan) +* Google PubSub +*/ +package protocol diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go new file mode 100644 index 000000000..a3f335261 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go @@ -0,0 +1,42 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import "fmt" + +// ErrTransportMessageConversion is an error produced when the transport +// message can not be converted. +type ErrTransportMessageConversion struct { + fatal bool + handled bool + transport string + message string +} + +// NewErrTransportMessageConversion makes a new ErrTransportMessageConversion. +func NewErrTransportMessageConversion(transport, message string, handled, fatal bool) *ErrTransportMessageConversion { + return &ErrTransportMessageConversion{ + transport: transport, + message: message, + handled: handled, + fatal: fatal, + } +} + +// IsFatal reports if this error should be considered fatal. +func (e *ErrTransportMessageConversion) IsFatal() bool { + return e.fatal +} + +// Handled reports if this error should be considered accepted and no further action. 
+func (e *ErrTransportMessageConversion) Handled() bool { + return e.handled +} + +// Error implements error.Error +func (e *ErrTransportMessageConversion) Error() string { + return fmt.Sprintf("transport %s failed to convert message: %s", e.transport, e.message) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go new file mode 100644 index 000000000..48f03fb6c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go @@ -0,0 +1,128 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "go.uber.org/zap" + "net/http" + "strconv" + "strings" + "time" +) + +type WebhookConfig struct { + AllowedMethods []string // defaults to POST + AllowedRate *int + AutoACKCallback bool + AllowedOrigins []string +} + +const ( + DefaultAllowedRate = 1000 + DefaultTimeout = time.Second * 600 +) + +// TODO: implement rate limiting. +// Throttling is indicated by requests being rejected using HTTP status code 429 Too Many Requests. +// TODO: use this if Webhook Request Origin has been turned on. +// Inbound requests should be rejected if Allowed Origins is required by SDK. + +func (p *Protocol) OptionsHandler(rw http.ResponseWriter, req *http.Request) { + if req.Method != http.MethodOptions || p.WebhookConfig == nil { + rw.WriteHeader(http.StatusMethodNotAllowed) + return + } + + headers := make(http.Header) + + // The spec does not say we need to validate the origin, just the request origin. + // After the handshake, we will validate the origin. 
+ if origin, ok := p.ValidateRequestOrigin(req); !ok { + rw.WriteHeader(http.StatusBadRequest) + return + } else { + headers.Set("WebHook-Allowed-Origin", origin) + } + + allowedRateRequired := false + if _, ok := req.Header[http.CanonicalHeaderKey("WebHook-Request-Rate")]; ok { + // must send WebHook-Allowed-Rate + allowedRateRequired = true + } + + if p.WebhookConfig.AllowedRate != nil { + headers.Set("WebHook-Allowed-Rate", strconv.Itoa(*p.WebhookConfig.AllowedRate)) + } else if allowedRateRequired { + headers.Set("WebHook-Allowed-Rate", strconv.Itoa(DefaultAllowedRate)) + } + + if len(p.WebhookConfig.AllowedMethods) > 0 { + headers.Set("Allow", strings.Join(p.WebhookConfig.AllowedMethods, ", ")) + } else { + headers.Set("Allow", http.MethodPost) + } + + cb := req.Header.Get("WebHook-Request-Callback") + if cb != "" { + if p.WebhookConfig.AutoACKCallback { + go func() { + reqAck, err := http.NewRequest(http.MethodPost, cb, nil) + if err != nil { + cecontext.LoggerFrom(req.Context()).Errorw("OPTIONS handler failed to create http request attempting to ack callback.", zap.Error(err), zap.String("callback", cb)) + return + } + + // Write out the headers. + for k := range headers { + reqAck.Header.Set(k, headers.Get(k)) + } + + _, err = http.DefaultClient.Do(reqAck) + if err != nil { + cecontext.LoggerFrom(req.Context()).Errorw("OPTIONS handler failed to ack callback.", zap.Error(err), zap.String("callback", cb)) + return + } + }() + return + } else { + cecontext.LoggerFrom(req.Context()).Infof("ACTION REQUIRED: Please validate web hook request callback: %q", cb) + // TODO: what to do pending https://github.com/cloudevents/spec/issues/617 + return + } + } + + // Write out the headers. 
+ for k := range headers { + rw.Header().Set(k, headers.Get(k)) + } +} + +func (p *Protocol) ValidateRequestOrigin(req *http.Request) (string, bool) { + return p.validateOrigin(req.Header.Get("WebHook-Request-Origin")) +} + +func (p *Protocol) ValidateOrigin(req *http.Request) (string, bool) { + return p.validateOrigin(req.Header.Get("Origin")) +} + +func (p *Protocol) validateOrigin(ro string) (string, bool) { + cecontext.LoggerFrom(context.TODO()).Infow("Validating origin.", zap.String("origin", ro)) + + for _, ao := range p.WebhookConfig.AllowedOrigins { + if ao == "*" { + return ao, true + } + // TODO: it is not clear what the rules for allowed hosts are. + // Need to find docs for this. For now, test for prefix. + if strings.HasPrefix(ro, ao) { + return ao, true + } + } + + return ro, false +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go new file mode 100644 index 000000000..e973738c6 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go @@ -0,0 +1,48 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + + nethttp "net/http" + "net/url" +) + +type requestKey struct{} + +// RequestData holds the http.Request information subset that can be +// used to retrieve HTTP information for an incoming CloudEvent. +type RequestData struct { + URL *url.URL + Header nethttp.Header + RemoteAddr string + Host string +} + +// WithRequestDataAtContext uses the http.Request to add RequestData +// information to the Context. +func WithRequestDataAtContext(ctx context.Context, r *nethttp.Request) context.Context { + if r == nil { + return ctx + } + + return context.WithValue(ctx, requestKey{}, &RequestData{ + URL: r.URL, + Header: r.Header, + RemoteAddr: r.RemoteAddr, + Host: r.Host, + }) +} + +// RequestDataFromContext retrieves RequestData from the Context. 
+// If not set nil is returned. +func RequestDataFromContext(ctx context.Context) *RequestData { + if req := ctx.Value(requestKey{}); req != nil { + return req.(*RequestData) + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go new file mode 100644 index 000000000..3428ea387 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package http implements an HTTP binding using net/http module +*/ +package http diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go new file mode 100644 index 000000000..055a5c4dd --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go @@ -0,0 +1,55 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + "github.com/cloudevents/sdk-go/v2/binding" + "net/http" + "net/textproto" + "strings" + "unicode" + + "github.com/cloudevents/sdk-go/v2/binding/spec" +) + +var attributeHeadersMapping map[string]string + +type customHeaderKey int + +const ( + headerKey customHeaderKey = iota +) + +func init() { + attributeHeadersMapping = make(map[string]string) + for _, v := range specs.Versions() { + for _, a := range v.Attributes() { + if a.Kind() == spec.DataContentType { + attributeHeadersMapping[a.Name()] = ContentType + } else { + attributeHeadersMapping[a.Name()] = textproto.CanonicalMIMEHeaderKey(prefix + a.Name()) + } + } + } +} + +func extNameToHeaderName(name string) string { + var b strings.Builder + b.Grow(len(name) + len(prefix)) + b.WriteString(prefix) + b.WriteRune(unicode.ToUpper(rune(name[0]))) + b.WriteString(name[1:]) + return b.String() +} + +func HeaderFrom(ctx context.Context) http.Header { + return 
binding.GetOrDefaultFromCtx(ctx, headerKey, make(http.Header)).(http.Header) +} + +func WithCustomHeader(ctx context.Context, header http.Header) context.Context { + return context.WithValue(ctx, headerKey, header) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go new file mode 100644 index 000000000..7a7c36f9b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go @@ -0,0 +1,175 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + "io" + nethttp "net/http" + "net/textproto" + "strings" + "unicode" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" +) + +const prefix = "Ce-" + +var specs = spec.WithPrefixMatchExact( + func(s string) string { + if s == "datacontenttype" { + return "Content-Type" + } else { + return textproto.CanonicalMIMEHeaderKey("Ce-" + s) + } + }, + "Ce-", +) + +const ContentType = "Content-Type" +const ContentLength = "Content-Length" + +// Message holds the Header and Body of a HTTP Request or Response. +// The Message instance *must* be constructed from NewMessage function. +// This message *cannot* be read several times. In order to read it more times, buffer it using binding/buffering methods +type Message struct { + Header nethttp.Header + BodyReader io.ReadCloser + OnFinish func(error) error + + ctx context.Context + + format format.Format + version spec.Version +} + +// Check if http.Message implements binding.Message +var _ binding.Message = (*Message)(nil) +var _ binding.MessageContext = (*Message)(nil) +var _ binding.MessageMetadataReader = (*Message)(nil) + +// NewMessage returns a binding.Message with header and data. +// The returned binding.Message *cannot* be read several times. 
In order to read it more times, buffer it using binding/buffering methods +func NewMessage(header nethttp.Header, body io.ReadCloser) *Message { + m := Message{Header: header} + if body != nil { + m.BodyReader = body + } + if m.format = format.Lookup(header.Get(ContentType)); m.format == nil { + m.version = specs.Version(m.Header.Get(specs.PrefixedSpecVersionName())) + } + return &m +} + +// NewMessageFromHttpRequest returns a binding.Message with header and data. +// The returned binding.Message *cannot* be read several times. In order to read it more times, buffer it using binding/buffering methods +func NewMessageFromHttpRequest(req *nethttp.Request) *Message { + if req == nil { + return nil + } + message := NewMessage(req.Header, req.Body) + message.ctx = req.Context() + return message +} + +// NewMessageFromHttpResponse returns a binding.Message with header and data. +// The returned binding.Message *cannot* be read several times. In order to read it more times, buffer it using binding/buffering methods +func NewMessageFromHttpResponse(resp *nethttp.Response) *Message { + if resp == nil { + return nil + } + msg := NewMessage(resp.Header, resp.Body) + return msg +} + +func (m *Message) ReadEncoding() binding.Encoding { + if m.version != nil { + return binding.EncodingBinary + } + if m.format != nil { + if m.format == format.JSONBatch { + return binding.EncodingBatch + } + return binding.EncodingStructured + } + return binding.EncodingUnknown +} + +func (m *Message) ReadStructured(ctx context.Context, encoder binding.StructuredWriter) error { + if m.format == nil { + return binding.ErrNotStructured + } else { + return encoder.SetStructuredEvent(ctx, m.format, m.BodyReader) + } +} + +func (m *Message) ReadBinary(ctx context.Context, encoder binding.BinaryWriter) (err error) { + if m.version == nil { + return binding.ErrNotBinary + } + + for k, v := range m.Header { + attr := m.version.Attribute(k) + if attr != nil { + err = encoder.SetAttribute(attr, v[0]) + } 
else if strings.HasPrefix(k, prefix) { + // Trim Prefix + To lower + var b strings.Builder + b.Grow(len(k) - len(prefix)) + b.WriteRune(unicode.ToLower(rune(k[len(prefix)]))) + b.WriteString(k[len(prefix)+1:]) + err = encoder.SetExtension(b.String(), v[0]) + } + if err != nil { + return err + } + } + + if m.BodyReader != nil { + err = encoder.SetData(m.BodyReader) + if err != nil { + return err + } + } + + return +} + +func (m *Message) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) { + attr := m.version.AttributeFromKind(k) + if attr != nil { + h := m.Header[attributeHeadersMapping[attr.Name()]] + if h != nil { + return attr, h[0] + } + return attr, nil + } + return nil, nil +} + +func (m *Message) GetExtension(name string) interface{} { + h := m.Header[extNameToHeaderName(name)] + if h != nil { + return h[0] + } + return nil +} + +func (m *Message) Context() context.Context { + return m.ctx +} + +func (m *Message) Finish(err error) error { + if m.BodyReader != nil { + _ = m.BodyReader.Close() + } + if m.OnFinish != nil { + return m.OnFinish(err) + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go new file mode 100644 index 000000000..6582af3ea --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go @@ -0,0 +1,300 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "fmt" + "net" + nethttp "net/http" + "net/url" + "strings" + "time" +) + +// Option is the function signature required to be considered an http.Option. +type Option func(*Protocol) error + +// WithTarget sets the outbound recipient of cloudevents when using an HTTP +// request. 
+func WithTarget(targetUrl string) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http target option can not set nil protocol") + } + targetUrl = strings.TrimSpace(targetUrl) + if targetUrl != "" { + var err error + var target *url.URL + target, err = url.Parse(targetUrl) + if err != nil { + return fmt.Errorf("http target option failed to parse target url: %s", err.Error()) + } + + p.Target = target + + if p.RequestTemplate == nil { + p.RequestTemplate = &nethttp.Request{ + Method: nethttp.MethodPost, + } + } + p.RequestTemplate.URL = target + + return nil + } + return fmt.Errorf("http target option was empty string") + } +} + +// WithHeader sets an additional default outbound header for all cloudevents +// when using an HTTP request. +func WithHeader(key, value string) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http header option can not set nil protocol") + } + key = strings.TrimSpace(key) + if key != "" { + if p.RequestTemplate == nil { + p.RequestTemplate = &nethttp.Request{ + Method: nethttp.MethodPost, + } + } + if p.RequestTemplate.Header == nil { + p.RequestTemplate.Header = nethttp.Header{} + } + p.RequestTemplate.Header.Add(key, value) + return nil + } + return fmt.Errorf("http header option was empty string") + } +} + +// WithShutdownTimeout sets the shutdown timeout when the http server is being shutdown. +func WithShutdownTimeout(timeout time.Duration) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http shutdown timeout option can not set nil protocol") + } + p.ShutdownTimeout = timeout + return nil + } +} + +func checkListen(p *Protocol, prefix string) error { + switch { + case p.listener.Load() != nil: + return fmt.Errorf("error setting %v: listener already set", prefix) + } + return nil +} + +// WithPort sets the listening port for StartReceiver. +// Only one of WithListener or WithPort is allowed. 
+func WithPort(port int) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http port option can not set nil protocol") + } + if port < 0 || port > 65535 { + return fmt.Errorf("http port option was given an invalid port: %d", port) + } + if err := checkListen(p, "http port option"); err != nil { + return err + } + p.Port = port + return nil + } +} + +// WithListener sets the listener for StartReceiver. +// Only one of WithListener or WithPort is allowed. +func WithListener(l net.Listener) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http listener option can not set nil protocol") + } + if err := checkListen(p, "http listener"); err != nil { + return err + } + p.listener.Store(l) + return nil + } +} + +// WithPath sets the path to receive cloudevents on for HTTP transports. +func WithPath(path string) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http path option can not set nil protocol") + } + path = strings.TrimSpace(path) + if len(path) == 0 { + return fmt.Errorf("http path option was given an invalid path: %q", path) + } + p.Path = path + return nil + } +} + +// WithMethod sets the HTTP verb (GET, POST, PUT, etc.) to use +// when using an HTTP request. +func WithMethod(method string) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http method option can not set nil protocol") + } + method = strings.TrimSpace(method) + if method != "" { + if p.RequestTemplate == nil { + p.RequestTemplate = &nethttp.Request{} + } + p.RequestTemplate.Method = method + return nil + } + return fmt.Errorf("http method option was empty string") + } +} + +// Middleware is a function that takes an existing http.Handler and wraps it in middleware, +// returning the wrapped http.Handler. +type Middleware func(next nethttp.Handler) nethttp.Handler + +// WithMiddleware adds an HTTP middleware to the transport. It may be specified multiple times. 
+// Middleware is applied to everything before it. For example +// `NewClient(WithMiddleware(foo), WithMiddleware(bar))` would result in `bar(foo(original))`. +func WithMiddleware(middleware Middleware) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http middleware option can not set nil protocol") + } + p.middleware = append(p.middleware, middleware) + return nil + } +} + +// WithRoundTripper sets the HTTP RoundTripper. +func WithRoundTripper(roundTripper nethttp.RoundTripper) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http round tripper option can not set nil protocol") + } + p.roundTripper = roundTripper + return nil + } +} + +// WithRoundTripperDecorator decorates the default HTTP RoundTripper chosen. +func WithRoundTripperDecorator(decorator func(roundTripper nethttp.RoundTripper) nethttp.RoundTripper) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http round tripper option can not set nil protocol") + } + if p.roundTripper == nil { + if p.Client == nil { + p.roundTripper = nethttp.DefaultTransport + } else { + p.roundTripper = p.Client.Transport + } + } + p.roundTripper = decorator(p.roundTripper) + return nil + } +} + +// WithClient sets the protocol client +func WithClient(client nethttp.Client) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("client option can not set nil protocol") + } + p.Client = &client + return nil + } +} + +// WithGetHandlerFunc sets the http GET handler func +func WithGetHandlerFunc(fn nethttp.HandlerFunc) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http GET handler func can not set nil protocol") + } + p.GetHandlerFn = fn + return nil + } +} + +// WithOptionsHandlerFunc sets the http OPTIONS handler func +func WithOptionsHandlerFunc(fn nethttp.HandlerFunc) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http OPTIONS handler 
func can not set nil protocol") + } + p.OptionsHandlerFn = fn + return nil + } +} + +// WithDefaultOptionsHandlerFunc sets the options handler to be the built in handler and configures the options. +// methods: the supported methods reported to OPTIONS caller. +// rate: the rate limit reported to OPTIONS caller. +// origins: the prefix of the accepted origins, or "*". +// callback: preform the callback to ACK the OPTIONS request. +func WithDefaultOptionsHandlerFunc(methods []string, rate int, origins []string, callback bool) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http OPTIONS handler func can not set nil protocol") + } + p.OptionsHandlerFn = p.OptionsHandler + p.WebhookConfig = &WebhookConfig{ + AllowedMethods: methods, + AllowedRate: &rate, + AllowedOrigins: origins, + AutoACKCallback: callback, + } + return nil + } +} + +// IsRetriable is a custom function that can be used to override the +// default retriable status codes. +type IsRetriable func(statusCode int) bool + +// WithIsRetriableFunc sets the function that gets called to determine if an +// error should be retried. If not set, the defaultIsRetriableFunc is used. +func WithIsRetriableFunc(isRetriable IsRetriable) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("isRetriable handler func can not set nil protocol") + } + if isRetriable == nil { + return fmt.Errorf("isRetriable handler can not be nil") + } + p.isRetriableFunc = isRetriable + return nil + } +} + +func WithRateLimiter(rl RateLimiter) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http OPTIONS handler func can not set nil protocol") + } + p.limiter = rl + return nil + } +} + +// WithRequestDataAtContextMiddleware adds to the Context RequestData. +// This enables a user's dispatch handler to inspect HTTP request information by +// retrieving it from the Context. 
+func WithRequestDataAtContextMiddleware() Option { + return WithMiddleware(func(next nethttp.Handler) nethttp.Handler { + return nethttp.HandlerFunc(func(w nethttp.ResponseWriter, r *nethttp.Request) { + ctx := WithRequestDataAtContext(r.Context(), r) + next.ServeHTTP(w, r.WithContext(ctx)) + }) + }) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go new file mode 100644 index 000000000..7ee3b8fe1 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go @@ -0,0 +1,411 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +const ( + // DefaultShutdownTimeout defines the default timeout given to the http.Server when calling Shutdown. + DefaultShutdownTimeout = time.Minute * 1 +) + +type msgErr struct { + msg *Message + respFn protocol.ResponseFn + err error +} + +// Default error codes that we retry on - string isn't used, it's just there so +// people know what each error code's title is. +// To modify this use Option +var defaultRetriableErrors = map[int]string{ + 404: "Not Found", + 413: "Payload Too Large", + 425: "Too Early", + 429: "Too Many Requests", + 502: "Bad Gateway", + 503: "Service Unavailable", + 504: "Gateway Timeout", +} + +// Protocol acts as both a http client and a http handler. 
+type Protocol struct { + Target *url.URL + RequestTemplate *http.Request + Client *http.Client + incoming chan msgErr + + // OptionsHandlerFn handles the OPTIONS method requests and is intended to + // implement the abuse protection spec: + // https://github.com/cloudevents/spec/blob/v1.0/http-webhook.md#4-abuse-protection + OptionsHandlerFn http.HandlerFunc + WebhookConfig *WebhookConfig + + GetHandlerFn http.HandlerFunc + DeleteHandlerFn http.HandlerFunc + + // To support Opener: + + // ShutdownTimeout defines the timeout given to the http.Server when calling Shutdown. + // If 0, DefaultShutdownTimeout is used. + ShutdownTimeout time.Duration + + // Port is the port configured to bind the receiver to. Defaults to 8080. + // If you want to know the effective port you're listening to, use GetListeningPort() + Port int + // Path is the path to bind the receiver to. Defaults to "/". + Path string + + // Receive Mutex + reMu sync.Mutex + // Handler is the handler the http Server will use. Use this to reuse the + // http server. If nil, the Protocol will create a one. + Handler *http.ServeMux + + listener atomic.Value + roundTripper http.RoundTripper + server *http.Server + handlerRegistered bool + middleware []Middleware + limiter RateLimiter + + isRetriableFunc IsRetriable +} + +func New(opts ...Option) (*Protocol, error) { + p := &Protocol{ + incoming: make(chan msgErr), + Port: -1, + } + if err := p.applyOptions(opts...); err != nil { + return nil, err + } + + if p.Client == nil { + // This is how http.DefaultClient is initialized. We do not just use + // that because when WithRoundTripper is used, it will change the client's + // transport, which would cause that transport to be used process-wide. 
+ p.Client = &http.Client{} + } + + if p.roundTripper != nil { + p.Client.Transport = p.roundTripper + } + + if p.ShutdownTimeout == 0 { + p.ShutdownTimeout = DefaultShutdownTimeout + } + + if p.isRetriableFunc == nil { + p.isRetriableFunc = defaultIsRetriableFunc + } + + if p.limiter == nil { + p.limiter = noOpLimiter{} + } + + return p, nil +} + +// NewObserved creates an HTTP protocol with trace propagating middleware. +// Deprecated: now this behaves like New and it will be removed in future releases, +// setup the http observed protocol using the opencensus separate module NewObservedHttp +var NewObserved = New + +func (p *Protocol) applyOptions(opts ...Option) error { + for _, fn := range opts { + if err := fn(p); err != nil { + return err + } + } + return nil +} + +// Send implements binding.Sender +func (p *Protocol) Send(ctx context.Context, m binding.Message, transformers ...binding.Transformer) error { + if ctx == nil { + return fmt.Errorf("nil Context") + } else if m == nil { + return fmt.Errorf("nil Message") + } + + msg, err := p.Request(ctx, m, transformers...) + if msg != nil { + defer func() { _ = msg.Finish(err) }() + } + if err != nil && !protocol.IsACK(err) { + var res *Result + if protocol.ResultAs(err, &res) { + if message, ok := msg.(*Message); ok { + buf := new(bytes.Buffer) + buf.ReadFrom(message.BodyReader) + errorStr := buf.String() + // If the error is not wrapped, then append the original error string. 
+ if og, ok := err.(*Result); ok { + og.Format = og.Format + "%s" + og.Args = append(og.Args, errorStr) + err = og + } else { + err = NewResult(res.StatusCode, "%w: %s", err, errorStr) + } + } + } + } + return err +} + +// Request implements binding.Requester +func (p *Protocol) Request(ctx context.Context, m binding.Message, transformers ...binding.Transformer) (binding.Message, error) { + if ctx == nil { + return nil, fmt.Errorf("nil Context") + } else if m == nil { + return nil, fmt.Errorf("nil Message") + } + + var err error + defer func() { _ = m.Finish(err) }() + + req := p.makeRequest(ctx) + + if p.Client == nil || req == nil || req.URL == nil { + return nil, fmt.Errorf("not initialized: %#v", p) + } + + if err = WriteRequest(ctx, m, req, transformers...); err != nil { + return nil, err + } + + return p.do(ctx, req) +} + +func (p *Protocol) makeRequest(ctx context.Context) *http.Request { + req := &http.Request{ + Method: http.MethodPost, + Header: HeaderFrom(ctx), + } + + if p.RequestTemplate != nil { + req.Method = p.RequestTemplate.Method + req.URL = p.RequestTemplate.URL + req.Close = p.RequestTemplate.Close + req.Host = p.RequestTemplate.Host + copyHeadersEnsure(p.RequestTemplate.Header, &req.Header) + } + + if p.Target != nil { + req.URL = p.Target + } + + // Override the default request with target from context. + if target := cecontext.TargetFrom(ctx); target != nil { + req.URL = target + } + return req.WithContext(ctx) +} + +// Ensure to is a non-nil map before copying +func copyHeadersEnsure(from http.Header, to *http.Header) { + if len(from) > 0 { + if *to == nil { + *to = http.Header{} + } + copyHeaders(from, *to) + } +} + +func copyHeaders(from, to http.Header) { + if from == nil || to == nil { + return + } + for header, values := range from { + for _, value := range values { + to.Add(header, value) + } + } +} + +// Receive the next incoming HTTP request as a CloudEvent. 
+// Returns non-nil error if the incoming HTTP request fails to parse as a CloudEvent +// Returns io.EOF if the receiver is closed. +func (p *Protocol) Receive(ctx context.Context) (binding.Message, error) { + if ctx == nil { + return nil, fmt.Errorf("nil Context") + } + + msg, fn, err := p.Respond(ctx) + // No-op the response when finish is invoked. + if msg != nil { + return binding.WithFinish(msg, func(err error) { + if fn != nil { + _ = fn(ctx, nil, nil) + } + }), err + } else { + return nil, err + } +} + +// Respond receives the next incoming HTTP request as a CloudEvent and waits +// for the response callback to invoked before continuing. +// Returns non-nil error if the incoming HTTP request fails to parse as a CloudEvent +// Returns io.EOF if the receiver is closed. +func (p *Protocol) Respond(ctx context.Context) (binding.Message, protocol.ResponseFn, error) { + if ctx == nil { + return nil, nil, fmt.Errorf("nil Context") + } + + select { + case in, ok := <-p.incoming: + if !ok { + return nil, nil, io.EOF + } + + if in.msg == nil { + return nil, in.respFn, in.err + } + return in.msg, in.respFn, in.err + + case <-ctx.Done(): + return nil, nil, io.EOF + } +} + +// ServeHTTP implements http.Handler. +// Blocks until ResponseFn is invoked. 
+func (p *Protocol) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + // always apply limiter first using req context + ok, reset, err := p.limiter.Allow(req.Context(), req) + if err != nil { + p.incoming <- msgErr{msg: nil, err: fmt.Errorf("unable to acquire rate limit token: %w", err)} + rw.WriteHeader(http.StatusInternalServerError) + return + } + + if !ok { + rw.Header().Add("Retry-After", strconv.Itoa(int(reset))) + http.Error(rw, "limit exceeded", 429) + return + } + + // Filter the GET style methods: + switch req.Method { + case http.MethodOptions: + if p.OptionsHandlerFn == nil { + rw.WriteHeader(http.StatusMethodNotAllowed) + return + } + p.OptionsHandlerFn(rw, req) + return + + case http.MethodGet: + if p.GetHandlerFn == nil { + rw.WriteHeader(http.StatusMethodNotAllowed) + return + } + p.GetHandlerFn(rw, req) + return + + case http.MethodDelete: + if p.DeleteHandlerFn == nil { + rw.WriteHeader(http.StatusMethodNotAllowed) + return + } + p.DeleteHandlerFn(rw, req) + return + } + + m := NewMessageFromHttpRequest(req) + if m == nil { + // Should never get here unless ServeHTTP is called directly. + p.incoming <- msgErr{msg: nil, err: binding.ErrUnknownEncoding} + rw.WriteHeader(http.StatusBadRequest) + return // if there was no message, return. 
+ } + + var finishErr error + m.OnFinish = func(err error) error { + finishErr = err + return nil + } + + wg := sync.WaitGroup{} + wg.Add(1) + var fn protocol.ResponseFn = func(ctx context.Context, respMsg binding.Message, res protocol.Result, transformers ...binding.Transformer) error { + // Unblock the ServeHTTP after the reply is written + defer func() { + wg.Done() + }() + + if finishErr != nil { + http.Error(rw, fmt.Sprintf("Cannot forward CloudEvent: %s", finishErr), http.StatusInternalServerError) + return finishErr + } + + status := http.StatusOK + var errMsg string + if res != nil { + var result *Result + switch { + case protocol.ResultAs(res, &result): + if result.StatusCode > 100 && result.StatusCode < 600 { + status = result.StatusCode + } + errMsg = fmt.Errorf(result.Format, result.Args...).Error() + case !protocol.IsACK(res): + // Map client errors to http status code + validationError := event.ValidationError{} + if errors.As(res, &validationError) { + status = http.StatusBadRequest + rw.Header().Set("content-type", "text/plain") + rw.WriteHeader(status) + _, _ = rw.Write([]byte(validationError.Error())) + return validationError + } else if errors.Is(res, binding.ErrUnknownEncoding) { + status = http.StatusUnsupportedMediaType + } else { + status = http.StatusInternalServerError + } + } + } + + if respMsg != nil { + err := WriteResponseWriter(ctx, respMsg, status, rw, transformers...) 
+ return respMsg.Finish(err) + } + + rw.WriteHeader(status) + if _, err := rw.Write([]byte(errMsg)); err != nil { + return err + } + return nil + } + + p.incoming <- msgErr{msg: m, respFn: fn} // Send to Request + // Block until ResponseFn is invoked + wg.Wait() +} + +func defaultIsRetriableFunc(sc int) bool { + _, ok := defaultRetriableErrors[sc] + return ok +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go new file mode 100644 index 000000000..04ef96915 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go @@ -0,0 +1,143 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + "fmt" + "net" + "net/http" + "strings" + + "github.com/cloudevents/sdk-go/v2/protocol" +) + +var _ protocol.Opener = (*Protocol)(nil) + +func (p *Protocol) OpenInbound(ctx context.Context) error { + p.reMu.Lock() + defer p.reMu.Unlock() + + if p.Handler == nil { + p.Handler = http.NewServeMux() + } + + if !p.handlerRegistered { + // handler.Handle might panic if the user tries to use the same path as the sdk. + p.Handler.Handle(p.GetPath(), p) + p.handlerRegistered = true + } + + // After listener is invok + listener, err := p.listen() + if err != nil { + return err + } + + p.server = &http.Server{ + Addr: listener.Addr().String(), + Handler: attachMiddleware(p.Handler, p.middleware), + ReadTimeout: DefaultTimeout, + WriteTimeout: DefaultTimeout, + } + + // Shutdown + defer func() { + _ = p.server.Close() + p.server = nil + }() + + errChan := make(chan error) + go func() { + errChan <- p.server.Serve(listener) + }() + + // wait for the server to return or ctx.Done(). + select { + case <-ctx.Done(): + // Try a graceful shutdown. 
+ ctx, cancel := context.WithTimeout(context.Background(), p.ShutdownTimeout) + defer cancel() + + shdwnErr := p.server.Shutdown(ctx) + if shdwnErr != nil { + shdwnErr = fmt.Errorf("shutting down HTTP server: %w", shdwnErr) + } + + // Wait for server goroutine to exit + rntmErr := <-errChan + if rntmErr != nil && rntmErr != http.ErrServerClosed { + rntmErr = fmt.Errorf("server failed during shutdown: %w", rntmErr) + + if shdwnErr != nil { + return fmt.Errorf("combined error during shutdown of HTTP server: %w, %v", + shdwnErr, rntmErr) + } + + return rntmErr + } + + return shdwnErr + + case err := <-errChan: + if err != nil { + return fmt.Errorf("during runtime of HTTP server: %w", err) + } + return nil + } +} + +// GetListeningPort returns the listening port. +// Returns -1 if it's not listening. +func (p *Protocol) GetListeningPort() int { + if listener := p.listener.Load(); listener != nil { + if tcpAddr, ok := listener.(net.Listener).Addr().(*net.TCPAddr); ok { + return tcpAddr.Port + } + } + return -1 +} + +// listen if not already listening, update t.Port +func (p *Protocol) listen() (net.Listener, error) { + if p.listener.Load() == nil { + port := 8080 + if p.Port != -1 { + port = p.Port + if port < 0 || port > 65535 { + return nil, fmt.Errorf("invalid port %d", port) + } + } + var err error + var listener net.Listener + if listener, err = net.Listen("tcp", fmt.Sprintf(":%d", port)); err != nil { + return nil, err + } + p.listener.Store(listener) + return listener, nil + } + return p.listener.Load().(net.Listener), nil +} + +// GetPath returns the path the transport is hosted on. If the path is '/', +// the transport will handle requests on any URI. To discover the true path +// a request was received on, inspect the context from Receive(cxt, ...) with +// TransportContextFrom(ctx). 
+func (p *Protocol) GetPath() string { + path := strings.TrimSpace(p.Path) + if len(path) > 0 { + return path + } + return "/" // default +} + +// attachMiddleware attaches the HTTP middleware to the specified handler. +func attachMiddleware(h http.Handler, middleware []Middleware) http.Handler { + for _, m := range middleware { + h = m(h) + } + return h +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go new file mode 100644 index 000000000..9c4c10a29 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go @@ -0,0 +1,34 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + "net/http" +) + +type RateLimiter interface { + // Allow attempts to take one token from the rate limiter for the specified + // request. It returns ok when this operation was successful. In case ok is + // false, reset will indicate the time in seconds when it is safe to perform + // another attempt. An error is returned when this operation failed, e.g. due to + // a backend error. + Allow(ctx context.Context, r *http.Request) (ok bool, reset uint64, err error) + // Close terminates rate limiter and cleans up any data structures or + // connections that may remain open. After a store is stopped, Take() should + // always return zero values. 
+ Close(ctx context.Context) error +} + +type noOpLimiter struct{} + +func (n noOpLimiter) Allow(ctx context.Context, r *http.Request) (bool, uint64, error) { + return true, 0, nil +} + +func (n noOpLimiter) Close(ctx context.Context) error { + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go new file mode 100644 index 000000000..21fc7e9b3 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go @@ -0,0 +1,126 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "bytes" + "context" + "errors" + "io" + "net/http" + "time" + + "go.uber.org/zap" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +func (p *Protocol) do(ctx context.Context, req *http.Request) (binding.Message, error) { + params := cecontext.RetriesFrom(ctx) + + switch params.Strategy { + case cecontext.BackoffStrategyConstant, cecontext.BackoffStrategyLinear, cecontext.BackoffStrategyExponential: + return p.doWithRetry(ctx, params, req) + case cecontext.BackoffStrategyNone: + fallthrough + default: + return p.doOnce(req) + } +} + +func (p *Protocol) doOnce(req *http.Request) (binding.Message, protocol.Result) { + resp, err := p.Client.Do(req) + if err != nil { + return nil, protocol.NewReceipt(false, "%w", err) + } + + var result protocol.Result + if resp.StatusCode/100 == 2 { + result = protocol.ResultACK + } else { + result = protocol.ResultNACK + } + + return NewMessage(resp.Header, resp.Body), NewResult(resp.StatusCode, "%w", result) +} + +func (p *Protocol) doWithRetry(ctx context.Context, params *cecontext.RetryParams, req *http.Request) (binding.Message, error) { + start := time.Now() + retry := 0 + results := make([]protocol.Result, 0) + + var ( + body []byte + err error + 
) + + if req != nil && req.Body != nil { + defer func() { + if err = req.Body.Close(); err != nil { + cecontext.LoggerFrom(ctx).Warnw("could not close request body", zap.Error(err)) + } + }() + body, err = io.ReadAll(req.Body) + if err != nil { + panic(err) + } + resetBody(req, body) + } + + for { + msg, result := p.doOnce(req) + + // Fast track common case. + if protocol.IsACK(result) { + return msg, NewRetriesResult(result, retry, start, results) + } + + var httpResult *Result + if errors.As(result, &httpResult) { + sc := httpResult.StatusCode + if !p.isRetriableFunc(sc) { + cecontext.LoggerFrom(ctx).Debugw("status code not retryable, will not try again", + zap.Error(httpResult), + zap.Int("statusCode", sc)) + return msg, NewRetriesResult(result, retry, start, results) + } + } + + // total tries = retry + 1 + if err = params.Backoff(ctx, retry+1); err != nil { + // do not try again. + cecontext.LoggerFrom(ctx).Debugw("backoff error, will not try again", zap.Error(err)) + return msg, NewRetriesResult(result, retry, start, results) + } + + retry++ + resetBody(req, body) + results = append(results, result) + if msg != nil { + // avoid leak, forget message, ignore error + _ = msg.Finish(nil) + } + } +} + +// reset body to allow it to be read multiple times, e.g. 
when retrying http +// requests +func resetBody(req *http.Request, body []byte) { + if req == nil || req.Body == nil { + return + } + + req.Body = io.NopCloser(bytes.NewReader(body)) + + // do not modify existing GetBody function + if req.GetBody == nil { + req.GetBody = func() (io.ReadCloser, error) { + return io.NopCloser(bytes.NewReader(body)), nil + } + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go new file mode 100644 index 000000000..7a0b2626c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go @@ -0,0 +1,60 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "errors" + "fmt" + + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// NewResult returns a fully populated http Result that should be used as +// a transport.Result. +func NewResult(statusCode int, messageFmt string, args ...interface{}) protocol.Result { + return &Result{ + StatusCode: statusCode, + Format: messageFmt, + Args: args, + } +} + +// Result wraps the fields required to make adjustments for http Responses. +type Result struct { + StatusCode int + Format string + Args []interface{} +} + +// make sure Result implements error. +var _ error = (*Result)(nil) + +// Is returns if the target error is a Result type checking target. +func (e *Result) Is(target error) bool { + if o, ok := target.(*Result); ok { + return e.StatusCode == o.StatusCode + } + + // Special case for nil == ACK + if o, ok := target.(*protocol.Receipt); ok { + if e == nil && o.ACK { + return true + } + } + + // Allow for wrapped errors. + if e != nil { + err := fmt.Errorf(e.Format, e.Args...) + return errors.Is(err, target) + } + return false +} + +// Error returns the string that is formed by using the format string with the +// provided args. 
+func (e *Result) Error() string { + return fmt.Sprintf("%d: %v", e.StatusCode, fmt.Errorf(e.Format, e.Args...)) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go new file mode 100644 index 000000000..f4046d522 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go @@ -0,0 +1,59 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "fmt" + "time" + + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// NewRetriesResult returns a http RetriesResult that should be used as +// a transport.Result without retries +func NewRetriesResult(result protocol.Result, retries int, startTime time.Time, attempts []protocol.Result) protocol.Result { + rr := &RetriesResult{ + Result: result, + Retries: retries, + Duration: time.Since(startTime), + } + if len(attempts) > 0 { + rr.Attempts = attempts + } + return rr +} + +// RetriesResult wraps the fields required to make adjustments for http Responses. +type RetriesResult struct { + // The last result + protocol.Result + + // Retries is the number of times the request was tried + Retries int + + // Duration records the time spent retrying. Exclude the successful request (if any) + Duration time.Duration + + // Attempts of all failed requests. Exclude last result. + Attempts []protocol.Result +} + +// make sure RetriesResult implements error. +var _ error = (*RetriesResult)(nil) + +// Is returns if the target error is a RetriesResult type checking target. +func (e *RetriesResult) Is(target error) bool { + return protocol.ResultIs(e.Result, target) +} + +// Error returns the string that is formed by using the format string with the +// provided args. 
+func (e *RetriesResult) Error() string { + if e.Retries == 0 { + return e.Result.Error() + } + return fmt.Sprintf("%s (%dx)", e.Result.Error(), e.Retries) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go new file mode 100644 index 000000000..350fc1cf6 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go @@ -0,0 +1,89 @@ +/* + Copyright 2022 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "bytes" + "context" + "encoding/json" + nethttp "net/http" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/event" +) + +// NewEventFromHTTPRequest returns an Event. +func NewEventFromHTTPRequest(req *nethttp.Request) (*event.Event, error) { + msg := NewMessageFromHttpRequest(req) + return binding.ToEvent(context.Background(), msg) +} + +// NewEventFromHTTPResponse returns an Event. +func NewEventFromHTTPResponse(resp *nethttp.Response) (*event.Event, error) { + msg := NewMessageFromHttpResponse(resp) + return binding.ToEvent(context.Background(), msg) +} + +// NewEventsFromHTTPRequest returns a batched set of Events from a HTTP Request +func NewEventsFromHTTPRequest(req *nethttp.Request) ([]event.Event, error) { + msg := NewMessageFromHttpRequest(req) + return binding.ToEvents(context.Background(), msg, msg.BodyReader) +} + +// NewEventsFromHTTPResponse returns a batched set of Events from a HTTP Response +func NewEventsFromHTTPResponse(resp *nethttp.Response) ([]event.Event, error) { + msg := NewMessageFromHttpResponse(resp) + return binding.ToEvents(context.Background(), msg, msg.BodyReader) +} + +// NewHTTPRequestFromEvent creates a http.Request object that can be used with any http.Client for a singular event. +// This is an HTTP POST action to the provided url. 
+func NewHTTPRequestFromEvent(ctx context.Context, url string, event event.Event) (*nethttp.Request, error) { + if err := event.Validate(); err != nil { + return nil, err + } + + req, err := nethttp.NewRequestWithContext(ctx, nethttp.MethodPost, url, nil) + if err != nil { + return nil, err + } + if err := WriteRequest(ctx, (*binding.EventMessage)(&event), req); err != nil { + return nil, err + } + + return req, nil +} + +// NewHTTPRequestFromEvents creates a http.Request object that can be used with any http.Client for sending +// a batched set of events. This is an HTTP POST action to the provided url. +func NewHTTPRequestFromEvents(ctx context.Context, url string, events []event.Event) (*nethttp.Request, error) { + // Sending batch events is quite straightforward, as there is only JSON format, so a simple implementation. + for _, e := range events { + if err := e.Validate(); err != nil { + return nil, err + } + } + var buffer bytes.Buffer + err := json.NewEncoder(&buffer).Encode(events) + if err != nil { + return nil, err + } + + request, err := nethttp.NewRequestWithContext(ctx, nethttp.MethodPost, url, &buffer) + if err != nil { + return nil, err + } + + request.Header.Set(ContentType, event.ApplicationCloudEventsBatchJSON) + + return request, nil +} + +// IsHTTPBatch returns if the current http.Request or http.Response is a batch event operation, by checking the +// header `Content-Type` value. 
+func IsHTTPBatch(header nethttp.Header) bool { + return header.Get(ContentType) == event.ApplicationCloudEventsBatchJSON +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go new file mode 100644 index 000000000..f22259a3a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go @@ -0,0 +1,142 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "bytes" + "context" + "io" + "net/http" + "strings" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/types" +) + +// WriteRequest fills the provided httpRequest with the message m. +// Using context you can tweak the encoding processing (more details on binding.Write documentation). +func WriteRequest(ctx context.Context, m binding.Message, httpRequest *http.Request, transformers ...binding.Transformer) error { + structuredWriter := (*httpRequestWriter)(httpRequest) + binaryWriter := (*httpRequestWriter)(httpRequest) + + _, err := binding.Write( + ctx, + m, + structuredWriter, + binaryWriter, + transformers..., + ) + return err +} + +type httpRequestWriter http.Request + +func (b *httpRequestWriter) SetStructuredEvent(ctx context.Context, format format.Format, event io.Reader) error { + b.Header.Set(ContentType, format.MediaType()) + return b.setBody(event) +} + +func (b *httpRequestWriter) Start(ctx context.Context) error { + return nil +} + +func (b *httpRequestWriter) End(ctx context.Context) error { + return nil +} + +func (b *httpRequestWriter) SetData(data io.Reader) error { + return b.setBody(data) +} + +// setBody is a cherry-pick of the implementation in http.NewRequestWithContext +func (b *httpRequestWriter) setBody(body io.Reader) error { + rc, ok := body.(io.ReadCloser) + if 
!ok && body != nil { + rc = io.NopCloser(body) + } + b.Body = rc + if body != nil { + switch v := body.(type) { + case *bytes.Buffer: + b.ContentLength = int64(v.Len()) + buf := v.Bytes() + b.GetBody = func() (io.ReadCloser, error) { + r := bytes.NewReader(buf) + return io.NopCloser(r), nil + } + case *bytes.Reader: + b.ContentLength = int64(v.Len()) + snapshot := *v + b.GetBody = func() (io.ReadCloser, error) { + r := snapshot + return io.NopCloser(&r), nil + } + case *strings.Reader: + b.ContentLength = int64(v.Len()) + snapshot := *v + b.GetBody = func() (io.ReadCloser, error) { + r := snapshot + return io.NopCloser(&r), nil + } + default: + // This is where we'd set it to -1 (at least + // if body != NoBody) to mean unknown, but + // that broke people during the Go 1.8 testing + // period. People depend on it being 0 I + // guess. Maybe retry later. See Issue 18117. + } + // For client requests, Request.ContentLength of 0 + // means either actually 0, or unknown. The only way + // to explicitly say that the ContentLength is zero is + // to set the Body to nil. But turns out too much code + // depends on NewRequest returning a non-nil Body, + // so we use a well-known ReadCloser variable instead + // and have the http package also treat that sentinel + // variable to mean explicitly zero. + if b.GetBody != nil && b.ContentLength == 0 { + b.Body = http.NoBody + b.GetBody = func() (io.ReadCloser, error) { return http.NoBody, nil } + } + } + return nil +} + +func (b *httpRequestWriter) SetAttribute(attribute spec.Attribute, value interface{}) error { + mapping := attributeHeadersMapping[attribute.Name()] + if value == nil { + delete(b.Header, mapping) + return nil + } + + // Http headers, everything is a string! 
+ s, err := types.Format(value) + if err != nil { + return err + } + b.Header[mapping] = append(b.Header[mapping], s) + return nil +} + +func (b *httpRequestWriter) SetExtension(name string, value interface{}) error { + if value == nil { + delete(b.Header, extNameToHeaderName(name)) + return nil + } + // Http headers, everything is a string! + s, err := types.Format(value) + if err != nil { + return err + } + b.Header[extNameToHeaderName(name)] = []string{s} + return nil +} + +var ( + _ binding.StructuredWriter = (*httpRequestWriter)(nil) // Test it conforms to the interface + _ binding.BinaryWriter = (*httpRequestWriter)(nil) // Test it conforms to the interface +) diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go new file mode 100644 index 000000000..41385dab1 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go @@ -0,0 +1,126 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "bytes" + "context" + "io" + "net/http" + "strconv" + "strings" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/types" +) + +// WriteResponseWriter writes out to the the provided httpResponseWriter with the message m. +// Using context you can tweak the encoding processing (more details on binding.Write documentation). 
+func WriteResponseWriter(ctx context.Context, m binding.Message, status int, rw http.ResponseWriter, transformers ...binding.Transformer) error { + if status < 200 || status >= 600 { + status = http.StatusOK + } + writer := &httpResponseWriter{rw: rw, status: status} + + _, err := binding.Write( + ctx, + m, + writer, + writer, + transformers..., + ) + return err +} + +type httpResponseWriter struct { + rw http.ResponseWriter + status int + body io.Reader +} + +func (b *httpResponseWriter) SetStructuredEvent(ctx context.Context, format format.Format, event io.Reader) error { + b.rw.Header().Set(ContentType, format.MediaType()) + b.body = event + return b.finalizeWriter() +} + +func (b *httpResponseWriter) Start(ctx context.Context) error { + return nil +} + +func (b *httpResponseWriter) SetAttribute(attribute spec.Attribute, value interface{}) error { + mapping := attributeHeadersMapping[attribute.Name()] + if value == nil { + delete(b.rw.Header(), mapping) + } + + // Http headers, everything is a string! + s, err := types.Format(value) + if err != nil { + return err + } + b.rw.Header()[mapping] = append(b.rw.Header()[mapping], s) + return nil +} + +func (b *httpResponseWriter) SetExtension(name string, value interface{}) error { + if value == nil { + delete(b.rw.Header(), extNameToHeaderName(name)) + } + // Http headers, everything is a string! 
+ s, err := types.Format(value) + if err != nil { + return err + } + b.rw.Header()[extNameToHeaderName(name)] = []string{s} + return nil +} + +func (b *httpResponseWriter) SetData(reader io.Reader) error { + b.body = reader + return nil +} + +func (b *httpResponseWriter) finalizeWriter() error { + if b.body != nil { + // Try to figure it out if we have a content-length + contentLength := -1 + switch v := b.body.(type) { + case *bytes.Buffer: + contentLength = v.Len() + case *bytes.Reader: + contentLength = v.Len() + case *strings.Reader: + contentLength = v.Len() + } + + if contentLength != -1 { + b.rw.Header().Add("Content-length", strconv.Itoa(contentLength)) + } + + // Finalize the headers. + b.rw.WriteHeader(b.status) + + // Write body. + _, err := io.Copy(b.rw, b.body) + if err != nil { + return err + } + } else { + // Finalize the headers. + b.rw.WriteHeader(b.status) + } + return nil +} + +func (b *httpResponseWriter) End(ctx context.Context) error { + return b.finalizeWriter() +} + +var _ binding.StructuredWriter = (*httpResponseWriter)(nil) // Test it conforms to the interface +var _ binding.BinaryWriter = (*httpResponseWriter)(nil) // Test it conforms to the interface diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go new file mode 100644 index 000000000..e7a74294d --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go @@ -0,0 +1,54 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/binding" +) + +// Receiver receives messages. +type Receiver interface { + // Receive blocks till a message is received or ctx expires. + // Receive can be invoked safely from different goroutines. + // + // A non-nil error means the receiver is closed. + // io.EOF means it closed cleanly, any other value indicates an error. 
+ // The caller is responsible for `Finish()` the returned message + Receive(ctx context.Context) (binding.Message, error) +} + +// ReceiveCloser is a Receiver that can be closed. +type ReceiveCloser interface { + Receiver + Closer +} + +// ResponseFn is the function callback provided from Responder.Respond to allow +// for a receiver to "reply" to a message it receives. +// transformers are applied when the message is written on the wire. +type ResponseFn func(ctx context.Context, m binding.Message, r Result, transformers ...binding.Transformer) error + +// Responder receives messages and is given a callback to respond. +type Responder interface { + // Respond blocks till a message is received or ctx expires. + // Respond can be invoked safely from different goroutines. + // + // A non-nil error means the receiver is closed. + // io.EOF means it closed cleanly, any other value indicates an error. + // The caller is responsible for `Finish()` the returned message, + // while the protocol implementation is responsible for `Finish()` the response message. + // The caller MUST invoke ResponseFn, in order to avoid leaks. + // The correct flow for the caller is to finish the received message and then invoke the ResponseFn + Respond(ctx context.Context) (binding.Message, ResponseFn, error) +} + +// ResponderCloser is a Responder that can be closed. +type ResponderCloser interface { + Responder + Closer +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go new file mode 100644 index 000000000..4a058c962 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go @@ -0,0 +1,23 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import ( + "context" +) + +// Opener is the common interface for things that need to be opened. 
+type Opener interface { + // OpenInbound is a blocking call and ctx is used to stop the Inbound message Receiver/Responder. + // Closing the context won't close the Receiver/Responder, aka it won't invoke Close(ctx). + OpenInbound(ctx context.Context) error +} + +// Closer is the common interface for things that can be closed. +// After invoking Close(ctx), you cannot reuse the object you closed. +type Closer interface { + Close(ctx context.Context) error +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go new file mode 100644 index 000000000..e44fa432a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go @@ -0,0 +1,49 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/binding" +) + +// Sender sends messages. +type Sender interface { + // Send a message. + // + // Send returns when the "outbound" message has been sent. The Sender may + // still be expecting acknowledgment or holding other state for the message. + // + // m.Finish() is called when sending is finished (both succeeded or failed): + // expected acknowledgments (or errors) have been received, the Sender is + // no longer holding any state for the message. + // m.Finish() may be called during or after Send(). + // + // transformers are applied when the message is written on the wire. + Send(ctx context.Context, m binding.Message, transformers ...binding.Transformer) error +} + +// SendCloser is a Sender that can be closed. +type SendCloser interface { + Sender + Closer +} + +// Requester sends a message and receives a response +// +// Optional interface that may be implemented by protocols that support +// request/response correlation. +type Requester interface { + // Request sends m like Sender.Send() but also arranges to receive a response. 
+ Request(ctx context.Context, m binding.Message, transformers ...binding.Transformer) (binding.Message, error) +} + +// RequesterCloser is a Requester that can be closed. +type RequesterCloser interface { + Requester + Closer +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go new file mode 100644 index 000000000..eae64e018 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go @@ -0,0 +1,127 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import ( + "errors" + "fmt" +) + +// Result leverages go's error wrapping. +type Result error + +// ResultIs reports whether any error in err's chain matches target. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +// (text from errors/wrap.go) +var ResultIs = errors.Is + +// ResultAs finds the first error in err's chain that matches target, and if so, sets +// target to that error value and returns true. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error matches target if the error's concrete value is assignable to the value +// pointed to by target, or if the error has a method As(interface{}) bool such that +// As(target) returns true. In the latter case, the As method is responsible for +// setting target. +// +// As will panic if target is not a non-nil pointer to either a type that implements +// error, or to any interface type. As returns false if err is nil. +// (text from errors/wrap.go) +var ResultAs = errors.As + +func NewResult(messageFmt string, args ...interface{}) Result { + return fmt.Errorf(messageFmt, args...) 
+} + +// IsACK true means the recipient acknowledged the event. +func IsACK(target Result) bool { + // special case, nil target also means ACK. + if target == nil { + return true + } + + return ResultIs(target, ResultACK) +} + +// IsNACK true means the recipient did not acknowledge the event. +func IsNACK(target Result) bool { + return ResultIs(target, ResultNACK) +} + +// IsUndelivered true means the target result is not an ACK/NACK, but some other +// error unrelated to delivery not from the intended recipient. Likely target +// is an error that represents some part of the protocol is misconfigured or +// the event that was attempting to be sent was invalid. +func IsUndelivered(target Result) bool { + if target == nil { + // Short-circuit nil result is ACK. + return false + } + return !ResultIs(target, ResultACK) && !ResultIs(target, ResultNACK) +} + +var ( + ResultACK = NewReceipt(true, "") + ResultNACK = NewReceipt(false, "") +) + +// NewReceipt returns a fully populated protocol Receipt that should be used as +// a transport.Result. This type holds the base ACK/NACK results. +func NewReceipt(ack bool, messageFmt string, args ...interface{}) Result { + return &Receipt{ + Err: fmt.Errorf(messageFmt, args...), + ACK: ack, + } +} + +// Receipt wraps the fields required to understand if a protocol event is acknowledged. +type Receipt struct { + Err error + ACK bool +} + +// make sure Result implements error. +var _ error = (*Receipt)(nil) + +// Is returns if the target error is a Result type checking target. +func (e *Receipt) Is(target error) bool { + if o, ok := target.(*Receipt); ok { + if e == nil { + // Special case nil e as ACK. + return o.ACK + } + return e.ACK == o.ACK + } + // Allow for wrapped errors. + if e != nil { + return errors.Is(e.Err, target) + } + return false +} + +// Error returns the string that is formed by using the format string with the +// provided args. 
+func (e *Receipt) Error() string { + if e != nil { + return e.Err.Error() + } + return "" +} + +// Unwrap returns the wrapped error if exist or nil +func (e *Receipt) Unwrap() error { + if e != nil { + return errors.Unwrap(e.Err) + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/staticcheck.conf b/vendor/github.com/cloudevents/sdk-go/v2/staticcheck.conf new file mode 100644 index 000000000..d6f269556 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/staticcheck.conf @@ -0,0 +1,3 @@ +checks = [ + "all", "-ST1003", +] \ No newline at end of file diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go b/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go new file mode 100644 index 000000000..814626874 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go @@ -0,0 +1,41 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import "reflect" + +// Allocate allocates a new instance of type t and returns: +// asPtr is of type t if t is a pointer type and of type &t otherwise +// asValue is a Value of type t pointing to the same data as asPtr +func Allocate(obj interface{}) (asPtr interface{}, asValue reflect.Value) { + if obj == nil { + return nil, reflect.Value{} + } + + switch t := reflect.TypeOf(obj); t.Kind() { + case reflect.Ptr: + reflectPtr := reflect.New(t.Elem()) + asPtr = reflectPtr.Interface() + asValue = reflectPtr + case reflect.Map: + reflectPtr := reflect.MakeMap(t) + asPtr = reflectPtr.Interface() + asValue = reflectPtr + case reflect.String: + reflectPtr := reflect.New(t) + asPtr = "" + asValue = reflectPtr.Elem() + case reflect.Slice: + reflectPtr := reflect.MakeSlice(t, 0, 0) + asPtr = reflectPtr.Interface() + asValue = reflectPtr + default: + reflectPtr := reflect.New(t) + asPtr = reflectPtr.Interface() + asValue = reflectPtr.Elem() + } + return +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go 
b/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go new file mode 100644 index 000000000..3a0a595a1 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go @@ -0,0 +1,45 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package types implements the CloudEvents type system. + +CloudEvents defines a set of abstract types for event context attributes. Each +type has a corresponding native Go type and a canonical string encoding. The +native Go types used to represent the CloudEvents types are: +bool, int32, string, []byte, *url.URL, time.Time + + +----------------+----------------+-----------------------------------+ + |CloudEvents Type|Native Type |Convertible From | + +================+================+===================================+ + |Bool |bool |bool | + +----------------+----------------+-----------------------------------+ + |Integer |int32 |Any numeric type with value in | + | | |range of int32 | + +----------------+----------------+-----------------------------------+ + |String |string |string | + +----------------+----------------+-----------------------------------+ + |Binary |[]byte |[]byte | + +----------------+----------------+-----------------------------------+ + |URI-Reference |*url.URL |url.URL, types.URIRef, types.URI | + +----------------+----------------+-----------------------------------+ + |URI |*url.URL |url.URL, types.URIRef, types.URI | + | | |Must be an absolute URI. | + +----------------+----------------+-----------------------------------+ + |Timestamp |time.Time |time.Time, types.Timestamp | + +----------------+----------------+-----------------------------------+ + +Extension attributes may be stored as a native type or a canonical string. The +To functions will convert to the desired from any convertible type +or from the canonical string form. + +The Parse and Format functions convert native types to/from +canonical strings. 
+ +Note are no Parse or Format functions for URL or string. For URL use the +standard url.Parse() and url.URL.String(). The canonical string format of a +string is the string itself. +*/ +package types diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go b/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go new file mode 100644 index 000000000..ff049727d --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go @@ -0,0 +1,75 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "time" +) + +// Timestamp wraps time.Time to normalize the time layout to RFC3339. It is +// intended to enforce compliance with the CloudEvents spec for their +// definition of Timestamp. Custom marshal methods are implemented to ensure +// the outbound Timestamp is a string in the RFC3339 layout. +type Timestamp struct { + time.Time +} + +// ParseTimestamp attempts to parse the given time assuming RFC3339 layout +func ParseTimestamp(s string) (*Timestamp, error) { + if s == "" { + return nil, nil + } + tt, err := ParseTime(s) + return &Timestamp{Time: tt}, err +} + +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. +func (t *Timestamp) MarshalJSON() ([]byte, error) { + if t == nil || t.IsZero() { + return []byte(`""`), nil + } + return []byte(fmt.Sprintf("%q", t)), nil +} + +// UnmarshalJSON implements the json unmarshal method used when this type is +// unmarshaled using json.Unmarshal. +func (t *Timestamp) UnmarshalJSON(b []byte) error { + var timestamp string + if err := json.Unmarshal(b, ×tamp); err != nil { + return err + } + var err error + t.Time, err = ParseTime(timestamp) + return err +} + +// MarshalXML implements a custom xml marshal method used when this type is +// marshaled using xml.Marshal. 
+func (t *Timestamp) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if t == nil || t.IsZero() { + return e.EncodeElement(nil, start) + } + return e.EncodeElement(t.String(), start) +} + +// UnmarshalXML implements the xml unmarshal method used when this type is +// unmarshaled using xml.Unmarshal. +func (t *Timestamp) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var timestamp string + if err := d.DecodeElement(×tamp, &start); err != nil { + return err + } + var err error + t.Time, err = ParseTime(timestamp) + return err +} + +// String outputs the time using RFC3339 format. +func (t Timestamp) String() string { return FormatTime(t.Time) } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go b/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go new file mode 100644 index 000000000..bed608094 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go @@ -0,0 +1,86 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "net/url" +) + +// URI is a wrapper to url.URL. It is intended to enforce compliance with +// the CloudEvents spec for their definition of URI. Custom +// marshal methods are implemented to ensure the outbound URI object +// is a flat string. +type URI struct { + url.URL +} + +// ParseURI attempts to parse the given string as a URI. +func ParseURI(u string) *URI { + if u == "" { + return nil + } + pu, err := url.Parse(u) + if err != nil { + return nil + } + return &URI{URL: *pu} +} + +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. +func (u URI) MarshalJSON() ([]byte, error) { + b := fmt.Sprintf("%q", u.String()) + return []byte(b), nil +} + +// UnmarshalJSON implements the json unmarshal method used when this type is +// unmarshaled using json.Unmarshal. 
+func (u *URI) UnmarshalJSON(b []byte) error { + var ref string + if err := json.Unmarshal(b, &ref); err != nil { + return err + } + r := ParseURI(ref) + if r != nil { + *u = *r + } + return nil +} + +// MarshalXML implements a custom xml marshal method used when this type is +// marshaled using xml.Marshal. +func (u URI) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + return e.EncodeElement(u.String(), start) +} + +// UnmarshalXML implements the xml unmarshal method used when this type is +// unmarshaled using xml.Unmarshal. +func (u *URI) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var ref string + if err := d.DecodeElement(&ref, &start); err != nil { + return err + } + r := ParseURI(ref) + if r != nil { + *u = *r + } + return nil +} + +func (u URI) Validate() bool { + return u.IsAbs() +} + +// String returns the full string representation of the URI-Reference. +func (u *URI) String() string { + if u == nil { + return "" + } + return u.URL.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go b/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go new file mode 100644 index 000000000..22fa12314 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go @@ -0,0 +1,82 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "net/url" +) + +// URIRef is a wrapper to url.URL. It is intended to enforce compliance with +// the CloudEvents spec for their definition of URI-Reference. Custom +// marshal methods are implemented to ensure the outbound URIRef object is +// is a flat string. +type URIRef struct { + url.URL +} + +// ParseURIRef attempts to parse the given string as a URI-Reference. 
+func ParseURIRef(u string) *URIRef { + if u == "" { + return nil + } + pu, err := url.Parse(u) + if err != nil { + return nil + } + return &URIRef{URL: *pu} +} + +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. +func (u URIRef) MarshalJSON() ([]byte, error) { + b := fmt.Sprintf("%q", u.String()) + return []byte(b), nil +} + +// UnmarshalJSON implements the json unmarshal method used when this type is +// unmarshaled using json.Unmarshal. +func (u *URIRef) UnmarshalJSON(b []byte) error { + var ref string + if err := json.Unmarshal(b, &ref); err != nil { + return err + } + r := ParseURIRef(ref) + if r != nil { + *u = *r + } + return nil +} + +// MarshalXML implements a custom xml marshal method used when this type is +// marshaled using xml.Marshal. +func (u URIRef) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + return e.EncodeElement(u.String(), start) +} + +// UnmarshalXML implements the xml unmarshal method used when this type is +// unmarshaled using xml.Unmarshal. +func (u *URIRef) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var ref string + if err := d.DecodeElement(&ref, &start); err != nil { + return err + } + r := ParseURIRef(ref) + if r != nil { + *u = *r + } + return nil +} + +// String returns the full string representation of the URI-Reference. 
+func (u *URIRef) String() string { + if u == nil { + return "" + } + return u.URL.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/value.go b/vendor/github.com/cloudevents/sdk-go/v2/types/value.go new file mode 100644 index 000000000..14004d3e1 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/value.go @@ -0,0 +1,337 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import ( + "encoding/base64" + "fmt" + "math" + "net/url" + "reflect" + "strconv" + "time" +) + +// FormatBool returns canonical string format: "true" or "false" +func FormatBool(v bool) string { return strconv.FormatBool(v) } + +// FormatInteger returns canonical string format: decimal notation. +func FormatInteger(v int32) string { return strconv.Itoa(int(v)) } + +// FormatBinary returns canonical string format: standard base64 encoding +func FormatBinary(v []byte) string { return base64.StdEncoding.EncodeToString(v) } + +// FormatTime returns canonical string format: RFC3339 with nanoseconds +func FormatTime(v time.Time) string { return v.UTC().Format(time.RFC3339Nano) } + +// ParseBool parse canonical string format: "true" or "false" +func ParseBool(v string) (bool, error) { return strconv.ParseBool(v) } + +// ParseInteger parse canonical string format: decimal notation. +func ParseInteger(v string) (int32, error) { + // Accept floating-point but truncate to int32 as per CE spec. 
+ f, err := strconv.ParseFloat(v, 64) + if err != nil { + return 0, err + } + if f > math.MaxInt32 || f < math.MinInt32 { + return 0, rangeErr(v) + } + return int32(f), nil +} + +// ParseBinary parse canonical string format: standard base64 encoding +func ParseBinary(v string) ([]byte, error) { return base64.StdEncoding.DecodeString(v) } + +// ParseTime parse canonical string format: RFC3339 with nanoseconds +func ParseTime(v string) (time.Time, error) { + t, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + err := convertErr(time.Time{}, v) + err.extra = ": not in RFC3339 format" + return time.Time{}, err + } + return t, nil +} + +// Format returns the canonical string format of v, where v can be +// any type that is convertible to a CloudEvents type. +func Format(v interface{}) (string, error) { + v, err := Validate(v) + if err != nil { + return "", err + } + switch v := v.(type) { + case bool: + return FormatBool(v), nil + case int32: + return FormatInteger(v), nil + case string: + return v, nil + case []byte: + return FormatBinary(v), nil + case URI: + return v.String(), nil + case URIRef: + // url.URL is often passed by pointer so allow both + return v.String(), nil + case Timestamp: + return FormatTime(v.Time), nil + default: + return "", fmt.Errorf("%T is not a CloudEvents type", v) + } +} + +// Validate v is a valid CloudEvents attribute value, convert it to one of: +// bool, int32, string, []byte, types.URI, types.URIRef, types.Timestamp +func Validate(v interface{}) (interface{}, error) { + switch v := v.(type) { + case bool, int32, string, []byte: + return v, nil // Already a CloudEvents type, no validation needed. 
+ + case uint, uintptr, uint8, uint16, uint32, uint64: + u := reflect.ValueOf(v).Uint() + if u > math.MaxInt32 { + return nil, rangeErr(v) + } + return int32(u), nil + case int, int8, int16, int64: + i := reflect.ValueOf(v).Int() + if i > math.MaxInt32 || i < math.MinInt32 { + return nil, rangeErr(v) + } + return int32(i), nil + case float32, float64: + f := reflect.ValueOf(v).Float() + if f > math.MaxInt32 || f < math.MinInt32 { + return nil, rangeErr(v) + } + return int32(f), nil + + case *url.URL: + if v == nil { + break + } + return URI{URL: *v}, nil + case url.URL: + return URI{URL: v}, nil + case *URIRef: + if v != nil { + return *v, nil + } + return nil, nil + case URIRef: + return v, nil + case *URI: + if v != nil { + return *v, nil + } + return nil, nil + case URI: + return v, nil + case time.Time: + return Timestamp{Time: v}, nil + case *time.Time: + if v == nil { + break + } + return Timestamp{Time: *v}, nil + case Timestamp: + return v, nil + } + rx := reflect.ValueOf(v) + if rx.Kind() == reflect.Ptr && !rx.IsNil() { + // Allow pointers-to convertible types + return Validate(rx.Elem().Interface()) + } + return nil, fmt.Errorf("invalid CloudEvents value: %#v", v) +} + +// Clone v clones a CloudEvents attribute value, which is one of the valid types: +// +// bool, int32, string, []byte, types.URI, types.URIRef, types.Timestamp +// +// Returns the same type +// Panics if the type is not valid +func Clone(v interface{}) interface{} { + if v == nil { + return nil + } + switch v := v.(type) { + case bool, int32, string, nil: + return v // Already a CloudEvents type, no validation needed. 
+ case []byte: + clone := make([]byte, len(v)) + copy(clone, v) + return v + case url.URL: + return URI{v} + case *url.URL: + return &URI{*v} + case URIRef: + return v + case *URIRef: + return &URIRef{v.URL} + case URI: + return v + case *URI: + return &URI{v.URL} + case time.Time: + return Timestamp{v} + case *time.Time: + return &Timestamp{*v} + case Timestamp: + return v + case *Timestamp: + return &Timestamp{v.Time} + } + panic(fmt.Errorf("invalid CloudEvents value: %#v", v)) +} + +// ToBool accepts a bool value or canonical "true"/"false" string. +func ToBool(v interface{}) (bool, error) { + v, err := Validate(v) + if err != nil { + return false, err + } + switch v := v.(type) { + case bool: + return v, nil + case string: + return ParseBool(v) + default: + return false, convertErr(true, v) + } +} + +// ToInteger accepts any numeric value in int32 range, or canonical string. +func ToInteger(v interface{}) (int32, error) { + v, err := Validate(v) + if err != nil { + return 0, err + } + switch v := v.(type) { + case int32: + return v, nil + case string: + return ParseInteger(v) + default: + return 0, convertErr(int32(0), v) + } +} + +// ToString returns a string value unaltered. +// +// This function does not perform canonical string encoding, use one of the +// Format functions for that. +func ToString(v interface{}) (string, error) { + v, err := Validate(v) + if err != nil { + return "", err + } + switch v := v.(type) { + case string: + return v, nil + default: + return "", convertErr("", v) + } +} + +// ToBinary returns a []byte value, decoding from base64 string if necessary. +func ToBinary(v interface{}) ([]byte, error) { + v, err := Validate(v) + if err != nil { + return nil, err + } + switch v := v.(type) { + case []byte: + return v, nil + case string: + return base64.StdEncoding.DecodeString(v) + default: + return nil, convertErr([]byte(nil), v) + } +} + +// ToURL returns a *url.URL value, parsing from string if necessary. 
+func ToURL(v interface{}) (*url.URL, error) { + v, err := Validate(v) + if err != nil { + return nil, err + } + switch v := v.(type) { + case *URI: + return &v.URL, nil + case URI: + return &v.URL, nil + case *URIRef: + return &v.URL, nil + case URIRef: + return &v.URL, nil + case string: + u, err := url.Parse(v) + if err != nil { + return nil, err + } + return u, nil + default: + return nil, convertErr((*url.URL)(nil), v) + } +} + +// ToTime returns a time.Time value, parsing from RFC3339 string if necessary. +func ToTime(v interface{}) (time.Time, error) { + v, err := Validate(v) + if err != nil { + return time.Time{}, err + } + switch v := v.(type) { + case Timestamp: + return v.Time, nil + case string: + ts, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + return time.Time{}, err + } + return ts, nil + default: + return time.Time{}, convertErr(time.Time{}, v) + } +} + +func IsZero(v interface{}) bool { + // Fast path + if v == nil { + return true + } + if s, ok := v.(string); ok && s == "" { + return true + } + return reflect.ValueOf(v).IsZero() +} + +type ConvertErr struct { + // Value being converted + Value interface{} + // Type of attempted conversion + Type reflect.Type + + extra string +} + +func (e *ConvertErr) Error() string { + return fmt.Sprintf("cannot convert %#v to %s%s", e.Value, e.Type, e.extra) +} + +func convertErr(target, v interface{}) *ConvertErr { + return &ConvertErr{Value: v, Type: reflect.TypeOf(target)} +} + +func rangeErr(v interface{}) error { + e := convertErr(int32(0), v) + e.extra = ": out of range" + return e +} diff --git a/vendor/github.com/eclipse/paho.golang/LICENSE b/vendor/github.com/eclipse/paho.golang/LICENSE new file mode 100644 index 000000000..d3087e4c5 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/LICENSE @@ -0,0 +1,277 @@ +Eclipse Public License - v 2.0 + + THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE + PUBLIC LICENSE ("AGREEMENT"). 
ANY USE, REPRODUCTION OR DISTRIBUTION + OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. DEFINITIONS + +"Contribution" means: + + a) in the case of the initial Contributor, the initial content + Distributed under this Agreement, and + + b) in the case of each subsequent Contributor: + i) changes to the Program, and + ii) additions to the Program; + where such changes and/or additions to the Program originate from + and are Distributed by that particular Contributor. A Contribution + "originates" from a Contributor if it was added to the Program by + such Contributor itself or anyone acting on such Contributor's behalf. + Contributions do not include changes or additions to the Program that + are not Modified Works. + +"Contributor" means any person or entity that Distributes the Program. + +"Licensed Patents" mean patent claims licensable by a Contributor which +are necessarily infringed by the use or sale of its Contribution alone +or when combined with the Program. + +"Program" means the Contributions Distributed in accordance with this +Agreement. + +"Recipient" means anyone who receives the Program under this Agreement +or any Secondary License (as applicable), including Contributors. + +"Derivative Works" shall mean any work, whether in Source Code or other +form, that is based on (or derived from) the Program and for which the +editorial revisions, annotations, elaborations, or other modifications +represent, as a whole, an original work of authorship. + +"Modified Works" shall mean any work in Source Code or other form that +results from an addition to, deletion from, or modification of the +contents of the Program, including, for purposes of clarity any new file +in Source Code form that contains any contents of the Program. 
Modified +Works shall not include works that contain only declarations, +interfaces, types, classes, structures, or files of the Program solely +in each case in order to link to, bind by name, or subclass the Program +or Modified Works thereof. + +"Distribute" means the acts of a) distributing or b) making available +in any manner that enables the transfer of a copy. + +"Source Code" means the form of a Program preferred for making +modifications, including but not limited to software source code, +documentation source, and configuration files. + +"Secondary License" means either the GNU General Public License, +Version 2.0, or any later versions of that license, including any +exceptions or additional permissions as identified by the initial +Contributor. + +2. GRANT OF RIGHTS + + a) Subject to the terms of this Agreement, each Contributor hereby + grants Recipient a non-exclusive, worldwide, royalty-free copyright + license to reproduce, prepare Derivative Works of, publicly display, + publicly perform, Distribute and sublicense the Contribution of such + Contributor, if any, and such Derivative Works. + + b) Subject to the terms of this Agreement, each Contributor hereby + grants Recipient a non-exclusive, worldwide, royalty-free patent + license under Licensed Patents to make, use, sell, offer to sell, + import and otherwise transfer the Contribution of such Contributor, + if any, in Source Code or other form. This patent license shall + apply to the combination of the Contribution and the Program if, at + the time the Contribution is added by the Contributor, such addition + of the Contribution causes such combination to be covered by the + Licensed Patents. The patent license shall not apply to any other + combinations which include the Contribution. No hardware per se is + licensed hereunder. 
+ + c) Recipient understands that although each Contributor grants the + licenses to its Contributions set forth herein, no assurances are + provided by any Contributor that the Program does not infringe the + patent or other intellectual property rights of any other entity. + Each Contributor disclaims any liability to Recipient for claims + brought by any other entity based on infringement of intellectual + property rights or otherwise. As a condition to exercising the + rights and licenses granted hereunder, each Recipient hereby + assumes sole responsibility to secure any other intellectual + property rights needed, if any. For example, if a third party + patent license is required to allow Recipient to Distribute the + Program, it is Recipient's responsibility to acquire that license + before distributing the Program. + + d) Each Contributor represents that to its knowledge it has + sufficient copyright rights in its Contribution, if any, to grant + the copyright license set forth in this Agreement. + + e) Notwithstanding the terms of any Secondary License, no + Contributor makes additional grants to any Recipient (other than + those set forth in this Agreement) as a result of such Recipient's + receipt of the Program under the terms of a Secondary License + (if permitted under the terms of Section 3). + +3. 
REQUIREMENTS + +3.1 If a Contributor Distributes the Program in any form, then: + + a) the Program must also be made available as Source Code, in + accordance with section 3.2, and the Contributor must accompany + the Program with a statement that the Source Code for the Program + is available under this Agreement, and informs Recipients how to + obtain it in a reasonable manner on or through a medium customarily + used for software exchange; and + + b) the Contributor may Distribute the Program under a license + different than this Agreement, provided that such license: + i) effectively disclaims on behalf of all other Contributors all + warranties and conditions, express and implied, including + warranties or conditions of title and non-infringement, and + implied warranties or conditions of merchantability and fitness + for a particular purpose; + + ii) effectively excludes on behalf of all other Contributors all + liability for damages, including direct, indirect, special, + incidental and consequential damages, such as lost profits; + + iii) does not attempt to limit or alter the recipients' rights + in the Source Code under section 3.2; and + + iv) requires any subsequent distribution of the Program by any + party to be under a license that satisfies the requirements + of this section 3. + +3.2 When the Program is Distributed as Source Code: + + a) it must be made available under this Agreement, or if the + Program (i) is combined with other material in a separate file or + files made available under a Secondary License, and (ii) the initial + Contributor attached to the Source Code the notice described in + Exhibit A of this Agreement, then the Program may be made available + under the terms of such Secondary Licenses, and + + b) a copy of this Agreement must be included with each copy of + the Program. 
+ +3.3 Contributors may not remove or alter any copyright, patent, +trademark, attribution notices, disclaimers of warranty, or limitations +of liability ("notices") contained within the Program from any copy of +the Program which they Distribute, provided that Contributors may add +their own appropriate notices. + +4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities +with respect to end users, business partners and the like. While this +license is intended to facilitate the commercial use of the Program, +the Contributor who includes the Program in a commercial product +offering should do so in a manner which does not create potential +liability for other Contributors. Therefore, if a Contributor includes +the Program in a commercial product offering, such Contributor +("Commercial Contributor") hereby agrees to defend and indemnify every +other Contributor ("Indemnified Contributor") against any losses, +damages and costs (collectively "Losses") arising from claims, lawsuits +and other legal actions brought by a third party against the Indemnified +Contributor to the extent caused by the acts or omissions of such +Commercial Contributor in connection with its distribution of the Program +in a commercial product offering. The obligations in this section do not +apply to any claims or Losses relating to any actual or alleged +intellectual property infringement. In order to qualify, an Indemnified +Contributor must: a) promptly notify the Commercial Contributor in +writing of such claim, and b) allow the Commercial Contributor to control, +and cooperate with the Commercial Contributor in, the defense and any +related settlement negotiations. The Indemnified Contributor may +participate in any such claim at its own expense. + +For example, a Contributor might include the Program in a commercial +product offering, Product X. That Contributor is then a Commercial +Contributor. 
If that Commercial Contributor then makes performance +claims, or offers warranties related to Product X, those performance +claims and warranties are such Commercial Contributor's responsibility +alone. Under this section, the Commercial Contributor would have to +defend claims against the other Contributors related to those performance +claims and warranties, and if a court requires any other Contributor to +pay any damages as a result, the Commercial Contributor must pay +those damages. + +5. NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT +PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS" +BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR +IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF +TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR +PURPOSE. Each Recipient is solely responsible for determining the +appropriateness of using and distributing the Program and assumes all +risks associated with its exercise of rights under this Agreement, +including but not limited to the risks and costs of program errors, +compliance with applicable laws, damage to or loss of data, programs +or equipment, and unavailability or interruption of operations. + +6. DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT +PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS +SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST +PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE +EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + +7. 
GENERAL + +If any provision of this Agreement is invalid or unenforceable under +applicable law, it shall not affect the validity or enforceability of +the remainder of the terms of this Agreement, and without further +action by the parties hereto, such provision shall be reformed to the +minimum extent necessary to make such provision valid and enforceable. + +If Recipient institutes patent litigation against any entity +(including a cross-claim or counterclaim in a lawsuit) alleging that the +Program itself (excluding combinations of the Program with other software +or hardware) infringes such Recipient's patent(s), then such Recipient's +rights granted under Section 2(b) shall terminate as of the date such +litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it +fails to comply with any of the material terms or conditions of this +Agreement and does not cure such failure in a reasonable period of +time after becoming aware of such noncompliance. If all Recipient's +rights under this Agreement terminate, Recipient agrees to cease use +and distribution of the Program as soon as reasonably practicable. +However, Recipient's obligations under this Agreement and any licenses +granted by Recipient relating to the Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, +but in order to avoid inconsistency the Agreement is copyrighted and +may only be modified in the following manner. The Agreement Steward +reserves the right to publish new versions (including revisions) of +this Agreement from time to time. No one other than the Agreement +Steward has the right to modify this Agreement. The Eclipse Foundation +is the initial Agreement Steward. The Eclipse Foundation may assign the +responsibility to serve as the Agreement Steward to a suitable separate +entity. Each new version of the Agreement will be given a distinguishing +version number. 
The Program (including Contributions) may always be +Distributed subject to the version of the Agreement under which it was +received. In addition, after a new version of the Agreement is published, +Contributor may elect to Distribute the Program (including its +Contributions) under the new version. + +Except as expressly stated in Sections 2(a) and 2(b) above, Recipient +receives no rights or licenses to the intellectual property of any +Contributor under this Agreement, whether expressly, by implication, +estoppel or otherwise. All rights in the Program not expressly granted +under this Agreement are reserved. Nothing in this Agreement is intended +to be enforceable by any entity that is not a Contributor or Recipient. +No third-party beneficiary rights are created under this Agreement. + +Exhibit A - Form of Secondary Licenses Notice + +"This Source Code may also be made available under the following +Secondary Licenses when the conditions for such availability set forth +in the Eclipse Public License, v. 2.0 are satisfied: {name license(s), +version(s), and exceptions or additional permissions here}." + + Simply including a copy of this Agreement, including this Exhibit A + is not sufficient to license the Source Code under Secondary Licenses. + + If it is not possible or desirable to put the notice in a particular + file, then You may include the notice in a location (such as a LICENSE + file in a relevant directory) where a recipient would be likely to + look for such a notice. + + You may add additional accurate notices of copyright ownership. 
diff --git a/vendor/github.com/eclipse/paho.golang/packets/auth.go b/vendor/github.com/eclipse/paho.golang/packets/auth.go new file mode 100644 index 000000000..56237e00c --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/auth.go @@ -0,0 +1,77 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Auth is the Variable Header definition for a Auth control packet +type Auth struct { + Properties *Properties + ReasonCode byte +} + +// AuthSuccess is the return code for successful authentication +const ( + AuthSuccess = 0x00 + AuthContinueAuthentication = 0x18 + AuthReauthenticate = 0x19 +) + +func (a *Auth) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "AUTH: ReasonCode:%X", a.ReasonCode) + if a.Properties != nil { + fmt.Fprintf(&b, " Properties:\n%s", a.Properties) + } else { + fmt.Fprint(&b, "\n") + } + + return b.String() +} + +// Unpack is the implementation of the interface required function for a packet +func (a *Auth) Unpack(r *bytes.Buffer) error { + var err error + + success := r.Len() == 0 + noProps := r.Len() == 1 + if !success { + a.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + if !noProps { + err = a.Properties.Unpack(r, AUTH) + if err != nil { + return err + } + } + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (a *Auth) Buffers() net.Buffers { + idvp := a.Properties.Pack(AUTH) + propLen := encodeVBI(len(idvp)) + n := net.Buffers{[]byte{a.ReasonCode}, propLen} + if len(idvp) > 0 { + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (a *Auth) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: AUTH}} + cp.Content = a + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/connack.go b/vendor/github.com/eclipse/paho.golang/packets/connack.go new file mode 100644 
index 000000000..3041fbcb5
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.golang/packets/connack.go
@@ -0,0 +1,145 @@
+package packets
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net"
+)
+
+// Connack is the Variable Header definition for a connack control packet
+type Connack struct {
+	Properties     *Properties
+	ReasonCode     byte
+	SessionPresent bool
+}
+
+const (
+	ConnackSuccess                     = 0x00
+	ConnackUnspecifiedError            = 0x80
+	ConnackMalformedPacket             = 0x81
+	ConnackProtocolError               = 0x82 // MQTT 5.0 [MQTT-3.2.2.2]; 0x81 here duplicated ConnackMalformedPacket
+	ConnackImplementationSpecificError = 0x83
+	ConnackUnsupportedProtocolVersion  = 0x84
+	ConnackInvalidClientID             = 0x85
+	ConnackBadUsernameOrPassword       = 0x86
+	ConnackNotAuthorized               = 0x87
+	ConnackServerUnavailable           = 0x88
+	ConnackServerBusy                  = 0x89
+	ConnackBanned                      = 0x8A
+	ConnackBadAuthenticationMethod     = 0x8C
+	ConnackTopicNameInvalid            = 0x90
+	ConnackPacketTooLarge              = 0x95
+	ConnackQuotaExceeded               = 0x97
+	ConnackPayloadFormatInvalid        = 0x99
+	ConnackRetainNotSupported          = 0x9A
+	ConnackQoSNotSupported             = 0x9B
+	ConnackUseAnotherServer            = 0x9C
+	ConnackServerMoved                 = 0x9D
+	ConnackConnectionRateExceeded      = 0x9F
+)
+
+func (c *Connack) String() string {
+	return fmt.Sprintf("CONNACK: ReasonCode:%d SessionPresent:%t\nProperties:\n%s", c.ReasonCode, c.SessionPresent, c.Properties)
+}
+
+// Unpack is the implementation of the interface required function for a packet
+func (c *Connack) Unpack(r *bytes.Buffer) error {
+	connackFlags, err := r.ReadByte()
+	if err != nil {
+		return err
+	}
+	c.SessionPresent = connackFlags&0x01 > 0
+
+	c.ReasonCode, err = r.ReadByte()
+	if err != nil {
+		return err
+	}
+
+	err = c.Properties.Unpack(r, CONNACK)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Buffers is the implementation of the interface required function for a packet
+func (c *Connack) Buffers() net.Buffers {
+	var header bytes.Buffer
+
+	if c.SessionPresent {
+		header.WriteByte(1)
+	} else {
+		header.WriteByte(0)
+	}
+	header.WriteByte(c.ReasonCode)
+
+	idvp := c.Properties.Pack(CONNACK)
+
propLen := encodeVBI(len(idvp)) + + n := net.Buffers{header.Bytes(), propLen} + if len(idvp) > 0 { + n = append(n, idvp) + } + + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (c *Connack) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: CONNACK}} + cp.Content = c + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (c *Connack) Reason() string { + switch c.ReasonCode { + case 0: + return "Success - The Connection is accepted." + case 128: + return "Unspecified error - The Server does not wish to reveal the reason for the failure, or none of the other Reason Codes apply." + case 129: + return "Malformed Packet - Data within the CONNECT packet could not be correctly parsed." + case 130: + return "Protocol Error - Data in the CONNECT packet does not conform to this specification." + case 131: + return "Implementation specific error - The CONNECT is valid but is not accepted by this Server." + case 132: + return "Unsupported Protocol Version - The Server does not support the version of the MQTT protocol requested by the Client." + case 133: + return "Client Identifier not valid - The Client Identifier is a valid string but is not allowed by the Server." + case 134: + return "Bad User Name or Password - The Server does not accept the User Name or Password specified by the Client" + case 135: + return "Not authorized - The Client is not authorized to connect." + case 136: + return "Server unavailable - The MQTT Server is not available." + case 137: + return "Server busy - The Server is busy. Try again later." + case 138: + return "Banned - This Client has been banned by administrative action. Contact the server administrator." + case 140: + return "Bad authentication method - The authentication method is not supported or does not match the authentication method currently in use." 
+ case 144: + return "Topic Name invalid - The Will Topic Name is not malformed, but is not accepted by this Server." + case 149: + return "Packet too large - The CONNECT packet exceeded the maximum permissible size." + case 151: + return "Quota exceeded - An implementation or administrative imposed limit has been exceeded." + case 154: + return "Retain not supported - The Server does not support retained messages, and Will Retain was set to 1." + case 155: + return "QoS not supported - The Server does not support the QoS set in Will QoS." + case 156: + return "Use another server - The Client should temporarily use another server." + case 157: + return "Server moved - The Client should permanently use another server." + case 159: + return "Connection rate exceeded - The connection rate limit has been exceeded." + } + + return "" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/connect.go b/vendor/github.com/eclipse/paho.golang/packets/connect.go new file mode 100644 index 000000000..31340f6bd --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/connect.go @@ -0,0 +1,189 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Connect is the Variable Header definition for a connect control packet +type Connect struct { + WillMessage []byte + Password []byte + Username string + ProtocolName string + ClientID string + WillTopic string + Properties *Properties + WillProperties *Properties + KeepAlive uint16 + ProtocolVersion byte + WillQOS byte + PasswordFlag bool + UsernameFlag bool + WillRetain bool + WillFlag bool + CleanStart bool +} + +func (c *Connect) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "CONNECT: ProtocolName:%s ProtocolVersion:%d ClientID:%s KeepAlive:%d CleanStart:%t", c.ProtocolName, c.ProtocolVersion, c.ClientID, c.KeepAlive, c.CleanStart) + if c.UsernameFlag { + fmt.Fprintf(&b, " Username:%s", c.Username) + } + if c.PasswordFlag { + fmt.Fprintf(&b, " Password:%s", c.Password) + 
} + fmt.Fprint(&b, "\n") + if c.WillFlag { + fmt.Fprintf(&b, " WillTopic:%s WillQOS:%d WillRetain:%t WillMessage:\n%s\n", c.WillTopic, c.WillQOS, c.WillRetain, c.WillMessage) + if c.WillProperties != nil { + fmt.Fprintf(&b, "WillProperties:\n%s", c.WillProperties) + } + } + if c.Properties != nil { + fmt.Fprintf(&b, "Properties:\n%s", c.Properties) + } + + return b.String() +} + +// PackFlags takes the Connect flags and packs them into the single byte +// representation used on the wire by MQTT +func (c *Connect) PackFlags() (f byte) { + if c.UsernameFlag { + f |= 0x01 << 7 + } + if c.PasswordFlag { + f |= 0x01 << 6 + } + if c.WillFlag { + f |= 0x01 << 2 + f |= c.WillQOS << 3 + if c.WillRetain { + f |= 0x01 << 5 + } + } + if c.CleanStart { + f |= 0x01 << 1 + } + return +} + +// UnpackFlags takes the wire byte representing the connect options flags +// and fills out the appropriate variables in the struct +func (c *Connect) UnpackFlags(b byte) { + c.CleanStart = 1&(b>>1) > 0 + c.WillFlag = 1&(b>>2) > 0 + c.WillQOS = 3 & (b >> 3) + c.WillRetain = 1&(b>>5) > 0 + c.PasswordFlag = 1&(b>>6) > 0 + c.UsernameFlag = 1&(b>>7) > 0 +} + +//Unpack is the implementation of the interface required function for a packet +func (c *Connect) Unpack(r *bytes.Buffer) error { + var err error + + if c.ProtocolName, err = readString(r); err != nil { + return err + } + + if c.ProtocolVersion, err = r.ReadByte(); err != nil { + return err + } + + flags, err := r.ReadByte() + if err != nil { + return err + } + c.UnpackFlags(flags) + + if c.KeepAlive, err = readUint16(r); err != nil { + return err + } + + err = c.Properties.Unpack(r, CONNECT) + if err != nil { + return err + } + + c.ClientID, err = readString(r) + if err != nil { + return err + } + + if c.WillFlag { + c.WillProperties = &Properties{} + err = c.WillProperties.Unpack(r, CONNECT) + if err != nil { + return err + } + c.WillTopic, err = readString(r) + if err != nil { + return err + } + c.WillMessage, err = readBinary(r) + if err 
!= nil { + return err + } + } + + if c.UsernameFlag { + c.Username, err = readString(r) + if err != nil { + return err + } + } + + if c.PasswordFlag { + c.Password, err = readBinary(r) + if err != nil { + return err + } + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (c *Connect) Buffers() net.Buffers { + var cp bytes.Buffer + + writeString(c.ProtocolName, &cp) + cp.WriteByte(c.ProtocolVersion) + cp.WriteByte(c.PackFlags()) + writeUint16(c.KeepAlive, &cp) + idvp := c.Properties.Pack(CONNECT) + encodeVBIdirect(len(idvp), &cp) + cp.Write(idvp) + + writeString(c.ClientID, &cp) + if c.WillFlag { + willIdvp := c.WillProperties.Pack(CONNECT) + encodeVBIdirect(len(willIdvp), &cp) + cp.Write(willIdvp) + writeString(c.WillTopic, &cp) + writeBinary(c.WillMessage, &cp) + } + if c.UsernameFlag { + writeString(c.Username, &cp) + } + if c.PasswordFlag { + writeBinary(c.Password, &cp) + } + + return net.Buffers{cp.Bytes()} +} + +// WriteTo is the implementation of the interface required function for a packet +func (c *Connect) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: CONNECT}} + cp.Content = c + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/disconnect.go b/vendor/github.com/eclipse/paho.golang/packets/disconnect.go new file mode 100644 index 000000000..9180207a6 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/disconnect.go @@ -0,0 +1,152 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Disconnect is the Variable Header definition for a Disconnect control packet +type Disconnect struct { + Properties *Properties + ReasonCode byte +} + +func (d *Disconnect) String() string { + return fmt.Sprintf("DISCONNECT: ReasonCode:%X Properties\n%s", d.ReasonCode, d.Properties) +} + +// DisconnectNormalDisconnection, etc are the list of valid disconnection reason codes. 
+const ( + DisconnectNormalDisconnection = 0x00 + DisconnectDisconnectWithWillMessage = 0x04 + DisconnectUnspecifiedError = 0x80 + DisconnectMalformedPacket = 0x81 + DisconnectProtocolError = 0x82 + DisconnectImplementationSpecificError = 0x83 + DisconnectNotAuthorized = 0x87 + DisconnectServerBusy = 0x89 + DisconnectServerShuttingDown = 0x8B + DisconnectKeepAliveTimeout = 0x8D + DisconnectSessionTakenOver = 0x8E + DisconnectTopicFilterInvalid = 0x8F + DisconnectTopicNameInvalid = 0x90 + DisconnectReceiveMaximumExceeded = 0x93 + DisconnectTopicAliasInvalid = 0x94 + DisconnectPacketTooLarge = 0x95 + DisconnectMessageRateTooHigh = 0x96 + DisconnectQuotaExceeded = 0x97 + DisconnectAdministrativeAction = 0x98 + DisconnectPayloadFormatInvalid = 0x99 + DisconnectRetainNotSupported = 0x9A + DisconnectQoSNotSupported = 0x9B + DisconnectUseAnotherServer = 0x9C + DisconnectServerMoved = 0x9D + DisconnectSharedSubscriptionNotSupported = 0x9E + DisconnectConnectionRateExceeded = 0x9F + DisconnectMaximumConnectTime = 0xA0 + DisconnectSubscriptionIdentifiersNotSupported = 0xA1 + DisconnectWildcardSubscriptionsNotSupported = 0xA2 +) + +// Unpack is the implementation of the interface required function for a packet +func (d *Disconnect) Unpack(r *bytes.Buffer) error { + var err error + d.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + err = d.Properties.Unpack(r, DISCONNECT) + if err != nil { + return err + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (d *Disconnect) Buffers() net.Buffers { + idvp := d.Properties.Pack(DISCONNECT) + propLen := encodeVBI(len(idvp)) + n := net.Buffers{[]byte{d.ReasonCode}, propLen} + if len(idvp) > 0 { + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (d *Disconnect) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: DISCONNECT}} + cp.Content = d 
+ + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (d *Disconnect) Reason() string { + switch d.ReasonCode { + case 0: + return "Normal disconnection - Close the connection normally. Do not send the Will Message." + case 4: + return "Disconnect with Will Message - The Client wishes to disconnect but requires that the Server also publishes its Will Message." + case 128: + return "Unspecified error - The Connection is closed but the sender either does not wish to reveal the reason, or none of the other Reason Codes apply." + case 129: + return "Malformed Packet - The received packet does not conform to this specification." + case 130: + return "Protocol Error - An unexpected or out of order packet was received." + case 131: + return "Implementation specific error - The packet received is valid but cannot be processed by this implementation." + case 135: + return "Not authorized - The request is not authorized." + case 137: + return "Server busy - The Server is busy and cannot continue processing requests from this Client." + case 139: + return "Server shutting down - The Server is shutting down." + case 141: + return "Keep Alive timeout - The Connection is closed because no packet has been received for 1.5 times the Keepalive time." + case 142: + return "Session taken over - Another Connection using the same ClientID has connected causing this Connection to be closed." + case 143: + return "Topic Filter invalid - The Topic Filter is correctly formed, but is not accepted by this Sever." + case 144: + return "Topic Name invalid - The Topic Name is correctly formed, but is not accepted by this Client or Server." + case 147: + return "Receive Maximum exceeded - The Client or Server has received more than Receive Maximum publication for which it has not sent PUBACK or PUBCOMP." 
+ case 148: + return "Topic Alias invalid - The Client or Server has received a PUBLISH packet containing a Topic Alias which is greater than the Maximum Topic Alias it sent in the CONNECT or CONNACK packet." + case 149: + return "Packet too large - The packet size is greater than Maximum Packet Size for this Client or Server." + case 150: + return "Message rate too high - The received data rate is too high." + case 151: + return "Quota exceeded - An implementation or administrative imposed limit has been exceeded." + case 152: + return "Administrative action - The Connection is closed due to an administrative action." + case 153: + return "Payload format invalid - The payload format does not match the one specified by the Payload Format Indicator." + case 154: + return "Retain not supported - The Server has does not support retained messages." + case 155: + return "QoS not supported - The Client specified a QoS greater than the QoS specified in a Maximum QoS in the CONNACK." + case 156: + return "Use another server - The Client should temporarily change its Server." + case 157: + return "Server moved - The Server is moved and the Client should permanently change its server location." + case 158: + return "Shared Subscription not supported - The Server does not support Shared Subscriptions." + case 159: + return "Connection rate exceeded - This connection is closed because the connection rate is too high." + case 160: + return "Maximum connect time - The maximum connection time authorized for this connection has been exceeded." + case 161: + return "Subscription Identifiers not supported - The Server does not support Subscription Identifiers; the subscription is not accepted." + case 162: + return "Wildcard subscriptions not supported - The Server does not support Wildcard subscription; the subscription is not accepted." 
+ } + + return "" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/packets.go b/vendor/github.com/eclipse/paho.golang/packets/packets.go new file mode 100644 index 000000000..496594012 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/packets.go @@ -0,0 +1,447 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "sync" +) + +// PacketType is a type alias to byte representing the different +// MQTT control packet types +// type PacketType byte + +// The following consts are the packet type number for each of the +// different control packets in MQTT +const ( + _ byte = iota + CONNECT + CONNACK + PUBLISH + PUBACK + PUBREC + PUBREL + PUBCOMP + SUBSCRIBE + SUBACK + UNSUBSCRIBE + UNSUBACK + PINGREQ + PINGRESP + DISCONNECT + AUTH +) + +type ( + // Packet is the interface defining the unique parts of a controlpacket + Packet interface { + Unpack(*bytes.Buffer) error + Buffers() net.Buffers + WriteTo(io.Writer) (int64, error) + } + + // FixedHeader is the definition of a control packet fixed header + FixedHeader struct { + remainingLength int + Type byte + Flags byte + } + + // ControlPacket is the definition of a control packet + ControlPacket struct { + Content Packet + FixedHeader + } +) + +// NewThreadSafeConn wraps net.Conn with a mutex. ControlPacket uses it in +// WriteTo method to ensure parallel writes are thread-Safe. +func NewThreadSafeConn(c net.Conn) net.Conn { + type threadSafeConn struct { + net.Conn + sync.Locker + } + + return &threadSafeConn{ + Conn: c, + Locker: &sync.Mutex{}, + } +} + +// WriteTo operates on a FixedHeader and takes the option values and produces +// the wire format byte that represents these. 
+func (f *FixedHeader) WriteTo(w io.Writer) (int64, error) { + if _, err := w.Write([]byte{byte(f.Type)<<4 | f.Flags}); err != nil { + return 0, err + } + if _, err := w.Write(encodeVBI(f.remainingLength)); err != nil { + return 0, err + } + + return 0, nil +} + +// PacketID is a helper function that returns the value of the PacketID +// field from any kind of mqtt packet in the Content element +func (c *ControlPacket) PacketID() uint16 { + switch r := c.Content.(type) { + case *Publish: + return r.PacketID + case *Puback: + return r.PacketID + case *Pubrec: + return r.PacketID + case *Pubrel: + return r.PacketID + case *Pubcomp: + return r.PacketID + case *Subscribe: + return r.PacketID + case *Suback: + return r.PacketID + case *Unsubscribe: + return r.PacketID + case *Unsuback: + return r.PacketID + default: + return 0 + } +} + +func (c *ControlPacket) PacketType() string { + return [...]string{ + "", + "CONNECT", + "CONNACK", + "PUBLISH", + "PUBACK", + "PUBREC", + "PUBREL", + "PUBCOMP", + "SUBSCRIBE", + "SUBACK", + "UNSUBSCRIBE", + "UNSUBACK", + "PINGREQ", + "PINGRESP", + "DISCONNECT", + "AUTH", + }[c.FixedHeader.Type] +} + +// NewControlPacket takes a packetType and returns a pointer to a +// ControlPacket where the VariableHeader field is a pointer to an +// instance of a VariableHeader definition for that packetType +func NewControlPacket(t byte) *ControlPacket { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: t}} + switch t { + case CONNECT: + cp.Content = &Connect{ + ProtocolName: "MQTT", + ProtocolVersion: 5, + Properties: &Properties{}, + } + case CONNACK: + cp.Content = &Connack{Properties: &Properties{}} + case PUBLISH: + cp.Content = &Publish{Properties: &Properties{}} + case PUBACK: + cp.Content = &Puback{Properties: &Properties{}} + case PUBREC: + cp.Content = &Pubrec{Properties: &Properties{}} + case PUBREL: + cp.Flags = 2 + cp.Content = &Pubrel{Properties: &Properties{}} + case PUBCOMP: + cp.Content = &Pubcomp{Properties: &Properties{}} + 
case SUBSCRIBE: + cp.Flags = 2 + cp.Content = &Subscribe{ + Subscriptions: make(map[string]SubOptions), + Properties: &Properties{}, + } + case SUBACK: + cp.Content = &Suback{Properties: &Properties{}} + case UNSUBSCRIBE: + cp.Flags = 2 + cp.Content = &Unsubscribe{Properties: &Properties{}} + case UNSUBACK: + cp.Content = &Unsuback{Properties: &Properties{}} + case PINGREQ: + cp.Content = &Pingreq{} + case PINGRESP: + cp.Content = &Pingresp{} + case DISCONNECT: + cp.Content = &Disconnect{Properties: &Properties{}} + case AUTH: + cp.Flags = 1 + cp.Content = &Auth{Properties: &Properties{}} + default: + return nil + } + return cp +} + +// ReadPacket reads a control packet from a io.Reader and returns a completed +// struct with the appropriate data +func ReadPacket(r io.Reader) (*ControlPacket, error) { + t := [1]byte{} + _, err := io.ReadFull(r, t[:]) + if err != nil { + return nil, err + } + // cp := NewControlPacket(PacketType(t[0] >> 4)) + // if cp == nil { + // return nil, fmt.Errorf("invalid packet type requested, %d", t[0]>>4) + // } + + pt := t[0] >> 4 + cp := &ControlPacket{FixedHeader: FixedHeader{Type: pt}} + switch pt { + case CONNECT: + cp.Content = &Connect{ + ProtocolName: "MQTT", + ProtocolVersion: 5, + Properties: &Properties{}, + } + case CONNACK: + cp.Content = &Connack{Properties: &Properties{}} + case PUBLISH: + cp.Content = &Publish{Properties: &Properties{}} + case PUBACK: + cp.Content = &Puback{Properties: &Properties{}} + case PUBREC: + cp.Content = &Pubrec{Properties: &Properties{}} + case PUBREL: + cp.Flags = 2 + cp.Content = &Pubrel{Properties: &Properties{}} + case PUBCOMP: + cp.Content = &Pubcomp{Properties: &Properties{}} + case SUBSCRIBE: + cp.Flags = 2 + cp.Content = &Subscribe{ + Subscriptions: make(map[string]SubOptions), + Properties: &Properties{}, + } + case SUBACK: + cp.Content = &Suback{Properties: &Properties{}} + case UNSUBSCRIBE: + cp.Flags = 2 + cp.Content = &Unsubscribe{Properties: &Properties{}} + case UNSUBACK: + 
cp.Content = &Unsuback{Properties: &Properties{}} + case PINGREQ: + cp.Content = &Pingreq{} + case PINGRESP: + cp.Content = &Pingresp{} + case DISCONNECT: + cp.Content = &Disconnect{Properties: &Properties{}} + case AUTH: + cp.Flags = 1 + cp.Content = &Auth{Properties: &Properties{}} + default: + return nil, fmt.Errorf("unknown packet type %d requested", pt) + } + + cp.Flags = t[0] & 0xF + if cp.Type == PUBLISH { + cp.Content.(*Publish).QoS = (cp.Flags & 0x6) >> 1 + } + vbi, err := getVBI(r) + if err != nil { + return nil, err + } + cp.remainingLength, err = decodeVBI(vbi) + if err != nil { + return nil, err + } + + var content bytes.Buffer + content.Grow(cp.remainingLength) + + n, err := io.CopyN(&content, r, int64(cp.remainingLength)) + if err != nil { + return nil, err + } + + if n != int64(cp.remainingLength) { + return nil, fmt.Errorf("failed to read packet, expected %d bytes, read %d", cp.remainingLength, n) + } + err = cp.Content.Unpack(&content) + if err != nil { + return nil, err + } + return cp, nil +} + +// WriteTo writes a packet to an io.Writer, handling packing all the parts of +// a control packet. +func (c *ControlPacket) WriteTo(w io.Writer) (int64, error) { + buffers := c.Content.Buffers() + for _, b := range buffers { + c.remainingLength += len(b) + } + + var header bytes.Buffer + if _, err := c.FixedHeader.WriteTo(&header); err != nil { + return 0, err + } + + buffers = append(net.Buffers{header.Bytes()}, buffers...) 
+ + if safe, ok := w.(sync.Locker); ok { + safe.Lock() + defer safe.Unlock() + } + return buffers.WriteTo(w) +} + +func encodeVBI(length int) []byte { + var x int + b := [4]byte{} + for { + digit := byte(length % 128) + length /= 128 + if length > 0 { + digit |= 0x80 + } + b[x] = digit + x++ + if length == 0 { + return b[:x] + } + } +} + +func encodeVBIdirect(length int, buf *bytes.Buffer) { + var x int + b := [4]byte{} + for { + digit := byte(length % 128) + length /= 128 + if length > 0 { + digit |= 0x80 + } + b[x] = digit + x++ + if length == 0 { + buf.Write(b[:x]) + return + } + } +} + +func getVBI(r io.Reader) (*bytes.Buffer, error) { + var ret bytes.Buffer + digit := [1]byte{} + for { + _, err := io.ReadFull(r, digit[:]) + if err != nil { + return nil, err + } + ret.WriteByte(digit[0]) + if digit[0] <= 0x7f { + return &ret, nil + } + } +} + +func decodeVBI(r *bytes.Buffer) (int, error) { + var vbi uint32 + var multiplier uint32 + for { + digit, err := r.ReadByte() + if err != nil && err != io.EOF { + return 0, err + } + vbi |= uint32(digit&127) << multiplier + if (digit & 128) == 0 { + break + } + multiplier += 7 + } + return int(vbi), nil +} + +func writeUint16(u uint16, b *bytes.Buffer) error { + if err := b.WriteByte(byte(u >> 8)); err != nil { + return err + } + return b.WriteByte(byte(u)) +} + +func writeUint32(u uint32, b *bytes.Buffer) error { + if err := b.WriteByte(byte(u >> 24)); err != nil { + return err + } + if err := b.WriteByte(byte(u >> 16)); err != nil { + return err + } + if err := b.WriteByte(byte(u >> 8)); err != nil { + return err + } + return b.WriteByte(byte(u)) +} + +func writeString(s string, b *bytes.Buffer) { + writeUint16(uint16(len(s)), b) + b.WriteString(s) +} + +func writeBinary(d []byte, b *bytes.Buffer) { + writeUint16(uint16(len(d)), b) + b.Write(d) +} + +func readUint16(b *bytes.Buffer) (uint16, error) { + b1, err := b.ReadByte() + if err != nil { + return 0, err + } + b2, err := b.ReadByte() + if err != nil { + return 0, 
err + } + return (uint16(b1) << 8) | uint16(b2), nil +} + +func readUint32(b *bytes.Buffer) (uint32, error) { + b1, err := b.ReadByte() + if err != nil { + return 0, err + } + b2, err := b.ReadByte() + if err != nil { + return 0, err + } + b3, err := b.ReadByte() + if err != nil { + return 0, err + } + b4, err := b.ReadByte() + if err != nil { + return 0, err + } + return (uint32(b1) << 24) | (uint32(b2) << 16) | (uint32(b3) << 8) | uint32(b4), nil +} + +func readBinary(b *bytes.Buffer) ([]byte, error) { + size, err := readUint16(b) + if err != nil { + return nil, err + } + + var s bytes.Buffer + s.Grow(int(size)) + if _, err := io.CopyN(&s, b, int64(size)); err != nil { + return nil, err + } + + return s.Bytes(), nil +} + +func readString(b *bytes.Buffer) (string, error) { + s, err := readBinary(b) + return string(s), err +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/pingreq.go b/vendor/github.com/eclipse/paho.golang/packets/pingreq.go new file mode 100644 index 000000000..85f30c2b5 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/pingreq.go @@ -0,0 +1,34 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Pingreq is the Variable Header definition for a Pingreq control packet +type Pingreq struct { +} + +func (p *Pingreq) String() string { + return fmt.Sprintf("PINGREQ") +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Pingreq) Unpack(r *bytes.Buffer) error { + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Pingreq) Buffers() net.Buffers { + return nil +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Pingreq) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PINGREQ}} + cp.Content = p + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/pingresp.go 
b/vendor/github.com/eclipse/paho.golang/packets/pingresp.go new file mode 100644 index 000000000..c110fc4dc --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/pingresp.go @@ -0,0 +1,34 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Pingresp is the Variable Header definition for a Pingresp control packet +type Pingresp struct { +} + +func (p *Pingresp) String() string { + return fmt.Sprintf("PINGRESP") +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Pingresp) Unpack(r *bytes.Buffer) error { + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Pingresp) Buffers() net.Buffers { + return nil +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Pingresp) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PINGRESP}} + cp.Content = p + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/properties.go b/vendor/github.com/eclipse/paho.golang/packets/properties.go new file mode 100644 index 000000000..fe1f5e22e --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/properties.go @@ -0,0 +1,804 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "strings" +) + +// PropPayloadFormat, etc are the list of property codes for the +// MQTT packet properties +const ( + PropPayloadFormat byte = 1 + PropMessageExpiry byte = 2 + PropContentType byte = 3 + PropResponseTopic byte = 8 + PropCorrelationData byte = 9 + PropSubscriptionIdentifier byte = 11 + PropSessionExpiryInterval byte = 17 + PropAssignedClientID byte = 18 + PropServerKeepAlive byte = 19 + PropAuthMethod byte = 21 + PropAuthData byte = 22 + PropRequestProblemInfo byte = 23 + PropWillDelayInterval byte = 24 + PropRequestResponseInfo byte = 25 + PropResponseInfo byte = 26 + PropServerReference byte = 28 + PropReasonString byte = 31 + 
// Property identifier codes (continued), as assigned by the MQTT v5
// specification. The values are wire format and must not change.
const (
	PropReceiveMaximum       byte = 33
	PropTopicAliasMaximum    byte = 34
	PropTopicAlias           byte = 35
	PropMaximumQOS           byte = 36
	PropRetainAvailable      byte = 37
	PropUser                 byte = 38
	PropMaximumPacketSize    byte = 39
	PropWildcardSubAvailable byte = 40
	PropSubIDAvailable       byte = 41
	PropSharedSubAvailable   byte = 42
)

// User is a single user property as a key/value pair. It is a slice
// element rather than a map entry because the protocol allows the same
// key to appear more than once.
type User struct {
	Key, Value string
}

// Properties is a struct representing all the properties the MQTT v5
// protocol describes; whether a given property is valid for a given packet
// type is answered by the ValidateID function. Pointer-typed fields
// distinguish "absent" from a zero value.
type Properties struct {
	// PayloadFormat: 0 = unspecified bytes, 1 = UTF-8 character data.
	PayloadFormat *byte
	// MessageExpiry is the lifetime of the message in seconds.
	MessageExpiry *uint32
	// ContentType describes the content of the message, e.g. a MIME type.
	ContentType string
	// ResponseTopic is the topic name to which any response should be sent.
	ResponseTopic string
	// CorrelationData associates future responses with the original request.
	CorrelationData []byte
	// SubscriptionIdentifier identifies the subscription a Publish matched.
	SubscriptionIdentifier *int
	// SessionExpiryInterval is how long (seconds) the server retains the
	// session state after the client disconnects.
	SessionExpiryInterval *uint32
	// AssignedClientID is the server-generated client identifier returned
	// in the Connack when the client connected without one.
	AssignedClientID string
	// ServerKeepAlive is the keep-alive (seconds) imposed via the Connack.
	ServerKeepAlive *uint16
	// AuthMethod names the extended-authentication method to use.
	AuthMethod string
	// AuthData is binary authentication data.
	AuthData []byte
	// RequestProblemInfo asks the server to include Reason String and/or
	// User Properties on failures.
	RequestProblemInfo *byte
	// WillDelayInterval is how long (seconds) the server waits before
	// sending the will message; reconnecting first cancels it.
	WillDelayInterval *uint32
	// RequestResponseInfo asks the server to provide Response Information
	// in the Connack.
	RequestResponseInfo *byte
	// ResponseInfo is a string the client can use as the basis for creating
	// a Response Topic — typically a topic-tree root reserved for this
	// client; returning it usually requires server configuration.
	ResponseInfo string
	// ServerReference indicates another server the client can use.
	ServerReference string
	// ReasonString is a human-readable diagnostic for this response.
	ReasonString string
	// ReceiveMaximum caps the number of in-flight QoS 1/2 messages.
	ReceiveMaximum *uint16
	// TopicAliasMaximum is the highest permitted Topic Alias value.
	TopicAliasMaximum *uint16
	// TopicAlias replaces the topic string to shrink repeated publishes.
	TopicAlias *uint16
	// MaximumQOS is the highest QoS level permitted for a Publish.
	MaximumQOS *byte
	// RetainAvailable indicates whether the server supports retained messages.
	RetainAvailable *byte
	// User is a slice of user-provided key/value properties.
	User []User
	// MaximumPacketSize is the largest packet (bytes) the peer supports.
	MaximumPacketSize *uint32
	// WildcardSubAvailable indicates whether wildcard subs are permitted.
	WildcardSubAvailable *byte
	// SubIDAvailable indicates whether subscription IDs are supported.
	SubIDAvailable *byte
	// SharedSubAvailable indicates whether shared subs are supported.
	SharedSubAvailable *byte
}

// String renders every set property one per line, tab-indented, for
// debugging. The order matches the historical output exactly.
// NOTE(review): ResponseInfo is not printed — the original omitted it too,
// and that omission is preserved here.
func (p *Properties) String() string {
	var sb strings.Builder
	emit := func(format string, v interface{}) {
		fmt.Fprintf(&sb, format, v)
	}

	if p.PayloadFormat != nil {
		emit("\tPayloadFormat:%d\n", *p.PayloadFormat)
	}
	if p.MessageExpiry != nil {
		emit("\tMessageExpiry:%d\n", *p.MessageExpiry)
	}
	if p.ContentType != "" {
		emit("\tContentType:%s\n", p.ContentType)
	}
	if p.ResponseTopic != "" {
		emit("\tResponseTopic:%s\n", p.ResponseTopic)
	}
	if len(p.CorrelationData) > 0 {
		emit("\tCorrelationData:%X\n", p.CorrelationData)
	}
	if p.SubscriptionIdentifier != nil {
		emit("\tSubscriptionIdentifier:%d\n", *p.SubscriptionIdentifier)
	}
	if p.SessionExpiryInterval != nil {
		emit("\tSessionExpiryInterval:%d\n", *p.SessionExpiryInterval)
	}
	if p.AssignedClientID != "" {
		emit("\tAssignedClientID:%s\n", p.AssignedClientID)
	}
	if p.ServerKeepAlive != nil {
		emit("\tServerKeepAlive:%d\n", *p.ServerKeepAlive)
	}
	if p.AuthMethod != "" {
		emit("\tAuthMethod:%s\n", p.AuthMethod)
	}
	if len(p.AuthData) > 0 {
		emit("\tAuthData:%X\n", p.AuthData)
	}
	if p.RequestProblemInfo != nil {
		emit("\tRequestProblemInfo:%d\n", *p.RequestProblemInfo)
	}
	if p.WillDelayInterval != nil {
		emit("\tWillDelayInterval:%d\n", *p.WillDelayInterval)
	}
	if p.RequestResponseInfo != nil {
		emit("\tRequestResponseInfo:%d\n", *p.RequestResponseInfo)
	}
	if p.ServerReference != "" {
		emit("\tServerReference:%s\n", p.ServerReference)
	}
	if p.ReasonString != "" {
		emit("\tReasonString:%s\n", p.ReasonString)
	}
	if p.ReceiveMaximum != nil {
		emit("\tReceiveMaximum:%d\n", *p.ReceiveMaximum)
	}
	if p.TopicAliasMaximum != nil {
		emit("\tTopicAliasMaximum:%d\n", *p.TopicAliasMaximum)
	}
	if p.TopicAlias != nil {
		emit("\tTopicAlias:%d\n", *p.TopicAlias)
	}
	if p.MaximumQOS != nil {
		emit("\tMaximumQOS:%d\n", *p.MaximumQOS)
	}
	if p.RetainAvailable != nil {
		emit("\tRetainAvailable:%d\n", *p.RetainAvailable)
	}
	if p.MaximumPacketSize != nil {
		emit("\tMaximumPacketSize:%d\n", *p.MaximumPacketSize)
	}
	if p.WildcardSubAvailable != nil {
		emit("\tWildcardSubAvailable:%d\n", *p.WildcardSubAvailable)
	}
	if p.SubIDAvailable != nil {
		emit("\tSubIDAvailable:%d\n", *p.SubIDAvailable)
	}
	if p.SharedSubAvailable != nil {
		emit("\tSharedSubAvailable:%d\n", *p.SharedSubAvailable)
	}
	if len(p.User) > 0 {
		fmt.Fprint(&sb, "\tUser Properties:\n")
		for _, u := range p.User {
			fmt.Fprintf(&sb, "\t\t%s:%s\n", u.Key, u.Value)
		}
	}

	return sb.String()
}
+ + if i.ServerKeepAlive != nil { + b.WriteByte(PropServerKeepAlive) + writeUint16(*i.ServerKeepAlive, &b) + } + + if i.WildcardSubAvailable != nil { + b.WriteByte(PropWildcardSubAvailable) + b.WriteByte(*i.WildcardSubAvailable) + } + + if i.SubIDAvailable != nil { + b.WriteByte(PropSubIDAvailable) + b.WriteByte(*i.SubIDAvailable) + } + + if i.SharedSubAvailable != nil { + b.WriteByte(PropSharedSubAvailable) + b.WriteByte(*i.SharedSubAvailable) + } + + if i.RetainAvailable != nil { + b.WriteByte(PropRetainAvailable) + b.WriteByte(*i.RetainAvailable) + } + + if i.ResponseInfo != "" { + b.WriteByte(PropResponseInfo) + writeString(i.ResponseInfo, &b) + } + } + + if p == CONNECT { + if i.RequestProblemInfo != nil { + b.WriteByte(PropRequestProblemInfo) + b.WriteByte(*i.RequestProblemInfo) + } + + if i.WillDelayInterval != nil { + b.WriteByte(PropWillDelayInterval) + writeUint32(*i.WillDelayInterval, &b) + } + + if i.RequestResponseInfo != nil { + b.WriteByte(PropRequestResponseInfo) + b.WriteByte(*i.RequestResponseInfo) + } + } + + if p == CONNECT || p == CONNACK || p == DISCONNECT { + if i.SessionExpiryInterval != nil { + b.WriteByte(PropSessionExpiryInterval) + writeUint32(*i.SessionExpiryInterval, &b) + } + } + + if p == CONNECT || p == CONNACK || p == AUTH { + if i.AuthMethod != "" { + b.WriteByte(PropAuthMethod) + writeString(i.AuthMethod, &b) + } + + if i.AuthData != nil && len(i.AuthData) > 0 { + b.WriteByte(PropAuthData) + writeBinary(i.AuthData, &b) + } + } + + if p == CONNACK || p == DISCONNECT { + if i.ServerReference != "" { + b.WriteByte(PropServerReference) + writeString(i.ServerReference, &b) + } + } + + if p != CONNECT { + if i.ReasonString != "" { + b.WriteByte(PropReasonString) + writeString(i.ReasonString, &b) + } + } + + for _, v := range i.User { + b.WriteByte(PropUser) + writeString(v.Key, &b) + writeString(v.Value, &b) + } + + return b.Bytes() +} + +// PackBuf will create a bytes.Buffer of the packed properties, it +// will only pack the 
properties appropriate to the packet type p +// even though other properties may exist, it will silently ignore +// them +func (i *Properties) PackBuf(p byte) *bytes.Buffer { + var b bytes.Buffer + + if i == nil { + return nil + } + + if p == PUBLISH { + if i.PayloadFormat != nil { + b.WriteByte(PropPayloadFormat) + b.WriteByte(*i.PayloadFormat) + } + + if i.MessageExpiry != nil { + b.WriteByte(PropMessageExpiry) + writeUint32(*i.MessageExpiry, &b) + } + + if i.ContentType != "" { + b.WriteByte(PropContentType) + writeString(i.ContentType, &b) + } + + if i.ResponseTopic != "" { + b.WriteByte(PropResponseTopic) + writeString(i.ResponseTopic, &b) + } + + if i.CorrelationData != nil && len(i.CorrelationData) > 0 { + b.WriteByte(PropCorrelationData) + writeBinary(i.CorrelationData, &b) + } + + if i.TopicAlias != nil { + b.WriteByte(PropTopicAlias) + writeUint16(*i.TopicAlias, &b) + } + } + + if p == PUBLISH || p == SUBSCRIBE { + if i.SubscriptionIdentifier != nil { + b.WriteByte(PropSubscriptionIdentifier) + encodeVBIdirect(*i.SubscriptionIdentifier, &b) + } + } + + if p == CONNECT || p == CONNACK { + if i.ReceiveMaximum != nil { + b.WriteByte(PropReceiveMaximum) + writeUint16(*i.ReceiveMaximum, &b) + } + + if i.TopicAliasMaximum != nil { + b.WriteByte(PropTopicAliasMaximum) + writeUint16(*i.TopicAliasMaximum, &b) + } + + if i.MaximumQOS != nil { + b.WriteByte(PropMaximumQOS) + b.WriteByte(*i.MaximumQOS) + } + + if i.MaximumPacketSize != nil { + b.WriteByte(PropMaximumPacketSize) + writeUint32(*i.MaximumPacketSize, &b) + } + } + + if p == CONNACK { + if i.AssignedClientID != "" { + b.WriteByte(PropAssignedClientID) + writeString(i.AssignedClientID, &b) + } + + if i.ServerKeepAlive != nil { + b.WriteByte(PropServerKeepAlive) + writeUint16(*i.ServerKeepAlive, &b) + } + + if i.WildcardSubAvailable != nil { + b.WriteByte(PropWildcardSubAvailable) + b.WriteByte(*i.WildcardSubAvailable) + } + + if i.SubIDAvailable != nil { + b.WriteByte(PropSubIDAvailable) + 
b.WriteByte(*i.SubIDAvailable) + } + + if i.SharedSubAvailable != nil { + b.WriteByte(PropSharedSubAvailable) + b.WriteByte(*i.SharedSubAvailable) + } + + if i.RetainAvailable != nil { + b.WriteByte(PropRetainAvailable) + b.WriteByte(*i.RetainAvailable) + } + + if i.ResponseInfo != "" { + b.WriteByte(PropResponseInfo) + writeString(i.ResponseInfo, &b) + } + } + + if p == CONNECT { + if i.RequestProblemInfo != nil { + b.WriteByte(PropRequestProblemInfo) + b.WriteByte(*i.RequestProblemInfo) + } + + if i.WillDelayInterval != nil { + b.WriteByte(PropWillDelayInterval) + writeUint32(*i.WillDelayInterval, &b) + } + + if i.RequestResponseInfo != nil { + b.WriteByte(PropRequestResponseInfo) + b.WriteByte(*i.RequestResponseInfo) + } + } + + if p == CONNECT || p == CONNACK || p == DISCONNECT { + if i.SessionExpiryInterval != nil { + b.WriteByte(PropSessionExpiryInterval) + writeUint32(*i.SessionExpiryInterval, &b) + } + } + + if p == CONNECT || p == CONNACK || p == AUTH { + if i.AuthMethod != "" { + b.WriteByte(PropAuthMethod) + writeString(i.AuthMethod, &b) + } + + if i.AuthData != nil && len(i.AuthData) > 0 { + b.WriteByte(PropAuthData) + writeBinary(i.AuthData, &b) + } + } + + if p == CONNACK || p == DISCONNECT { + if i.ServerReference != "" { + b.WriteByte(PropServerReference) + writeString(i.ServerReference, &b) + } + } + + if p != CONNECT { + if i.ReasonString != "" { + b.WriteByte(PropReasonString) + writeString(i.ReasonString, &b) + } + } + + for _, v := range i.User { + b.WriteByte(PropUser) + writeString(v.Key, &b) + writeString(v.Value, &b) + } + + return &b +} + +// Unpack takes a buffer of bytes and reads out the defined properties +// filling in the appropriate entries in the struct, it returns the number +// of bytes used to store the Prop data and any error in decoding them +func (i *Properties) Unpack(r *bytes.Buffer, p byte) error { + vbi, err := getVBI(r) + if err != nil { + return err + } + size, err := decodeVBI(vbi) + if err != nil { + return err + } + 
if size == 0 { + return nil + } + + buf := bytes.NewBuffer(r.Next(size)) + for { + PropType, err := buf.ReadByte() + if err != nil && err != io.EOF { + return err + } + if err == io.EOF { + break + } + if !ValidateID(p, PropType) { + return fmt.Errorf("invalid Prop type %d for packet %d", PropType, p) + } + switch PropType { + case PropPayloadFormat: + pf, err := buf.ReadByte() + if err != nil { + return err + } + i.PayloadFormat = &pf + case PropMessageExpiry: + pe, err := readUint32(buf) + if err != nil { + return err + } + i.MessageExpiry = &pe + case PropContentType: + ct, err := readString(buf) + if err != nil { + return err + } + i.ContentType = ct + case PropResponseTopic: + tr, err := readString(buf) + if err != nil { + return err + } + i.ResponseTopic = tr + case PropCorrelationData: + cd, err := readBinary(buf) + if err != nil { + return err + } + i.CorrelationData = cd + case PropSubscriptionIdentifier: + si, err := decodeVBI(buf) + if err != nil { + return err + } + i.SubscriptionIdentifier = &si + case PropSessionExpiryInterval: + se, err := readUint32(buf) + if err != nil { + return err + } + i.SessionExpiryInterval = &se + case PropAssignedClientID: + ac, err := readString(buf) + if err != nil { + return err + } + i.AssignedClientID = ac + case PropServerKeepAlive: + sk, err := readUint16(buf) + if err != nil { + return err + } + i.ServerKeepAlive = &sk + case PropAuthMethod: + am, err := readString(buf) + if err != nil { + return err + } + i.AuthMethod = am + case PropAuthData: + ad, err := readBinary(buf) + if err != nil { + return err + } + i.AuthData = ad + case PropRequestProblemInfo: + rp, err := buf.ReadByte() + if err != nil { + return err + } + i.RequestProblemInfo = &rp + case PropWillDelayInterval: + wd, err := readUint32(buf) + if err != nil { + return err + } + i.WillDelayInterval = &wd + case PropRequestResponseInfo: + rp, err := buf.ReadByte() + if err != nil { + return err + } + i.RequestResponseInfo = &rp + case PropResponseInfo: + 
ri, err := readString(buf) + if err != nil { + return err + } + i.ResponseInfo = ri + case PropServerReference: + sr, err := readString(buf) + if err != nil { + return err + } + i.ServerReference = sr + case PropReasonString: + rs, err := readString(buf) + if err != nil { + return err + } + i.ReasonString = rs + case PropReceiveMaximum: + rm, err := readUint16(buf) + if err != nil { + return err + } + i.ReceiveMaximum = &rm + case PropTopicAliasMaximum: + ta, err := readUint16(buf) + if err != nil { + return err + } + i.TopicAliasMaximum = &ta + case PropTopicAlias: + ta, err := readUint16(buf) + if err != nil { + return err + } + i.TopicAlias = &ta + case PropMaximumQOS: + mq, err := buf.ReadByte() + if err != nil { + return err + } + i.MaximumQOS = &mq + case PropRetainAvailable: + ra, err := buf.ReadByte() + if err != nil { + return err + } + i.RetainAvailable = &ra + case PropUser: + k, err := readString(buf) + if err != nil { + return err + } + v, err := readString(buf) + if err != nil { + return err + } + i.User = append(i.User, User{k, v}) + case PropMaximumPacketSize: + mp, err := readUint32(buf) + if err != nil { + return err + } + i.MaximumPacketSize = &mp + case PropWildcardSubAvailable: + ws, err := buf.ReadByte() + if err != nil { + return err + } + i.WildcardSubAvailable = &ws + case PropSubIDAvailable: + si, err := buf.ReadByte() + if err != nil { + return err + } + i.SubIDAvailable = &si + case PropSharedSubAvailable: + ss, err := buf.ReadByte() + if err != nil { + return err + } + i.SharedSubAvailable = &ss + default: + return fmt.Errorf("unknown Prop type %d", PropType) + } + } + + return nil +} + +// ValidProperties is a map of the various properties and the +// PacketTypes that property is valid for. 
// ValidProperties is a map of the various properties and the
// PacketTypes that property is valid for.
// Keys are the Prop* identifier codes; the inner map is used as a set of
// packet type numbers (CONNECT..AUTH). Consulted by ValidateID, which
// Properties.Unpack uses to reject properties that may not appear in a
// given packet.
var ValidProperties = map[byte]map[byte]struct{}{
	PropPayloadFormat:          {PUBLISH: {}},
	PropMessageExpiry:          {PUBLISH: {}},
	PropContentType:            {PUBLISH: {}},
	PropResponseTopic:          {PUBLISH: {}},
	PropCorrelationData:        {PUBLISH: {}},
	PropTopicAlias:             {PUBLISH: {}},
	PropSubscriptionIdentifier: {PUBLISH: {}, SUBSCRIBE: {}},
	PropSessionExpiryInterval:  {CONNECT: {}, CONNACK: {}, DISCONNECT: {}},
	PropAssignedClientID:       {CONNACK: {}},
	PropServerKeepAlive:        {CONNACK: {}},
	PropWildcardSubAvailable:   {CONNACK: {}},
	PropSubIDAvailable:         {CONNACK: {}},
	PropSharedSubAvailable:     {CONNACK: {}},
	PropRetainAvailable:        {CONNACK: {}},
	PropResponseInfo:           {CONNACK: {}},
	PropAuthMethod:             {CONNECT: {}, CONNACK: {}, AUTH: {}},
	PropAuthData:               {CONNECT: {}, CONNACK: {}, AUTH: {}},
	PropRequestProblemInfo:     {CONNECT: {}},
	PropWillDelayInterval:      {CONNECT: {}},
	PropRequestResponseInfo:    {CONNECT: {}},
	PropServerReference:        {CONNACK: {}, DISCONNECT: {}},
	PropReasonString:           {CONNACK: {}, PUBACK: {}, PUBREC: {}, PUBREL: {}, PUBCOMP: {}, SUBACK: {}, UNSUBACK: {}, DISCONNECT: {}, AUTH: {}},
	PropReceiveMaximum:         {CONNECT: {}, CONNACK: {}},
	PropTopicAliasMaximum:      {CONNECT: {}, CONNACK: {}},
	PropMaximumQOS:             {CONNECT: {}, CONNACK: {}},
	PropMaximumPacketSize:      {CONNECT: {}, CONNACK: {}},
	PropUser:                   {CONNECT: {}, CONNACK: {}, PUBLISH: {}, PUBACK: {}, PUBREC: {}, PUBREL: {}, PUBCOMP: {}, SUBSCRIBE: {}, UNSUBSCRIBE: {}, SUBACK: {}, UNSUBACK: {}, DISCONNECT: {}, AUTH: {}},
}

// ValidateID takes a PacketType and a property name and returns
// a boolean indicating if that property is valid for that
// PacketType.
// Looking up an unknown property id yields a nil inner map, whose lookup
// in turn reports false, so unknown ids are simply invalid everywhere.
func ValidateID(p byte, i byte) bool {
	_, ok := ValidProperties[i][p]
	return ok
}
+ +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Puback is the Variable Header definition for a Puback control packet +type Puback struct { + Properties *Properties + PacketID uint16 + ReasonCode byte +} + +// PubackSuccess, etc are the list of valid puback reason codes. +const ( + PubackSuccess = 0x00 + PubackNoMatchingSubscribers = 0x10 + PubackUnspecifiedError = 0x80 + PubackImplementationSpecificError = 0x83 + PubackNotAuthorized = 0x87 + PubackTopicNameInvalid = 0x90 + PubackPacketIdentifierInUse = 0x91 + PubackQuotaExceeded = 0x97 + PubackPayloadFormatInvalid = 0x99 +) + +func (p *Puback) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "PUBACK: PacketID:%d ReasonCode:%X", p.PacketID, p.ReasonCode) + if p.Properties != nil { + fmt.Fprintf(&b, " Properties:\n%s", p.Properties) + } else { + fmt.Fprint(&b, "\n") + } + + return b.String() +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Puback) Unpack(r *bytes.Buffer) error { + var err error + success := r.Len() == 2 + noProps := r.Len() == 3 + p.PacketID, err = readUint16(r) + if err != nil { + return err + } + if !success { + p.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + if !noProps { + err = p.Properties.Unpack(r, PUBACK) + if err != nil { + return err + } + } + } + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Puback) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(p.PacketID, &b) + b.WriteByte(p.ReasonCode) + idvp := p.Properties.Pack(PUBACK) + propLen := encodeVBI(len(idvp)) + n := net.Buffers{b.Bytes(), propLen} + if len(idvp) > 0 { + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Puback) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PUBACK}} + cp.Content = p + + return cp.WriteTo(w) +} + +// Reason 
returns a string representation of the meaning of the ReasonCode +func (p *Puback) Reason() string { + switch p.ReasonCode { + case 0: + return "The message is accepted. Publication of the QoS 1 message proceeds." + case 16: + return "The message is accepted but there are no subscribers. This is sent only by the Server. If the Server knows that there are no matching subscribers, it MAY use this Reason Code instead of 0x00 (Success)." + case 128: + return "The receiver does not accept the publish but either does not want to reveal the reason, or it does not match one of the other values." + case 131: + return "The PUBLISH is valid but the receiver is not willing to accept it." + case 135: + return "The PUBLISH is not authorized." + case 144: + return "The Topic Name is not malformed, but is not accepted by this Client or Server." + case 145: + return "The Packet Identifier is already in use. This might indicate a mismatch in the Session State between the Client and Server." + case 151: + return "An implementation or administrative imposed limit has been exceeded." + case 153: + return "The payload format does not match the specified Payload Format Indicator." + } + + return "" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/pubcomp.go b/vendor/github.com/eclipse/paho.golang/packets/pubcomp.go new file mode 100644 index 000000000..1cdfe61e9 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/pubcomp.go @@ -0,0 +1,95 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Pubcomp is the Variable Header definition for a Pubcomp control packet +type Pubcomp struct { + Properties *Properties + PacketID uint16 + ReasonCode byte +} + +// PubcompSuccess, etc are the list of valid pubcomp reason codes. 
+const ( + PubcompSuccess = 0x00 + PubcompPacketIdentifierNotFound = 0x92 +) + +func (p *Pubcomp) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "PUBCOMP: ReasonCode:%X PacketID:%d", p.ReasonCode, p.PacketID) + if p.Properties != nil { + fmt.Fprintf(&b, " Properties:\n%s", p.Properties) + } else { + fmt.Fprint(&b, "\n") + } + + return b.String() +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Pubcomp) Unpack(r *bytes.Buffer) error { + var err error + success := r.Len() == 2 + noProps := r.Len() == 3 + p.PacketID, err = readUint16(r) + if err != nil { + return err + } + if !success { + p.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + if !noProps { + err = p.Properties.Unpack(r, PUBACK) + if err != nil { + return err + } + } + } + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Pubcomp) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(p.PacketID, &b) + b.WriteByte(p.ReasonCode) + n := net.Buffers{b.Bytes()} + idvp := p.Properties.Pack(PUBCOMP) + propLen := encodeVBI(len(idvp)) + if len(idvp) > 0 { + n = append(n, propLen) + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Pubcomp) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PUBCOMP}} + cp.Content = p + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (p *Pubcomp) Reason() string { + switch p.ReasonCode { + case 0: + return "Success - Packet Identifier released. Publication of QoS 2 message is complete." + case 146: + return "Packet Identifier not found - The Packet Identifier is not known. This is not an error during recovery, but at other times indicates a mismatch between the Session State on the Client and Server." 
+ } + + return "" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/publish.go b/vendor/github.com/eclipse/paho.golang/packets/publish.go new file mode 100644 index 000000000..ef834b7b9 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/publish.go @@ -0,0 +1,80 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net" +) + +// Publish is the Variable Header definition for a publish control packet +type Publish struct { + Payload []byte + Topic string + Properties *Properties + PacketID uint16 + QoS byte + Duplicate bool + Retain bool +} + +func (p *Publish) String() string { + return fmt.Sprintf("PUBLISH: PacketID:%d QOS:%d Topic:%s Duplicate:%t Retain:%t Payload:\n%s\nProperties\n%s", p.PacketID, p.QoS, p.Topic, p.Duplicate, p.Retain, string(p.Payload), p.Properties) +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Publish) Unpack(r *bytes.Buffer) error { + var err error + p.Topic, err = readString(r) + if err != nil { + return err + } + if p.QoS > 0 { + p.PacketID, err = readUint16(r) + if err != nil { + return err + } + } + + err = p.Properties.Unpack(r, PUBLISH) + if err != nil { + return err + } + + p.Payload, err = ioutil.ReadAll(r) + if err != nil { + return err + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Publish) Buffers() net.Buffers { + var b bytes.Buffer + writeString(p.Topic, &b) + if p.QoS > 0 { + _ = writeUint16(p.PacketID, &b) + } + idvp := p.Properties.Pack(PUBLISH) + encodeVBIdirect(len(idvp), &b) + return net.Buffers{b.Bytes(), idvp, p.Payload} + +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Publish) WriteTo(w io.Writer) (int64, error) { + f := p.QoS << 1 + if p.Duplicate { + f |= 1 << 3 + } + if p.Retain { + f |= 1 + } + + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PUBLISH, Flags: f}} + cp.Content = p + + return 
cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/pubrec.go b/vendor/github.com/eclipse/paho.golang/packets/pubrec.go new file mode 100644 index 000000000..c3820191a --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/pubrec.go @@ -0,0 +1,117 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Pubrec is the Variable Header definition for a Pubrec control packet +type Pubrec struct { + Properties *Properties + PacketID uint16 + ReasonCode byte +} + +// PubrecSuccess, etc are the list of valid Pubrec reason codes +const ( + PubrecSuccess = 0x00 + PubrecNoMatchingSubscribers = 0x10 + PubrecUnspecifiedError = 0x80 + PubrecImplementationSpecificError = 0x83 + PubrecNotAuthorized = 0x87 + PubrecTopicNameInvalid = 0x90 + PubrecPacketIdentifierInUse = 0x91 + PubrecQuotaExceeded = 0x97 + PubrecPayloadFormatInvalid = 0x99 +) + +func (p *Pubrec) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "PUBREC: ReasonCode:%X PacketID:%d", p.ReasonCode, p.PacketID) + if p.Properties != nil { + fmt.Fprintf(&b, " Properties:\n%s", p.Properties) + } else { + fmt.Fprint(&b, "\n") + } + + return b.String() +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Pubrec) Unpack(r *bytes.Buffer) error { + var err error + success := r.Len() == 2 + noProps := r.Len() == 3 + p.PacketID, err = readUint16(r) + if err != nil { + return err + } + if !success { + p.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + if !noProps { + err = p.Properties.Unpack(r, PUBACK) + if err != nil { + return err + } + } + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Pubrec) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(p.PacketID, &b) + b.WriteByte(p.ReasonCode) + n := net.Buffers{b.Bytes()} + idvp := p.Properties.Pack(PUBREC) + propLen := encodeVBI(len(idvp)) + if len(idvp) > 0 { + n = 
append(n, propLen) + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Pubrec) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PUBREC}} + cp.Content = p + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (p *Pubrec) Reason() string { + switch p.ReasonCode { + case 0: + return "Success - The message is accepted. Publication of the QoS 2 message proceeds." + case 16: + return "No matching subscribers. - The message is accepted but there are no subscribers. This is sent only by the Server. If the Server knows that case there are no matching subscribers, it MAY use this Reason Code instead of 0x00 (Success)" + case 128: + return "Unspecified error - The receiver does not accept the publish but either does not want to reveal the reason, or it does not match one of the other values." + case 131: + return "Implementation specific error - The PUBLISH is valid but the receiver is not willing to accept it." + case 135: + return "Not authorized - The PUBLISH is not authorized." + case 144: + return "Topic Name invalid - The Topic Name is not malformed, but is not accepted by this Client or Server." + case 145: + return "Packet Identifier in use - The Packet Identifier is already in use. This might indicate a mismatch in the Session State between the Client and Server." + case 151: + return "Quota exceeded - An implementation or administrative imposed limit has been exceeded." + case 153: + return "Payload format invalid - The payload format does not match the one specified in the Payload Format Indicator." 
+ } + + return "" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/pubrel.go b/vendor/github.com/eclipse/paho.golang/packets/pubrel.go new file mode 100644 index 000000000..27c48c240 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/pubrel.go @@ -0,0 +1,77 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Pubrel is the Variable Header definition for a Pubrel control packet +type Pubrel struct { + Properties *Properties + PacketID uint16 + ReasonCode byte +} + +func (p *Pubrel) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "PUBREL: ReasonCode:%X PacketID:%d", p.ReasonCode, p.PacketID) + if p.Properties != nil { + fmt.Fprintf(&b, " Properties:\n%s", p.Properties) + } else { + fmt.Fprint(&b, "\n") + } + + return b.String() +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Pubrel) Unpack(r *bytes.Buffer) error { + var err error + success := r.Len() == 2 + noProps := r.Len() == 3 + p.PacketID, err = readUint16(r) + if err != nil { + return err + } + if !success { + p.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + if !noProps { + err = p.Properties.Unpack(r, PUBACK) + if err != nil { + return err + } + } + } + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Pubrel) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(p.PacketID, &b) + b.WriteByte(p.ReasonCode) + n := net.Buffers{b.Bytes()} + idvp := p.Properties.Pack(PUBREL) + propLen := encodeVBI(len(idvp)) + if len(idvp) > 0 { + n = append(n, propLen) + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Pubrel) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PUBREL, Flags: 2}} + cp.Content = p + + return cp.WriteTo(w) +} diff --git 
a/vendor/github.com/eclipse/paho.golang/packets/suback.go b/vendor/github.com/eclipse/paho.golang/packets/suback.go new file mode 100644 index 000000000..2503aaf1a --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/suback.go @@ -0,0 +1,103 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Suback is the Variable Header definition for a Suback control packet +type Suback struct { + Properties *Properties + Reasons []byte + PacketID uint16 +} + +func (s *Suback) String() string { + return fmt.Sprintf("SUBACK: ReasonCode:%v PacketID:%d Properties:\n%s", s.Reasons, s.PacketID, s.Properties) +} + +// SubackGrantedQoS0, etc are the list of valid suback reason codes. +const ( + SubackGrantedQoS0 = 0x00 + SubackGrantedQoS1 = 0x01 + SubackGrantedQoS2 = 0x02 + SubackUnspecifiederror = 0x80 + SubackImplementationspecificerror = 0x83 + SubackNotauthorized = 0x87 + SubackTopicFilterinvalid = 0x8F + SubackPacketIdentifierinuse = 0x91 + SubackQuotaexceeded = 0x97 + SubackSharedSubscriptionnotsupported = 0x9E + SubackSubscriptionIdentifiersnotsupported = 0xA1 + SubackWildcardsubscriptionsnotsupported = 0xA2 +) + +//Unpack is the implementation of the interface required function for a packet +func (s *Suback) Unpack(r *bytes.Buffer) error { + var err error + s.PacketID, err = readUint16(r) + if err != nil { + return err + } + + err = s.Properties.Unpack(r, SUBACK) + if err != nil { + return err + } + + s.Reasons = r.Bytes() + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (s *Suback) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(s.PacketID, &b) + idvp := s.Properties.Pack(SUBACK) + propLen := encodeVBI(len(idvp)) + return net.Buffers{b.Bytes(), propLen, idvp, s.Reasons} +} + +// WriteTo is the implementation of the interface required function for a packet +func (s *Suback) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: SUBACK}} + 
cp.Content = s + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (s *Suback) Reason(index int) string { + if index >= 0 && index < len(s.Reasons) { + switch s.Reasons[index] { + case 0: + return "Granted QoS 0 - The subscription is accepted and the maximum QoS sent will be QoS 0. This might be a lower QoS than was requested." + case 1: + return "Granted QoS 1 - The subscription is accepted and the maximum QoS sent will be QoS 1. This might be a lower QoS than was requested." + case 2: + return "Granted QoS 2 - The subscription is accepted and any received QoS will be sent to this subscription." + case 128: + return "Unspecified error - The subscription is not accepted and the Server either does not wish to reveal the reason or none of the other Reason Codes apply." + case 131: + return "Implementation specific error - The SUBSCRIBE is valid but the Server does not accept it." + case 135: + return "Not authorized - The Client is not authorized to make this subscription." + case 143: + return "Topic Filter invalid - The Topic Filter is correctly formed but is not allowed for this Client." + case 145: + return "Packet Identifier in use - The specified Packet Identifier is already in use." + case 151: + return "Quota exceeded - An implementation or administrative imposed limit has been exceeded." + case 158: + return "Shared Subscription not supported - The Server does not support Shared Subscriptions for this Client." + case 161: + return "Subscription Identifiers not supported - The Server does not support Subscription Identifiers; the subscription is not accepted." + case 162: + return "Wildcard subscriptions not supported - The Server does not support Wildcard subscription; the subscription is not accepted." 
+ } + } + return "Invalid Reason index" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/subscribe.go b/vendor/github.com/eclipse/paho.golang/packets/subscribe.go new file mode 100644 index 000000000..3f457a28a --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/subscribe.go @@ -0,0 +1,116 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Subscribe is the Variable Header definition for a Subscribe control packet +type Subscribe struct { + Properties *Properties + Subscriptions map[string]SubOptions + PacketID uint16 +} + +func (s *Subscribe) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "SUBSCRIBE: PacketID:%d Subscriptions:\n", s.PacketID) + for sub, o := range s.Subscriptions { + fmt.Fprintf(&b, "\t%s: QOS:%d RetainHandling:%X NoLocal:%t RetainAsPublished:%t\n", sub, o.QoS, o.RetainHandling, o.NoLocal, o.RetainAsPublished) + } + fmt.Fprintf(&b, "Properties:\n%s", s.Properties) + + return b.String() +} + +// SubOptions is the struct representing the options for a subscription +type SubOptions struct { + QoS byte + RetainHandling byte + NoLocal bool + RetainAsPublished bool +} + +// Pack is the implementation of the interface required function for a packet +func (s *SubOptions) Pack() byte { + var ret byte + ret |= s.QoS & 0x03 + if s.NoLocal { + ret |= 1 << 2 + } + if s.RetainAsPublished { + ret |= 1 << 3 + } + ret |= s.RetainHandling & 0x30 + + return ret +} + +// Unpack is the implementation of the interface required function for a packet +func (s *SubOptions) Unpack(r *bytes.Buffer) error { + b, err := r.ReadByte() + if err != nil { + return err + } + + s.QoS = b & 0x03 + s.NoLocal = (b & 1 << 2) == 1 + s.RetainAsPublished = (b & 1 << 3) == 1 + s.RetainHandling = b & 0x30 + + return nil +} + +// Unpack is the implementation of the interface required function for a packet +func (s *Subscribe) Unpack(r *bytes.Buffer) error { + var err error + s.PacketID, err = readUint16(r) + if err != 
nil { + return err + } + + err = s.Properties.Unpack(r, SUBSCRIBE) + if err != nil { + return err + } + + for r.Len() > 0 { + var so SubOptions + t, err := readString(r) + if err != nil { + return err + } + if err = so.Unpack(r); err != nil { + return err + } + s.Subscriptions[t] = so + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (s *Subscribe) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(s.PacketID, &b) + var subs bytes.Buffer + for t, o := range s.Subscriptions { + writeString(t, &subs) + subs.WriteByte(o.Pack()) + } + idvp := s.Properties.Pack(SUBSCRIBE) + propLen := encodeVBI(len(idvp)) + return net.Buffers{b.Bytes(), propLen, idvp, subs.Bytes()} +} + +// WriteTo is the implementation of the interface required function for a packet +func (s *Subscribe) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: SUBSCRIBE, Flags: 2}} + cp.Content = s + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/unsuback.go b/vendor/github.com/eclipse/paho.golang/packets/unsuback.go new file mode 100644 index 000000000..ba5164b9f --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/unsuback.go @@ -0,0 +1,88 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Unsuback is the Variable Header definition for a Unsuback control packet +type Unsuback struct { + Reasons []byte + Properties *Properties + PacketID uint16 +} + +func (u *Unsuback) String() string { + return fmt.Sprintf("UNSUBACK: ReasonCode:%v PacketID:%d Properties:\n%s", u.Reasons, u.PacketID, u.Properties) +} + +// UnsubackSuccess, etc are the list of valid unsuback reason codes. 
+const ( + UnsubackSuccess = 0x00 + UnsubackNoSubscriptionFound = 0x11 + UnsubackUnspecifiedError = 0x80 + UnsubackImplementationSpecificError = 0x83 + UnsubackNotAuthorized = 0x87 + UnsubackTopicFilterInvalid = 0x8F + UnsubackPacketIdentifierInUse = 0x91 +) + +// Unpack is the implementation of the interface required function for a packet +func (u *Unsuback) Unpack(r *bytes.Buffer) error { + var err error + u.PacketID, err = readUint16(r) + if err != nil { + return err + } + + err = u.Properties.Unpack(r, UNSUBACK) + if err != nil { + return err + } + + u.Reasons = r.Bytes() + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (u *Unsuback) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(u.PacketID, &b) + idvp := u.Properties.Pack(UNSUBACK) + propLen := encodeVBI(len(idvp)) + return net.Buffers{b.Bytes(), propLen, idvp, u.Reasons} +} + +// WriteTo is the implementation of the interface required function for a packet +func (u *Unsuback) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: UNSUBACK}} + cp.Content = u + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (u *Unsuback) Reason(index int) string { + if index >= 0 && index < len(u.Reasons) { + switch u.Reasons[index] { + case 0x00: + return "Success - The subscription is deleted" + case 0x11: + return "No subscription found - No matching Topic Filter is being used by the Client." + case 0x80: + return "Unspecified error - The unsubscribe could not be completed and the Server either does not wish to reveal the reason or none of the other Reason Codes apply." + case 0x83: + return "Implementation specific error - The UNSUBSCRIBE is valid but the Server does not accept it." + case 0x87: + return "Not authorized - The Client is not authorized to unsubscribe." 
+ case 0x8F: + return "Topic Filter invalid - The Topic Filter is correctly formed but is not allowed for this Client." + case 0x91: + return "Packet Identifier in use - The specified Packet Identifier is already in use." + } + } + return "Invalid Reason index" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/unsubscribe.go b/vendor/github.com/eclipse/paho.golang/packets/unsubscribe.go new file mode 100644 index 000000000..dc4e2f89e --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/unsubscribe.go @@ -0,0 +1,67 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Unsubscribe is the Variable Header definition for a Unsubscribe control packet +type Unsubscribe struct { + Topics []string + Properties *Properties + PacketID uint16 +} + +func (u *Unsubscribe) String() string { + return fmt.Sprintf("UNSUBSCRIBE: PacketID:%d Topics:%v Properties:\n%s", u.PacketID, u.Topics, u.Properties) +} + +// Unpack is the implementation of the interface required function for a packet +func (u *Unsubscribe) Unpack(r *bytes.Buffer) error { + var err error + u.PacketID, err = readUint16(r) + if err != nil { + return err + } + + err = u.Properties.Unpack(r, UNSUBSCRIBE) + if err != nil { + return err + } + + for { + t, err := readString(r) + if err != nil && err != io.EOF { + return err + } + if err == io.EOF { + break + } + u.Topics = append(u.Topics, t) + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (u *Unsubscribe) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(u.PacketID, &b) + var topics bytes.Buffer + for _, t := range u.Topics { + writeString(t, &topics) + } + idvp := u.Properties.Pack(UNSUBSCRIBE) + propLen := encodeVBI(len(idvp)) + return net.Buffers{b.Bytes(), propLen, idvp, topics.Bytes()} +} + +// WriteTo is the implementation of the interface required function for a packet +func (u *Unsubscribe) WriteTo(w io.Writer) (int64, error) { + cp := 
&ControlPacket{FixedHeader: FixedHeader{Type: UNSUBSCRIBE, Flags: 2}} + cp.Content = u + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/acks_tracker.go b/vendor/github.com/eclipse/paho.golang/paho/acks_tracker.go new file mode 100644 index 000000000..47f11cb67 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/acks_tracker.go @@ -0,0 +1,79 @@ +package paho + +import ( + "errors" + "sync" + + "github.com/eclipse/paho.golang/packets" +) + +var ( + ErrPacketNotFound = errors.New("packet not found") +) + +type acksTracker struct { + mx sync.Mutex + order []packet +} + +func (t *acksTracker) add(pb *packets.Publish) { + t.mx.Lock() + defer t.mx.Unlock() + + for _, v := range t.order { + if v.pb.PacketID == pb.PacketID { + return // already added + } + } + + t.order = append(t.order, packet{pb: pb}) +} + +func (t *acksTracker) markAsAcked(pb *packets.Publish) error { + t.mx.Lock() + defer t.mx.Unlock() + + for k, v := range t.order { + if pb.PacketID == v.pb.PacketID { + t.order[k].acknowledged = true + return nil + } + } + + return ErrPacketNotFound +} + +func (t *acksTracker) flush(do func([]*packets.Publish)) { + t.mx.Lock() + defer t.mx.Unlock() + + var ( + buf []*packets.Publish + ) + for _, v := range t.order { + if v.acknowledged { + buf = append(buf, v.pb) + } else { + break + } + } + + if len(buf) == 0 { + return + } + + do(buf) + t.order = t.order[len(buf):] +} + +// reset should be used upon disconnections +func (t *acksTracker) reset() { + t.mx.Lock() + defer t.mx.Unlock() + t.order = nil +} + +type packet struct { + pb *packets.Publish + acknowledged bool +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/auth.go b/vendor/github.com/eclipse/paho.golang/paho/auth.go new file mode 100644 index 000000000..7d3a3c972 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/auth.go @@ -0,0 +1,8 @@ +package paho + +// Auther is the interface for something that implements the extended authentication +// 
flows in MQTT v5 +type Auther interface { + Authenticate(*Auth) *Auth + Authenticated() +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/client.go b/vendor/github.com/eclipse/paho.golang/paho/client.go new file mode 100644 index 000000000..f41e3d068 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/client.go @@ -0,0 +1,923 @@ +package paho + +import ( + "context" + "errors" + "fmt" + "math" + "net" + "strings" + "sync" + "time" + + "github.com/eclipse/paho.golang/packets" + "golang.org/x/sync/semaphore" +) + +type MQTTVersion byte + +const ( + MQTTv311 MQTTVersion = 4 + MQTTv5 MQTTVersion = 5 +) + +const defaultSendAckInterval = 50 * time.Millisecond + +var ( + ErrManualAcknowledgmentDisabled = errors.New("manual acknowledgments disabled") +) + +type ( + // ClientConfig are the user configurable options for the client, an + // instance of this struct is passed into NewClient(), not all options + // are required to be set, defaults are provided for Persistence, MIDs, + // PingHandler, PacketTimeout and Router. + ClientConfig struct { + ClientID string + // Conn is the connection to broker. + // BEWARE that most wrapped net.Conn implementations like tls.Conn are + // not thread safe for writing. To fix, use packets.NewThreadSafeConn + // wrapper or extend the custom net.Conn struct with sync.Locker. 
+ Conn net.Conn + MIDs MIDService + AuthHandler Auther + PingHandler Pinger + Router Router + Persistence Persistence + PacketTimeout time.Duration + // OnServerDisconnect is called only when a packets.DISCONNECT is received from server + OnServerDisconnect func(*Disconnect) + // OnClientError is for example called on net.Error + OnClientError func(error) + // PublishHook allows a user provided function to be called before + // a Publish packet is sent allowing it to inspect or modify the + // Publish, an example of the utility of this is provided in the + // Topic Alias Handler extension which will automatically assign + // and use topic alias values rather than topic strings. + PublishHook func(*Publish) + // EnableManualAcknowledgment is used to control the acknowledgment of packets manually. + // BEWARE that the MQTT specs require clients to send acknowledgments in the order in which the corresponding + // PUBLISH packets were received. + // Consider the following scenario: the client receives packets 1,2,3,4 + // If you acknowledge 3 first, no ack is actually sent to the server but it's buffered until also 1 and 2 + // are acknowledged. + EnableManualAcknowledgment bool + // SendAcksInterval is used only when EnableManualAcknowledgment is true + // it determines how often the client tries to send a batch of acknowledgments in the right order to the server. + SendAcksInterval time.Duration + } + // Client is the struct representing an MQTT client + Client struct { + mu sync.Mutex + ClientConfig + // raCtx is used for handling the MQTTv5 authentication exchange. 
+ raCtx *CPContext + stop chan struct{} + publishPackets chan *packets.Publish + acksTracker acksTracker + workers sync.WaitGroup + serverProps CommsProperties + clientProps CommsProperties + serverInflight *semaphore.Weighted + clientInflight *semaphore.Weighted + debug Logger + errors Logger + } + + // CommsProperties is a struct of the communication properties that may + // be set by the server in the Connack and that the client needs to be + // aware of for future subscribes/publishes + CommsProperties struct { + MaximumPacketSize uint32 + ReceiveMaximum uint16 + TopicAliasMaximum uint16 + MaximumQoS byte + RetainAvailable bool + WildcardSubAvailable bool + SubIDAvailable bool + SharedSubAvailable bool + } + + caContext struct { + Context context.Context + Return chan *packets.Connack + } +) + +// NewClient is used to create a new default instance of an MQTT client. +// It returns a pointer to the new client instance. +// The default client uses the provided PingHandler, MessageID and +// StandardRouter implementations, and a noop Persistence. +// These should be replaced if desired before the client is connected. +// client.Conn *MUST* be set to an already connected net.Conn before +// Connect() is called. 
+func NewClient(conf ClientConfig) *Client { + c := &Client{ + serverProps: CommsProperties{ + ReceiveMaximum: 65535, + MaximumQoS: 2, + MaximumPacketSize: 0, + TopicAliasMaximum: 0, + RetainAvailable: true, + WildcardSubAvailable: true, + SubIDAvailable: true, + SharedSubAvailable: true, + }, + clientProps: CommsProperties{ + ReceiveMaximum: 65535, + MaximumQoS: 2, + MaximumPacketSize: 0, + TopicAliasMaximum: 0, + }, + ClientConfig: conf, + errors: NOOPLogger{}, + debug: NOOPLogger{}, + } + + if c.Persistence == nil { + c.Persistence = &noopPersistence{} + } + if c.MIDs == nil { + c.MIDs = &MIDs{index: make([]*CPContext, int(midMax))} + } + if c.PacketTimeout == 0 { + c.PacketTimeout = 10 * time.Second + } + if c.Router == nil { + c.Router = NewStandardRouter() + } + if c.PingHandler == nil { + c.PingHandler = DefaultPingerWithCustomFailHandler(func(e error) { + go c.error(e) + }) + } + if c.OnClientError == nil { + c.OnClientError = func(e error) {} + } + + return c +} + +// Connect is used to connect the client to a server. It presumes that +// the Client instance already has a working network connection. +// The function takes a pre-prepared Connect packet, and uses that to +// establish an MQTT connection. Assuming the connection completes +// successfully the rest of the client is initiated and the Connack +// returned. Otherwise the failure Connack (if there is one) is returned +// along with an error indicating the reason for the failure to connect. 
+func (c *Client) Connect(ctx context.Context, cp *Connect) (*Connack, error) { + if c.Conn == nil { + return nil, fmt.Errorf("client connection is nil") + } + + cleanup := func() { + close(c.stop) + close(c.publishPackets) + _ = c.Conn.Close() + c.mu.Unlock() + } + + c.mu.Lock() + c.stop = make(chan struct{}) + + var publishPacketsSize uint16 = math.MaxUint16 + if cp.Properties != nil && cp.Properties.ReceiveMaximum != nil { + publishPacketsSize = *cp.Properties.ReceiveMaximum + } + c.publishPackets = make(chan *packets.Publish, publishPacketsSize) + + keepalive := cp.KeepAlive + c.ClientID = cp.ClientID + if cp.Properties != nil { + if cp.Properties.MaximumPacketSize != nil { + c.clientProps.MaximumPacketSize = *cp.Properties.MaximumPacketSize + } + if cp.Properties.MaximumQOS != nil { + c.clientProps.MaximumQoS = *cp.Properties.MaximumQOS + } + if cp.Properties.ReceiveMaximum != nil { + c.clientProps.ReceiveMaximum = *cp.Properties.ReceiveMaximum + } + if cp.Properties.TopicAliasMaximum != nil { + c.clientProps.TopicAliasMaximum = *cp.Properties.TopicAliasMaximum + } + } + + c.debug.Println("connecting") + connCtx, cf := context.WithTimeout(ctx, c.PacketTimeout) + defer cf() + + ccp := cp.Packet() + ccp.ProtocolName = "MQTT" + ccp.ProtocolVersion = 5 + + c.debug.Println("sending CONNECT") + if _, err := ccp.WriteTo(c.Conn); err != nil { + cleanup() + return nil, err + } + + c.debug.Println("waiting for CONNACK/AUTH") + var ( + caPacket *packets.Connack + caPacketCh = make(chan *packets.Connack) + caPacketErr = make(chan error) + ) + go c.expectConnack(caPacketCh, caPacketErr) + select { + case <-connCtx.Done(): + if ctxErr := connCtx.Err(); ctxErr != nil { + c.debug.Println(fmt.Sprintf("terminated due to context: %v", ctxErr)) + } + cleanup() + return nil, connCtx.Err() + case err := <-caPacketErr: + c.debug.Println(err) + cleanup() + return nil, err + case caPacket = <-caPacketCh: + } + + ca := ConnackFromPacketConnack(caPacket) + + if ca.ReasonCode >= 0x80 { + 
var reason string + c.debug.Println("received an error code in Connack:", ca.ReasonCode) + if ca.Properties != nil { + reason = ca.Properties.ReasonString + } + cleanup() + return ca, fmt.Errorf("failed to connect to server: %s", reason) + } + + // no more possible calls to cleanup(), defer an unlock + defer c.mu.Unlock() + + if ca.Properties != nil { + if ca.Properties.ServerKeepAlive != nil { + keepalive = *ca.Properties.ServerKeepAlive + } + if ca.Properties.AssignedClientID != "" { + c.ClientID = ca.Properties.AssignedClientID + } + if ca.Properties.ReceiveMaximum != nil { + c.serverProps.ReceiveMaximum = *ca.Properties.ReceiveMaximum + } + if ca.Properties.MaximumQoS != nil { + c.serverProps.MaximumQoS = *ca.Properties.MaximumQoS + } + if ca.Properties.MaximumPacketSize != nil { + c.serverProps.MaximumPacketSize = *ca.Properties.MaximumPacketSize + } + if ca.Properties.TopicAliasMaximum != nil { + c.serverProps.TopicAliasMaximum = *ca.Properties.TopicAliasMaximum + } + c.serverProps.RetainAvailable = ca.Properties.RetainAvailable + c.serverProps.WildcardSubAvailable = ca.Properties.WildcardSubAvailable + c.serverProps.SubIDAvailable = ca.Properties.SubIDAvailable + c.serverProps.SharedSubAvailable = ca.Properties.SharedSubAvailable + } + + c.serverInflight = semaphore.NewWeighted(int64(c.serverProps.ReceiveMaximum)) + c.clientInflight = semaphore.NewWeighted(int64(c.clientProps.ReceiveMaximum)) + + c.debug.Println("received CONNACK, starting PingHandler") + c.workers.Add(1) + go func() { + defer c.workers.Done() + defer c.debug.Println("returning from ping handler worker") + c.PingHandler.Start(c.Conn, time.Duration(keepalive)*time.Second) + }() + + c.debug.Println("starting publish packets loop") + c.workers.Add(1) + go func() { + defer c.workers.Done() + defer c.debug.Println("returning from publish packets loop worker") + c.routePublishPackets() + }() + + c.debug.Println("starting incoming") + c.workers.Add(1) + go func() { + defer c.workers.Done() + defer 
c.debug.Println("returning from incoming worker") + c.incoming() + }() + + if c.EnableManualAcknowledgment { + c.debug.Println("starting acking routine") + + c.acksTracker.reset() + sendAcksInterval := defaultSendAckInterval + if c.SendAcksInterval > 0 { + sendAcksInterval = c.SendAcksInterval + } + + c.workers.Add(1) + go func() { + defer c.workers.Done() + defer c.debug.Println("returning from ack tracker routine") + t := time.NewTicker(sendAcksInterval) + for { + select { + case <-c.stop: + return + case <-t.C: + c.acksTracker.flush(func(pbs []*packets.Publish) { + for _, pb := range pbs { + c.ack(pb) + } + }) + } + } + }() + } + + return ca, nil +} + +func (c *Client) Ack(pb *Publish) error { + if !c.EnableManualAcknowledgment { + return ErrManualAcknowledgmentDisabled + } + if pb.QoS == 0 { + return nil + } + return c.acksTracker.markAsAcked(pb.Packet()) +} + +func (c *Client) ack(pb *packets.Publish) { + switch pb.QoS { + case 1: + pa := packets.Puback{ + Properties: &packets.Properties{}, + PacketID: pb.PacketID, + } + c.debug.Println("sending PUBACK") + _, err := pa.WriteTo(c.Conn) + if err != nil { + c.errors.Printf("failed to send PUBACK for %d: %s", pb.PacketID, err) + } + case 2: + pr := packets.Pubrec{ + Properties: &packets.Properties{}, + PacketID: pb.PacketID, + } + c.debug.Printf("sending PUBREC") + _, err := pr.WriteTo(c.Conn) + if err != nil { + c.errors.Printf("failed to send PUBREC for %d: %s", pb.PacketID, err) + } + } +} + +func (c *Client) routePublishPackets() { + for { + select { + case <-c.stop: + return + case pb, open := <-c.publishPackets: + if !open { + return + } + + if !c.ClientConfig.EnableManualAcknowledgment { + c.Router.Route(pb) + c.ack(pb) + continue + } + + if pb.QoS != 0 { + c.acksTracker.add(pb) + } + + c.Router.Route(pb) + } + } +} + +// incoming is the Client function that reads and handles incoming +// packets from the server. 
The function is started as a goroutine +// from Connect(), it exits when it receives a server initiated +// Disconnect, the Stop channel is closed or there is an error reading +// a packet from the network connection +func (c *Client) incoming() { + defer c.debug.Println("client stopping, incoming stopping") + for { + select { + case <-c.stop: + return + default: + recv, err := packets.ReadPacket(c.Conn) + if err != nil { + go c.error(err) + return + } + switch recv.Type { + case packets.CONNACK: + c.debug.Println("received CONNACK") + go c.error(fmt.Errorf("received unexpected CONNACK")) + return + case packets.AUTH: + c.debug.Println("received AUTH") + ap := recv.Content.(*packets.Auth) + switch ap.ReasonCode { + case 0x0: + if c.AuthHandler != nil { + go c.AuthHandler.Authenticated() + } + if c.raCtx != nil { + c.raCtx.Return <- *recv + } + case 0x18: + if c.AuthHandler != nil { + if _, err := c.AuthHandler.Authenticate(AuthFromPacketAuth(ap)).Packet().WriteTo(c.Conn); err != nil { + go c.error(err) + return + } + } + } + case packets.PUBLISH: + pb := recv.Content.(*packets.Publish) + c.debug.Printf("received QoS%d PUBLISH", pb.QoS) + c.mu.Lock() + select { + case <-c.stop: + c.mu.Unlock() + return + default: + c.publishPackets <- pb + c.mu.Unlock() + } + case packets.PUBACK, packets.PUBCOMP, packets.SUBACK, packets.UNSUBACK: + c.debug.Printf("received %s packet with id %d", recv.PacketType(), recv.PacketID()) + if cpCtx := c.MIDs.Get(recv.PacketID()); cpCtx != nil { + cpCtx.Return <- *recv + } else { + c.debug.Println("received a response for a message ID we don't know:", recv.PacketID()) + } + case packets.PUBREC: + c.debug.Println("received PUBREC for", recv.PacketID()) + if cpCtx := c.MIDs.Get(recv.PacketID()); cpCtx == nil { + c.debug.Println("received a PUBREC for a message ID we don't know:", recv.PacketID()) + pl := packets.Pubrel{ + PacketID: recv.Content.(*packets.Pubrec).PacketID, + ReasonCode: 0x92, + } + c.debug.Println("sending PUBREL for", 
pl.PacketID) + _, err := pl.WriteTo(c.Conn) + if err != nil { + c.errors.Printf("failed to send PUBREL for %d: %s", pl.PacketID, err) + } + } else { + pr := recv.Content.(*packets.Pubrec) + if pr.ReasonCode >= 0x80 { + //Received a failure code, shortcut and return + cpCtx.Return <- *recv + } else { + pl := packets.Pubrel{ + PacketID: pr.PacketID, + } + c.debug.Println("sending PUBREL for", pl.PacketID) + _, err := pl.WriteTo(c.Conn) + if err != nil { + c.errors.Printf("failed to send PUBREL for %d: %s", pl.PacketID, err) + } + } + } + case packets.PUBREL: + c.debug.Println("received PUBREL for", recv.PacketID()) + //Auto respond to pubrels unless failure code + pr := recv.Content.(*packets.Pubrel) + if pr.ReasonCode >= 0x80 { + //Received a failure code, continue + continue + } else { + pc := packets.Pubcomp{ + PacketID: pr.PacketID, + } + c.debug.Println("sending PUBCOMP for", pr.PacketID) + _, err := pc.WriteTo(c.Conn) + if err != nil { + c.errors.Printf("failed to send PUBCOMP for %d: %s", pc.PacketID, err) + } + } + case packets.DISCONNECT: + c.debug.Println("received DISCONNECT") + if c.raCtx != nil { + c.raCtx.Return <- *recv + } + go func() { + if c.OnServerDisconnect != nil { + go c.serverDisconnect(DisconnectFromPacketDisconnect(recv.Content.(*packets.Disconnect))) + } else { + go c.error(fmt.Errorf("server initiated disconnect")) + } + }() + return + case packets.PINGRESP: + c.debug.Println("received PINGRESP") + c.PingHandler.PingResp() + } + } + } +} + +func (c *Client) close() { + c.mu.Lock() + defer c.mu.Unlock() + + select { + case <-c.stop: + //already shutting down, do nothing + return + default: + } + + close(c.stop) + close(c.publishPackets) + + c.debug.Println("client stopped") + c.PingHandler.Stop() + c.debug.Println("ping stopped") + _ = c.Conn.Close() + c.debug.Println("conn closed") + c.acksTracker.reset() + c.debug.Println("acks tracker reset") +} + +// error is called to signify that an error situation has occurred, this +// causes the 
client's Stop channel to be closed (if it hasn't already been) +// which results in the other client goroutines terminating. +// It also closes the client network connection. +func (c *Client) error(e error) { + c.debug.Println("error called:", e) + c.close() + c.workers.Wait() + go c.OnClientError(e) +} + +func (c *Client) serverDisconnect(d *Disconnect) { + c.close() + c.workers.Wait() + c.debug.Println("calling OnServerDisconnect") + go c.OnServerDisconnect(d) +} + +// Authenticate is used to initiate a reauthentication of credentials with the +// server. This function sends the initial Auth packet to start the reauthentication +// then relies on the client AuthHandler managing any further requests from the +// server until either a successful Auth packet is passed back, or a Disconnect +// is received. +func (c *Client) Authenticate(ctx context.Context, a *Auth) (*AuthResponse, error) { + c.debug.Println("client initiated reauthentication") + + c.mu.Lock() + if c.raCtx != nil { + c.mu.Unlock() + return nil, fmt.Errorf("previous authentication is still in progress") + } + c.raCtx = &CPContext{ctx, make(chan packets.ControlPacket, 1)} + c.mu.Unlock() + defer func() { + c.mu.Lock() + c.raCtx = nil + c.mu.Unlock() + }() + + c.debug.Println("sending AUTH") + if _, err := a.Packet().WriteTo(c.Conn); err != nil { + return nil, err + } + + var rp packets.ControlPacket + select { + case <-ctx.Done(): + if ctxErr := ctx.Err(); ctxErr != nil { + c.debug.Println(fmt.Sprintf("terminated due to context: %v", ctxErr)) + return nil, ctxErr + } + case rp = <-c.raCtx.Return: + } + + switch rp.Type { + case packets.AUTH: + //If we've received one here it must be successful, the only way + //to abort a reauth is a server initiated disconnect + return AuthResponseFromPacketAuth(rp.Content.(*packets.Auth)), nil + case packets.DISCONNECT: + return AuthResponseFromPacketDisconnect(rp.Content.(*packets.Disconnect)), nil + } + + return nil, fmt.Errorf("error with Auth, didn't receive 
Auth or Disconnect") +} + +// Subscribe is used to send a Subscription request to the MQTT server. +// It is passed a pre-prepared Subscribe packet and blocks waiting for +// a response Suback, or for the timeout to fire. Any response Suback +// is returned from the function, along with any errors. +func (c *Client) Subscribe(ctx context.Context, s *Subscribe) (*Suback, error) { + if !c.serverProps.WildcardSubAvailable { + for t := range s.Subscriptions { + if strings.ContainsAny(t, "#+") { + // Using a wildcard in a subscription when not supported + return nil, fmt.Errorf("cannot subscribe to %s, server does not support wildcards", t) + } + } + } + if !c.serverProps.SubIDAvailable && s.Properties != nil && s.Properties.SubscriptionIdentifier != nil { + return nil, fmt.Errorf("cannot send subscribe with subID set, server does not support subID") + } + if !c.serverProps.SharedSubAvailable { + for t := range s.Subscriptions { + if strings.HasPrefix(t, "$share") { + return nil, fmt.Errorf("cannont subscribe to %s, server does not support shared subscriptions", t) + } + } + } + + c.debug.Printf("subscribing to %+v", s.Subscriptions) + + subCtx, cf := context.WithTimeout(ctx, c.PacketTimeout) + defer cf() + cpCtx := &CPContext{subCtx, make(chan packets.ControlPacket, 1)} + + sp := s.Packet() + + mid, err := c.MIDs.Request(cpCtx) + if err != nil { + return nil, err + } + defer c.MIDs.Free(mid) + sp.PacketID = mid + + c.debug.Println("sending SUBSCRIBE") + if _, err := sp.WriteTo(c.Conn); err != nil { + return nil, err + } + c.debug.Println("waiting for SUBACK") + var sap packets.ControlPacket + + select { + case <-subCtx.Done(): + if ctxErr := subCtx.Err(); ctxErr != nil { + c.debug.Println(fmt.Sprintf("terminated due to context: %v", ctxErr)) + return nil, ctxErr + } + case sap = <-cpCtx.Return: + } + + if sap.Type != packets.SUBACK { + return nil, fmt.Errorf("received %d instead of Suback", sap.Type) + } + c.debug.Println("received SUBACK") + + sa := 
SubackFromPacketSuback(sap.Content.(*packets.Suback)) + switch { + case len(sa.Reasons) == 1: + if sa.Reasons[0] >= 0x80 { + var reason string + c.debug.Println("received an error code in Suback:", sa.Reasons[0]) + if sa.Properties != nil { + reason = sa.Properties.ReasonString + } + return sa, fmt.Errorf("failed to subscribe to topic: %s", reason) + } + default: + for _, code := range sa.Reasons { + if code >= 0x80 { + c.debug.Println("received an error code in Suback:", code) + return sa, fmt.Errorf("at least one requested subscription failed") + } + } + } + + return sa, nil +} + +// Unsubscribe is used to send an Unsubscribe request to the MQTT server. +// It is passed a pre-prepared Unsubscribe packet and blocks waiting for +// a response Unsuback, or for the timeout to fire. Any response Unsuback +// is returned from the function, along with any errors. +func (c *Client) Unsubscribe(ctx context.Context, u *Unsubscribe) (*Unsuback, error) { + c.debug.Printf("unsubscribing from %+v", u.Topics) + unsubCtx, cf := context.WithTimeout(ctx, c.PacketTimeout) + defer cf() + cpCtx := &CPContext{unsubCtx, make(chan packets.ControlPacket, 1)} + + up := u.Packet() + + mid, err := c.MIDs.Request(cpCtx) + if err != nil { + return nil, err + } + defer c.MIDs.Free(mid) + up.PacketID = mid + + c.debug.Println("sending UNSUBSCRIBE") + if _, err := up.WriteTo(c.Conn); err != nil { + return nil, err + } + c.debug.Println("waiting for UNSUBACK") + var uap packets.ControlPacket + + select { + case <-unsubCtx.Done(): + if ctxErr := unsubCtx.Err(); ctxErr != nil { + c.debug.Println(fmt.Sprintf("terminated due to context: %v", ctxErr)) + return nil, ctxErr + } + case uap = <-cpCtx.Return: + } + + if uap.Type != packets.UNSUBACK { + return nil, fmt.Errorf("received %d instead of Unsuback", uap.Type) + } + c.debug.Println("received UNSUBACK") + + ua := UnsubackFromPacketUnsuback(uap.Content.(*packets.Unsuback)) + switch { + case len(ua.Reasons) == 1: + if ua.Reasons[0] >= 0x80 { + var
reason string + c.debug.Println("received an error code in Unsuback:", ua.Reasons[0]) + if ua.Properties != nil { + reason = ua.Properties.ReasonString + } + return ua, fmt.Errorf("failed to unsubscribe from topic: %s", reason) + } + default: + for _, code := range ua.Reasons { + if code >= 0x80 { + c.debug.Println("received an error code in Suback:", code) + return ua, fmt.Errorf("at least one requested unsubscribe failed") + } + } + } + + return ua, nil +} + +// Publish is used to send a publication to the MQTT server. +// It is passed a pre-prepared Publish packet and blocks waiting for +// the appropriate response, or for the timeout to fire. +// Any response message is returned from the function, along with any errors. +func (c *Client) Publish(ctx context.Context, p *Publish) (*PublishResponse, error) { + if p.QoS > c.serverProps.MaximumQoS { + return nil, fmt.Errorf("cannot send Publish with QoS %d, server maximum QoS is %d", p.QoS, c.serverProps.MaximumQoS) + } + if p.Properties != nil && p.Properties.TopicAlias != nil { + if c.serverProps.TopicAliasMaximum > 0 && *p.Properties.TopicAlias > c.serverProps.TopicAliasMaximum { + return nil, fmt.Errorf("cannot send publish with TopicAlias %d, server topic alias maximum is %d", *p.Properties.TopicAlias, c.serverProps.TopicAliasMaximum) + } + } + if !c.serverProps.RetainAvailable && p.Retain { + return nil, fmt.Errorf("cannot send Publish with retain flag set, server does not support retained messages") + } + if (p.Properties == nil || p.Properties.TopicAlias == nil) && p.Topic == "" { + return nil, fmt.Errorf("cannot send a publish with no TopicAlias and no Topic set") + } + + if c.ClientConfig.PublishHook != nil { + c.ClientConfig.PublishHook(p) + } + + c.debug.Printf("sending message to %s", p.Topic) + + pb := p.Packet() + + switch p.QoS { + case 0: + c.debug.Println("sending QoS0 message") + if _, err := pb.WriteTo(c.Conn); err != nil { + return nil, err + } + return nil, nil + case 1, 2: + return 
c.publishQoS12(ctx, pb) + } + + return nil, fmt.Errorf("QoS isn't 0, 1 or 2") +} + +func (c *Client) publishQoS12(ctx context.Context, pb *packets.Publish) (*PublishResponse, error) { + c.debug.Println("sending QoS12 message") + pubCtx, cf := context.WithTimeout(ctx, c.PacketTimeout) + defer cf() + if err := c.serverInflight.Acquire(pubCtx, 1); err != nil { + return nil, err + } + defer c.serverInflight.Release(1) + cpCtx := &CPContext{pubCtx, make(chan packets.ControlPacket, 1)} + + mid, err := c.MIDs.Request(cpCtx) + if err != nil { + return nil, err + } + defer c.MIDs.Free(mid) + pb.PacketID = mid + + if _, err := pb.WriteTo(c.Conn); err != nil { + return nil, err + } + var resp packets.ControlPacket + + select { + case <-pubCtx.Done(): + if ctxErr := pubCtx.Err(); ctxErr != nil { + c.debug.Println(fmt.Sprintf("terminated due to context: %v", ctxErr)) + return nil, ctxErr + } + case resp = <-cpCtx.Return: + } + + switch pb.QoS { + case 1: + if resp.Type != packets.PUBACK { + return nil, fmt.Errorf("received %d instead of PUBACK", resp.Type) + } + + pr := PublishResponseFromPuback(resp.Content.(*packets.Puback)) + if pr.ReasonCode >= 0x80 { + c.debug.Println("received an error code in Puback:", pr.ReasonCode) + return pr, fmt.Errorf("error publishing: %s", resp.Content.(*packets.Puback).Reason()) + } + return pr, nil + case 2: + switch resp.Type { + case packets.PUBCOMP: + pr := PublishResponseFromPubcomp(resp.Content.(*packets.Pubcomp)) + return pr, nil + case packets.PUBREC: + c.debug.Printf("received PUBREC for %d (must have errored)", pb.PacketID) + pr := PublishResponseFromPubrec(resp.Content.(*packets.Pubrec)) + return pr, nil + default: + return nil, fmt.Errorf("received %d instead of PUBCOMP", resp.Type) + } + } + + c.debug.Println("ended up with a non QoS1/2 message:", pb.QoS) + return nil, fmt.Errorf("ended up with a non QoS1/2 message: %d", pb.QoS) +} + +func (c *Client) expectConnack(packet chan<- *packets.Connack, errs chan<- error) { + recv, err :=
packets.ReadPacket(c.Conn) + if err != nil { + errs <- err + return + } + switch r := recv.Content.(type) { + case *packets.Connack: + c.debug.Println("received CONNACK") + if r.ReasonCode == packets.ConnackSuccess && r.Properties != nil && r.Properties.AuthMethod != "" { + // Successful connack and AuthMethod is defined, must have successfully authed during connect + go c.AuthHandler.Authenticated() + } + packet <- r + case *packets.Auth: + c.debug.Println("received AUTH") + if c.AuthHandler == nil { + errs <- fmt.Errorf("enhanced authentication flow started but no AuthHandler configured") + return + } + c.debug.Println("sending AUTH") + _, err := c.AuthHandler.Authenticate(AuthFromPacketAuth(r)).Packet().WriteTo(c.Conn) + if err != nil { + errs <- fmt.Errorf("error sending authentication packet: %w", err) + return + } + // go round again, either another AUTH or CONNACK + go c.expectConnack(packet, errs) + default: + errs <- fmt.Errorf("received unexpected packet %v", recv.Type) + } + +} + +// Disconnect is used to send a Disconnect packet to the MQTT server +// Whether or not the attempt to send the Disconnect packet fails +// (and if it does this function returns any error) the network connection +// is closed. 
+func (c *Client) Disconnect(d *Disconnect) error { + c.debug.Println("disconnecting") + _, err := d.Packet().WriteTo(c.Conn) + + c.close() + c.workers.Wait() + + return err +} + +// SetDebugLogger takes an instance of the paho Logger interface +// and sets it to be used by the debug log endpoint +func (c *Client) SetDebugLogger(l Logger) { + c.debug = l +} + +// SetErrorLogger takes an instance of the paho Logger interface +// and sets it to be used by the error log endpoint +func (c *Client) SetErrorLogger(l Logger) { + c.errors = l +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_auth.go b/vendor/github.com/eclipse/paho.golang/paho/cp_auth.go new file mode 100644 index 000000000..6ccef9b47 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_auth.go @@ -0,0 +1,92 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Auth is a representation of the MQTT Auth packet + Auth struct { + Properties *AuthProperties + ReasonCode byte + } + + // AuthProperties is a struct of the properties that can be set + // for a Auth packet + AuthProperties struct { + AuthData []byte + AuthMethod string + ReasonString string + User UserProperties + } +) + +// InitProperties is a function that takes a lower level +// Properties struct and completes the properties of the Auth on +// which it is called +func (a *Auth) InitProperties(p *packets.Properties) { + a.Properties = &AuthProperties{ + AuthMethod: p.AuthMethod, + AuthData: p.AuthData, + ReasonString: p.ReasonString, + User: UserPropertiesFromPacketUser(p.User), + } +} + +// AuthFromPacketAuth takes a packets library Auth and +// returns a paho library Auth +func AuthFromPacketAuth(a *packets.Auth) *Auth { + v := &Auth{ReasonCode: a.ReasonCode} + v.InitProperties(a.Properties) + + return v +} + +// Packet returns a packets library Auth from the paho Auth +// on which it is called +func (a *Auth) Packet() *packets.Auth { + v := &packets.Auth{ReasonCode: a.ReasonCode} + + if 
a.Properties != nil { + v.Properties = &packets.Properties{ + AuthMethod: a.Properties.AuthMethod, + AuthData: a.Properties.AuthData, + ReasonString: a.Properties.ReasonString, + User: a.Properties.User.ToPacketProperties(), + } + } + + return v +} + +// AuthResponse is a represenation of the response to an Auth +// packet +type AuthResponse struct { + Properties *AuthProperties + ReasonCode byte + Success bool +} + +// AuthResponseFromPacketAuth takes a packets library Auth and +// returns a paho library AuthResponse +func AuthResponseFromPacketAuth(a *packets.Auth) *AuthResponse { + return &AuthResponse{ + Success: true, + ReasonCode: a.ReasonCode, + Properties: &AuthProperties{ + ReasonString: a.Properties.ReasonString, + User: UserPropertiesFromPacketUser(a.Properties.User), + }, + } +} + +// AuthResponseFromPacketDisconnect takes a packets library Disconnect and +// returns a paho library AuthResponse +func AuthResponseFromPacketDisconnect(d *packets.Disconnect) *AuthResponse { + return &AuthResponse{ + Success: true, + ReasonCode: d.ReasonCode, + Properties: &AuthProperties{ + ReasonString: d.Properties.ReasonString, + User: UserPropertiesFromPacketUser(d.Properties.User), + }, + } +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_connack.go b/vendor/github.com/eclipse/paho.golang/paho/cp_connack.go new file mode 100644 index 000000000..9c7233618 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_connack.go @@ -0,0 +1,84 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Connack is a representation of the MQTT Connack packet + Connack struct { + Properties *ConnackProperties + ReasonCode byte + SessionPresent bool + } + + // ConnackProperties is a struct of the properties that can be set + // for a Connack packet + ConnackProperties struct { + SessionExpiryInterval *uint32 + AuthData []byte + AuthMethod string + ResponseInfo string + ServerReference string + ReasonString string + AssignedClientID 
string + MaximumPacketSize *uint32 + ReceiveMaximum *uint16 + TopicAliasMaximum *uint16 + ServerKeepAlive *uint16 + MaximumQoS *byte + User UserProperties + WildcardSubAvailable bool + SubIDAvailable bool + SharedSubAvailable bool + RetainAvailable bool + } +) + +// InitProperties is a function that takes a lower level +// Properties struct and completes the properties of the Connack on +// which it is called +func (c *Connack) InitProperties(p *packets.Properties) { + c.Properties = &ConnackProperties{ + AssignedClientID: p.AssignedClientID, + ServerKeepAlive: p.ServerKeepAlive, + WildcardSubAvailable: true, + SubIDAvailable: true, + SharedSubAvailable: true, + RetainAvailable: true, + ResponseInfo: p.ResponseInfo, + SessionExpiryInterval: p.SessionExpiryInterval, + AuthMethod: p.AuthMethod, + AuthData: p.AuthData, + ServerReference: p.ServerReference, + ReasonString: p.ReasonString, + ReceiveMaximum: p.ReceiveMaximum, + TopicAliasMaximum: p.TopicAliasMaximum, + MaximumQoS: p.MaximumQOS, + MaximumPacketSize: p.MaximumPacketSize, + User: UserPropertiesFromPacketUser(p.User), + } + + if p.WildcardSubAvailable != nil { + c.Properties.WildcardSubAvailable = *p.WildcardSubAvailable == 1 + } + if p.SubIDAvailable != nil { + c.Properties.SubIDAvailable = *p.SubIDAvailable == 1 + } + if p.SharedSubAvailable != nil { + c.Properties.SharedSubAvailable = *p.SharedSubAvailable == 1 + } + if p.RetainAvailable != nil { + c.Properties.RetainAvailable = *p.RetainAvailable == 1 + } +} + +// ConnackFromPacketConnack takes a packets library Connack and +// returns a paho library Connack +func ConnackFromPacketConnack(c *packets.Connack) *Connack { + v := &Connack{ + SessionPresent: c.SessionPresent, + ReasonCode: c.ReasonCode, + } + v.InitProperties(c.Properties) + + return v +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_connect.go b/vendor/github.com/eclipse/paho.golang/paho/cp_connect.go new file mode 100644 index 000000000..8d731764d --- /dev/null +++ 
b/vendor/github.com/eclipse/paho.golang/paho/cp_connect.go @@ -0,0 +1,180 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Connect is a representation of the MQTT Connect packet + Connect struct { + Password []byte + Username string + ClientID string + Properties *ConnectProperties + WillMessage *WillMessage + WillProperties *WillProperties + KeepAlive uint16 + CleanStart bool + UsernameFlag bool + PasswordFlag bool + } + + // ConnectProperties is a struct of the properties that can be set + // for a Connect packet + ConnectProperties struct { + AuthData []byte + AuthMethod string + SessionExpiryInterval *uint32 + WillDelayInterval *uint32 + ReceiveMaximum *uint16 + TopicAliasMaximum *uint16 + MaximumQOS *byte + MaximumPacketSize *uint32 + User UserProperties + RequestProblemInfo bool + RequestResponseInfo bool + } +) + +// InitProperties is a function that takes a lower level +// Properties struct and completes the properties of the Connect on +// which it is called +func (c *Connect) InitProperties(p *packets.Properties) { + c.Properties = &ConnectProperties{ + SessionExpiryInterval: p.SessionExpiryInterval, + AuthMethod: p.AuthMethod, + AuthData: p.AuthData, + WillDelayInterval: p.WillDelayInterval, + RequestResponseInfo: false, + RequestProblemInfo: true, + ReceiveMaximum: p.ReceiveMaximum, + TopicAliasMaximum: p.TopicAliasMaximum, + MaximumQOS: p.MaximumQOS, + MaximumPacketSize: p.MaximumPacketSize, + User: UserPropertiesFromPacketUser(p.User), + } + + if p.RequestResponseInfo != nil { + c.Properties.RequestResponseInfo = *p.RequestResponseInfo == 1 + } + if p.RequestProblemInfo != nil { + c.Properties.RequestProblemInfo = *p.RequestProblemInfo == 1 + } +} + +// InitWillProperties is a function that takes a lower level +// Properties struct and completes the properties of the Will in the Connect on +// which it is called +func (c *Connect) InitWillProperties(p *packets.Properties) { + c.WillProperties = &WillProperties{ +
WillDelayInterval: p.WillDelayInterval, + PayloadFormat: p.PayloadFormat, + MessageExpiry: p.MessageExpiry, + ContentType: p.ContentType, + ResponseTopic: p.ResponseTopic, + CorrelationData: p.CorrelationData, + User: UserPropertiesFromPacketUser(p.User), + } +} + +// ConnectFromPacketConnect takes a packets library Connect and +// returns a paho library Connect +func ConnectFromPacketConnect(p *packets.Connect) *Connect { + v := &Connect{ + UsernameFlag: p.UsernameFlag, + Username: p.Username, + PasswordFlag: p.PasswordFlag, + Password: p.Password, + ClientID: p.ClientID, + CleanStart: p.CleanStart, + KeepAlive: p.KeepAlive, + } + v.InitProperties(p.Properties) + if p.WillFlag { + v.WillMessage = &WillMessage{ + Retain: p.WillRetain, + QoS: p.WillQOS, + Topic: p.WillTopic, + Payload: p.WillMessage, + } + v.InitWillProperties(p.WillProperties) + } + + return v +} + +// Packet returns a packets library Connect from the paho Connect +// on which it is called +func (c *Connect) Packet() *packets.Connect { + v := &packets.Connect{ + UsernameFlag: c.UsernameFlag, + Username: c.Username, + PasswordFlag: c.PasswordFlag, + Password: c.Password, + ClientID: c.ClientID, + CleanStart: c.CleanStart, + KeepAlive: c.KeepAlive, + } + + if c.Properties != nil { + v.Properties = &packets.Properties{ + SessionExpiryInterval: c.Properties.SessionExpiryInterval, + AuthMethod: c.Properties.AuthMethod, + AuthData: c.Properties.AuthData, + WillDelayInterval: c.Properties.WillDelayInterval, + ReceiveMaximum: c.Properties.ReceiveMaximum, + TopicAliasMaximum: c.Properties.TopicAliasMaximum, + MaximumQOS: c.Properties.MaximumQOS, + MaximumPacketSize: c.Properties.MaximumPacketSize, + User: c.Properties.User.ToPacketProperties(), + } + if c.Properties.RequestResponseInfo { + v.Properties.RequestResponseInfo = Byte(1) + } + if !c.Properties.RequestProblemInfo { + v.Properties.RequestProblemInfo = Byte(0) + } + } + + if c.WillMessage != nil { + v.WillFlag = true + v.WillQOS = c.WillMessage.QoS 
+ v.WillTopic = c.WillMessage.Topic + v.WillRetain = c.WillMessage.Retain + v.WillMessage = c.WillMessage.Payload + if c.WillProperties != nil { + v.WillProperties = &packets.Properties{ + WillDelayInterval: c.WillProperties.WillDelayInterval, + PayloadFormat: c.WillProperties.PayloadFormat, + MessageExpiry: c.WillProperties.MessageExpiry, + ContentType: c.WillProperties.ContentType, + ResponseTopic: c.WillProperties.ResponseTopic, + CorrelationData: c.WillProperties.CorrelationData, + User: c.WillProperties.User.ToPacketProperties(), + } + } + } + + return v +} + +type ( + // WillMessage is a representation of the LWT message that can + // be sent with the Connect packet + WillMessage struct { + Retain bool + QoS byte + Topic string + Payload []byte + } + + // WillProperties is a struct of the properties that can be set + // for a Will in a Connect packet + WillProperties struct { + WillDelayInterval *uint32 + PayloadFormat *byte + MessageExpiry *uint32 + ContentType string + ResponseTopic string + CorrelationData []byte + User UserProperties + } +) diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_disconnect.go b/vendor/github.com/eclipse/paho.golang/paho/cp_disconnect.go new file mode 100644 index 000000000..5caa85b14 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_disconnect.go @@ -0,0 +1,58 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Disconnect is a representation of the MQTT Disconnect packet + Disconnect struct { + Properties *DisconnectProperties + ReasonCode byte + } + + // DisconnectProperties is a struct of the properties that can be set + // for a Disconnect packet + DisconnectProperties struct { + ServerReference string + ReasonString string + SessionExpiryInterval *uint32 + User UserProperties + } +) + +// InitProperties is a function that takes a lower level +// Properties struct and completes the properties of the Disconnect on +// which it is called +func (d *Disconnect) 
InitProperties(p *packets.Properties) { + d.Properties = &DisconnectProperties{ + SessionExpiryInterval: p.SessionExpiryInterval, + ServerReference: p.ServerReference, + ReasonString: p.ReasonString, + User: UserPropertiesFromPacketUser(p.User), + } +} + +// DisconnectFromPacketDisconnect takes a packets library Disconnect and +// returns a paho library Disconnect +func DisconnectFromPacketDisconnect(p *packets.Disconnect) *Disconnect { + v := &Disconnect{ReasonCode: p.ReasonCode} + v.InitProperties(p.Properties) + + return v +} + +// Packet returns a packets library Disconnect from the paho Disconnect +// on which it is called +func (d *Disconnect) Packet() *packets.Disconnect { + v := &packets.Disconnect{ReasonCode: d.ReasonCode} + + if d.Properties != nil { + v.Properties = &packets.Properties{ + SessionExpiryInterval: d.Properties.SessionExpiryInterval, + ServerReference: d.Properties.ServerReference, + ReasonString: d.Properties.ReasonString, + User: d.Properties.User.ToPacketProperties(), + } + } + + return v +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_publish.go b/vendor/github.com/eclipse/paho.golang/paho/cp_publish.go new file mode 100644 index 000000000..1bb9654b3 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_publish.go @@ -0,0 +1,123 @@ +package paho + +import ( + "bytes" + "fmt" + + "github.com/eclipse/paho.golang/packets" +) + +type ( + // Publish is a representation of the MQTT Publish packet + Publish struct { + PacketID uint16 + QoS byte + Retain bool + Topic string + Properties *PublishProperties + Payload []byte + } + + // PublishProperties is a struct of the properties that can be set + // for a Publish packet + PublishProperties struct { + CorrelationData []byte + ContentType string + ResponseTopic string + PayloadFormat *byte + MessageExpiry *uint32 + SubscriptionIdentifier *int + TopicAlias *uint16 + User UserProperties + } +) + +// InitProperties is a function that takes a lower level +// Properties 
struct and completes the properties of the Publish on +// which it is called +func (p *Publish) InitProperties(prop *packets.Properties) { + p.Properties = &PublishProperties{ + PayloadFormat: prop.PayloadFormat, + MessageExpiry: prop.MessageExpiry, + ContentType: prop.ContentType, + ResponseTopic: prop.ResponseTopic, + CorrelationData: prop.CorrelationData, + TopicAlias: prop.TopicAlias, + SubscriptionIdentifier: prop.SubscriptionIdentifier, + User: UserPropertiesFromPacketUser(prop.User), + } +} + +// PublishFromPacketPublish takes a packets library Publish and +// returns a paho library Publish +func PublishFromPacketPublish(p *packets.Publish) *Publish { + v := &Publish{ + PacketID: p.PacketID, + QoS: p.QoS, + Retain: p.Retain, + Topic: p.Topic, + Payload: p.Payload, + } + v.InitProperties(p.Properties) + + return v +} + +// Packet returns a packets library Publish from the paho Publish +// on which it is called +func (p *Publish) Packet() *packets.Publish { + v := &packets.Publish{ + PacketID: p.PacketID, + QoS: p.QoS, + Retain: p.Retain, + Topic: p.Topic, + Payload: p.Payload, + } + if p.Properties != nil { + v.Properties = &packets.Properties{ + PayloadFormat: p.Properties.PayloadFormat, + MessageExpiry: p.Properties.MessageExpiry, + ContentType: p.Properties.ContentType, + ResponseTopic: p.Properties.ResponseTopic, + CorrelationData: p.Properties.CorrelationData, + TopicAlias: p.Properties.TopicAlias, + SubscriptionIdentifier: p.Properties.SubscriptionIdentifier, + User: p.Properties.User.ToPacketProperties(), + } + } + + return v +} + +func (p *Publish) String() string { + var b bytes.Buffer + + fmt.Fprintf(&b, "topic: %s qos: %d retain: %t\n", p.Topic, p.QoS, p.Retain) + if p.Properties.PayloadFormat != nil { + fmt.Fprintf(&b, "PayloadFormat: %v\n", p.Properties.PayloadFormat) + } + if p.Properties.MessageExpiry != nil { + fmt.Fprintf(&b, "MessageExpiry: %v\n", p.Properties.MessageExpiry) + } + if p.Properties.ContentType != "" { + fmt.Fprintf(&b, 
"ContentType: %v\n", p.Properties.ContentType) + } + if p.Properties.ResponseTopic != "" { + fmt.Fprintf(&b, "ResponseTopic: %v\n", p.Properties.ResponseTopic) + } + if p.Properties.CorrelationData != nil { + fmt.Fprintf(&b, "CorrelationData: %v\n", p.Properties.CorrelationData) + } + if p.Properties.TopicAlias != nil { + fmt.Fprintf(&b, "TopicAlias: %d\n", p.Properties.TopicAlias) + } + if p.Properties.SubscriptionIdentifier != nil { + fmt.Fprintf(&b, "SubscriptionIdentifier: %v\n", p.Properties.SubscriptionIdentifier) + } + for _, v := range p.Properties.User { + fmt.Fprintf(&b, "User: %s : %s\n", v.Key, v.Value) + } + b.WriteString(string(p.Payload)) + + return b.String() +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_pubresp.go b/vendor/github.com/eclipse/paho.golang/paho/cp_pubresp.go new file mode 100644 index 000000000..0c4e174aa --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_pubresp.go @@ -0,0 +1,55 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // PublishResponse is a generic representation of a response + // to a QoS1 or QoS2 Publish + PublishResponse struct { + Properties *PublishResponseProperties + ReasonCode byte + } + + // PublishResponseProperties is the properties associated with + // a response to a QoS1 or QoS2 Publish + PublishResponseProperties struct { + ReasonString string + User UserProperties + } +) + +// PublishResponseFromPuback takes a packets library Puback and +// returns a paho library PublishResponse +func PublishResponseFromPuback(pa *packets.Puback) *PublishResponse { + return &PublishResponse{ + ReasonCode: pa.ReasonCode, + Properties: &PublishResponseProperties{ + ReasonString: pa.Properties.ReasonString, + User: UserPropertiesFromPacketUser(pa.Properties.User), + }, + } +} + +// PublishResponseFromPubcomp takes a packets library Pubcomp and +// returns a paho library PublishResponse +func PublishResponseFromPubcomp(pc *packets.Pubcomp) *PublishResponse { + return 
&PublishResponse{ + ReasonCode: pc.ReasonCode, + Properties: &PublishResponseProperties{ + ReasonString: pc.Properties.ReasonString, + User: UserPropertiesFromPacketUser(pc.Properties.User), + }, + } +} + +// PublishResponseFromPubrec takes a packets library Pubrec and +// returns a paho library PublishResponse +func PublishResponseFromPubrec(pr *packets.Pubrec) *PublishResponse { + return &PublishResponse{ + ReasonCode: pr.ReasonCode, + Properties: &PublishResponseProperties{ + ReasonString: pr.Properties.ReasonString, + User: UserPropertiesFromPacketUser(pr.Properties.User), + }, + } +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_suback.go b/vendor/github.com/eclipse/paho.golang/paho/cp_suback.go new file mode 100644 index 000000000..c1034c26c --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_suback.go @@ -0,0 +1,41 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Suback is a representation of an MQTT suback packet + Suback struct { + Properties *SubackProperties + Reasons []byte + } + + // SubackProperties is a struct of the properties that can be set + // for a Suback packet + SubackProperties struct { + ReasonString string + User UserProperties + } +) + +// Packet returns a packets library Suback from the paho Suback +// on which it is called +func (s *Suback) Packet() *packets.Suback { + return &packets.Suback{ + Reasons: s.Reasons, + Properties: &packets.Properties{ + User: s.Properties.User.ToPacketProperties(), + }, + } +} + +// SubackFromPacketSuback takes a packets library Suback and +// returns a paho library Suback +func SubackFromPacketSuback(s *packets.Suback) *Suback { + return &Suback{ + Reasons: s.Reasons, + Properties: &SubackProperties{ + ReasonString: s.Properties.ReasonString, + User: UserPropertiesFromPacketUser(s.Properties.User), + }, + } +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_subscribe.go b/vendor/github.com/eclipse/paho.golang/paho/cp_subscribe.go new 
file mode 100644 index 000000000..e111f0cf6 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_subscribe.go @@ -0,0 +1,67 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Subscribe is a representation of a MQTT subscribe packet + Subscribe struct { + Properties *SubscribeProperties + Subscriptions map[string]SubscribeOptions + } + + // SubscribeOptions is the struct representing the options for a subscription + SubscribeOptions struct { + QoS byte + RetainHandling byte + NoLocal bool + RetainAsPublished bool + } +) + +// SubscribeProperties is a struct of the properties that can be set +// for a Subscribe packet +type SubscribeProperties struct { + SubscriptionIdentifier *int + User UserProperties +} + +// InitProperties is a function that takes a packet library +// Properties struct and completes the properties of the Subscribe on +// which it is called +func (s *Subscribe) InitProperties(prop *packets.Properties) { + s.Properties = &SubscribeProperties{ + SubscriptionIdentifier: prop.SubscriptionIdentifier, + User: UserPropertiesFromPacketUser(prop.User), + } +} + +// PacketSubOptionsFromSubscribeOptions returns a map of string to packet +// library SubOptions for the paho Subscribe on which it is called +func (s *Subscribe) PacketSubOptionsFromSubscribeOptions() map[string]packets.SubOptions { + r := make(map[string]packets.SubOptions) + for k, v := range s.Subscriptions { + r[k] = packets.SubOptions{ + QoS: v.QoS, + NoLocal: v.NoLocal, + RetainAsPublished: v.RetainAsPublished, + RetainHandling: v.RetainHandling, + } + } + + return r +} + +// Packet returns a packets library Subscribe from the paho Subscribe +// on which it is called +func (s *Subscribe) Packet() *packets.Subscribe { + v := &packets.Subscribe{Subscriptions: s.PacketSubOptionsFromSubscribeOptions()} + + if s.Properties != nil { + v.Properties = &packets.Properties{ + SubscriptionIdentifier: s.Properties.SubscriptionIdentifier, + User: 
s.Properties.User.ToPacketProperties(), + } + } + + return v +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_unsuback.go b/vendor/github.com/eclipse/paho.golang/paho/cp_unsuback.go new file mode 100644 index 000000000..15ca83885 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_unsuback.go @@ -0,0 +1,41 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Unsuback is a representation of an MQTT Unsuback packet + Unsuback struct { + Reasons []byte + Properties *UnsubackProperties + } + + // UnsubackProperties is a struct of the properties that can be set + // for a Unsuback packet + UnsubackProperties struct { + ReasonString string + User UserProperties + } +) + +// Packet returns a packets library Unsuback from the paho Unsuback +// on which it is called +func (u *Unsuback) Packet() *packets.Unsuback { + return &packets.Unsuback{ + Reasons: u.Reasons, + Properties: &packets.Properties{ + User: u.Properties.User.ToPacketProperties(), + }, + } +} + +// UnsubackFromPacketUnsuback takes a packets library Unsuback and +// returns a paho library Unsuback +func UnsubackFromPacketUnsuback(u *packets.Unsuback) *Unsuback { + return &Unsuback{ + Reasons: u.Reasons, + Properties: &UnsubackProperties{ + ReasonString: u.Properties.ReasonString, + User: UserPropertiesFromPacketUser(u.Properties.User), + }, + } +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_unsubscribe.go b/vendor/github.com/eclipse/paho.golang/paho/cp_unsubscribe.go new file mode 100644 index 000000000..375b917c8 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_unsubscribe.go @@ -0,0 +1,31 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Unsubscribe is a representation of an MQTT unsubscribe packet + Unsubscribe struct { + Topics []string + Properties *UnsubscribeProperties + } + + // UnsubscribeProperties is a struct of the properties that can be set + // for a Unsubscribe packet + 
UnsubscribeProperties struct { + User UserProperties + } +) + +// Packet returns a packets library Unsubscribe from the paho Unsubscribe +// on which it is called +func (u *Unsubscribe) Packet() *packets.Unsubscribe { + v := &packets.Unsubscribe{Topics: u.Topics} + + if u.Properties != nil { + v.Properties = &packets.Properties{ + User: u.Properties.User.ToPacketProperties(), + } + } + + return v +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_utils.go b/vendor/github.com/eclipse/paho.golang/paho/cp_utils.go new file mode 100644 index 000000000..2d7995f5c --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_utils.go @@ -0,0 +1,100 @@ +package paho + +import ( + "github.com/eclipse/paho.golang/packets" +) + +// UserProperty is a struct for the user provided values +// permitted in the properties section +type UserProperty struct { + Key, Value string +} + +// UserProperties is a slice of UserProperty +type UserProperties []UserProperty + +// Add is a helper function for easily adding a new user property +func (u *UserProperties) Add(key, value string) *UserProperties { + *u = append(*u, UserProperty{key, value}) + + return u +} + +// Get returns the first entry in the UserProperties that matches +// key, or an empty string if the key is not found. Note that it is +// permitted to have multiple entries with the same key, use GetAll +// if it is expected to have multiple matches +func (u UserProperties) Get(key string) string { + for _, v := range u { + if v.Key == key { + return v.Value + } + } + + return "" +} + +// GetAll returns a slice of all entries in the UserProperties +// that match key, or a nil slice if none were found. 
+func (u UserProperties) GetAll(key string) []string { + var ret []string + for _, v := range u { + if v.Key == key { + ret = append(ret, v.Value) + } + } + + return ret +} + +// ToPacketProperties converts a UserProperties to a slice +// of packets.User which is used internally in the packets +// library for user properties +func (u UserProperties) ToPacketProperties() []packets.User { + ret := make([]packets.User, len(u)) + for i, v := range u { + ret[i] = packets.User{Key: v.Key, Value: v.Value} + } + + return ret +} + +// UserPropertiesFromPacketUser converts a slice of packets.User +// to an instance of UserProperties for easier consumption within +// the client library +func UserPropertiesFromPacketUser(up []packets.User) UserProperties { + ret := make(UserProperties, len(up)) + for i, v := range up { + ret[i] = UserProperty{v.Key, v.Value} + } + + return ret +} + +// Byte is a helper function that take a byte and returns +// a pointer to a byte of that value +func Byte(b byte) *byte { + return &b +} + +// Uint32 is a helper function that take a uint32 and returns +// a pointer to a uint32 of that value +func Uint32(u uint32) *uint32 { + return &u +} + +// Uint16 is a helper function that take a uint16 and returns +// a pointer to a uint16 of that value +func Uint16(u uint16) *uint16 { + return &u +} + +// BoolToByte is a helper function that take a bool and returns +// a pointer to a byte of value 1 if true or 0 if false +func BoolToByte(b bool) *byte { + var v byte + if b { + v = 1 + } + return &v +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/message_ids.go b/vendor/github.com/eclipse/paho.golang/paho/message_ids.go new file mode 100644 index 000000000..58b03e324 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/message_ids.go @@ -0,0 +1,93 @@ +package paho + +import ( + "context" + "errors" + "sync" + + "github.com/eclipse/paho.golang/packets" +) + +const ( + midMin uint16 = 1 + midMax uint16 = 65535 +) + +// ErrorMidsExhausted is 
returned from Request() when there are no +// free message ids to be used. +var ErrorMidsExhausted = errors.New("all message ids in use") + +// MIDService defines the interface for a struct that handles the +// relationship between message ids and CPContexts +// Request() takes a *CPContext and returns a uint16 that is the +// messageid that should be used by the code that called Request() +// Get() takes a uint16 that is a messageid and returns the matching +// *CPContext that the MIDService has associated with that messageid +// Free() takes a uint16 that is a messageid and instructs the MIDService +// to mark that messageid as available for reuse +// Clear() resets the internal state of the MIDService +type MIDService interface { + Request(*CPContext) (uint16, error) + Get(uint16) *CPContext + Free(uint16) + Clear() +} + +// CPContext is the struct that is used to return responses to +// ControlPackets that have them, eg: the suback to a subscribe. +// The response packet is send down the Return channel and the +// Context is used to track timeouts. +type CPContext struct { + Context context.Context + Return chan packets.ControlPacket +} + +// MIDs is the default MIDService provided by this library. 
+// It uses a map of uint16 to *CPContext to track responses +// to messages with a messageid +type MIDs struct { + sync.Mutex + lastMid uint16 + index []*CPContext +} + +// Request is the library provided MIDService's implementation of +// the required interface function() +func (m *MIDs) Request(c *CPContext) (uint16, error) { + m.Lock() + defer m.Unlock() + for i := uint16(1); i < midMax; i++ { + v := (m.lastMid + i) % midMax + if v == 0 { + continue + } + if inuse := m.index[v]; inuse == nil { + m.index[v] = c + m.lastMid = v + return v, nil + } + } + return 0, ErrorMidsExhausted +} + +// Get is the library provided MIDService's implementation of +// the required interface function() +func (m *MIDs) Get(i uint16) *CPContext { + m.Lock() + defer m.Unlock() + return m.index[i] +} + +// Free is the library provided MIDService's implementation of +// the required interface function() +func (m *MIDs) Free(i uint16) { + m.Lock() + m.index[i] = nil + m.Unlock() +} + +// Clear is the library provided MIDService's implementation of +// the required interface function() +func (m *MIDs) Clear() { + m.index = make([]*CPContext, int(midMax)) +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/noop_persistence.go b/vendor/github.com/eclipse/paho.golang/paho/noop_persistence.go new file mode 100644 index 000000000..d2d15704f --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/noop_persistence.go @@ -0,0 +1,23 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type noopPersistence struct{} + +func (n *noopPersistence) Open() {} + +func (n *noopPersistence) Put(id uint16, cp packets.ControlPacket) {} + +func (n *noopPersistence) Get(id uint16) packets.ControlPacket { + return packets.ControlPacket{} +} + +func (n *noopPersistence) All() []packets.ControlPacket { + return nil +} + +func (n *noopPersistence) Delete(id uint16) {} + +func (n *noopPersistence) Close() {} + +func (n *noopPersistence) Reset() {} diff --git 
a/vendor/github.com/eclipse/paho.golang/paho/persistence.go b/vendor/github.com/eclipse/paho.golang/paho/persistence.go new file mode 100644 index 000000000..f02b846cc --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/persistence.go @@ -0,0 +1,98 @@ +package paho + +import ( + "sync" + + "github.com/eclipse/paho.golang/packets" +) + +// Persistence is an interface of the functions for a struct +// that is used to persist ControlPackets. +// Open() is an initialiser to prepare the Persistence for use +// Put() takes a uint16 which is a messageid and a ControlPacket +// to persist against that messageid +// Get() takes a uint16 which is a messageid and returns the +// persisted ControlPacket from the Persistence for that messageid +// All() returns a slice of all ControlPackets persisted +// Delete() takes a uint16 which is a messageid and deletes the +// associated stored ControlPacket from the Persistence +// Close() closes the Persistence +// Reset() clears the Persistence and prepares it to be reused +type Persistence interface { + Open() + Put(uint16, packets.ControlPacket) + Get(uint16) packets.ControlPacket + All() []packets.ControlPacket + Delete(uint16) + Close() + Reset() +} + +// MemoryPersistence is an implementation of a Persistence +// that stores the ControlPackets in memory using a map +type MemoryPersistence struct { + sync.RWMutex + packets map[uint16]packets.ControlPacket +} + +// Open is the library provided MemoryPersistence's implementation of +// the required interface function() +func (m *MemoryPersistence) Open() { + m.Lock() + m.packets = make(map[uint16]packets.ControlPacket) + m.Unlock() +} + +// Put is the library provided MemoryPersistence's implementation of +// the required interface function() +func (m *MemoryPersistence) Put(id uint16, cp packets.ControlPacket) { + m.Lock() + m.packets[id] = cp + m.Unlock() +} + +// Get is the library provided MemoryPersistence's implementation of +// the required interface function() 
+func (m *MemoryPersistence) Get(id uint16) packets.ControlPacket {
+	m.RLock()
+	defer m.RUnlock()
+	return m.packets[id]
+}
+
+// All is the library provided MemoryPersistence's implementation of
+// the required interface function()
+func (m *MemoryPersistence) All() []packets.ControlPacket {
+	m.Lock()
+	defer m.Unlock()
+	ret := make([]packets.ControlPacket, 0, len(m.packets))
+
+	for _, cp := range m.packets {
+		ret = append(ret, cp)
+	}
+
+	return ret
+}
+
+// Delete is the library provided MemoryPersistence's implementation of
+// the required interface function()
+func (m *MemoryPersistence) Delete(id uint16) {
+	m.Lock()
+	delete(m.packets, id)
+	m.Unlock()
+}
+
+// Close is the library provided MemoryPersistence's implementation of
+// the required interface function()
+func (m *MemoryPersistence) Close() {
+	m.Lock()
+	m.packets = nil
+	m.Unlock()
+}
+
+// Reset is the library provided MemoryPersistence's implementation of
+// the required interface function()
+func (m *MemoryPersistence) Reset() {
+	m.Lock()
+	m.packets = make(map[uint16]packets.ControlPacket)
+	m.Unlock()
+}
diff --git a/vendor/github.com/eclipse/paho.golang/paho/pinger.go b/vendor/github.com/eclipse/paho.golang/paho/pinger.go
new file mode 100644
index 000000000..e135d25ac
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.golang/paho/pinger.go
@@ -0,0 +1,122 @@
+package paho
+
+import (
+	"fmt"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/eclipse/paho.golang/packets"
+)
+
+// PingFailHandler is a type for the function that is invoked
+// when we have sent a Pingreq to the server and not received
+// a Pingresp within 1.5x our pingtimeout
+type PingFailHandler func(error)
+
+// Pinger is an interface of the functions for a struct that is
+// used to manage sending PingRequests and responding to
+// PingResponses
+// Start() takes a net.Conn which is a connection over which an
+// MQTT session has already been established, and a time.Duration
+// of the keepalive setting 
passed to the server when the MQTT +// session was established. +// Stop() is used to stop the Pinger +// PingResp() is the function that is called by the Client when +// a PingResponse is received +// SetDebug() is used to pass in a Logger to be used to log debug +// information, for example sharing a logger with the main client +type Pinger interface { + Start(net.Conn, time.Duration) + Stop() + PingResp() + SetDebug(Logger) +} + +// PingHandler is the library provided default Pinger +type PingHandler struct { + mu sync.Mutex + lastPing time.Time + conn net.Conn + stop chan struct{} + pingFailHandler PingFailHandler + pingOutstanding int32 + debug Logger +} + +// DefaultPingerWithCustomFailHandler returns an instance of the +// default Pinger but with a custom PingFailHandler that is called +// when the client has not received a response to a PingRequest +// within the appropriate amount of time +func DefaultPingerWithCustomFailHandler(pfh PingFailHandler) *PingHandler { + return &PingHandler{ + pingFailHandler: pfh, + debug: NOOPLogger{}, + } +} + +// Start is the library provided Pinger's implementation of +// the required interface function() +func (p *PingHandler) Start(c net.Conn, pt time.Duration) { + p.mu.Lock() + p.conn = c + p.stop = make(chan struct{}) + p.mu.Unlock() + checkTicker := time.NewTicker(pt / 4) + defer checkTicker.Stop() + for { + select { + case <-p.stop: + return + case <-checkTicker.C: + if atomic.LoadInt32(&p.pingOutstanding) > 0 && time.Since(p.lastPing) > (pt+pt>>1) { + p.pingFailHandler(fmt.Errorf("ping resp timed out")) + //ping outstanding and not reset in 1.5 times ping timer + return + } + if time.Since(p.lastPing) >= pt { + //time to send a ping + if _, err := packets.NewControlPacket(packets.PINGREQ).WriteTo(p.conn); err != nil { + if p.pingFailHandler != nil { + p.pingFailHandler(err) + } + return + } + atomic.AddInt32(&p.pingOutstanding, 1) + p.lastPing = time.Now() + p.debug.Println("pingHandler sending ping request") + } + 
} + } +} + +// Stop is the library provided Pinger's implementation of +// the required interface function() +func (p *PingHandler) Stop() { + p.mu.Lock() + defer p.mu.Unlock() + if p.stop == nil { + return + } + p.debug.Println("pingHandler stopping") + select { + case <-p.stop: + //Already stopped, do nothing + default: + close(p.stop) + } +} + +// PingResp is the library provided Pinger's implementation of +// the required interface function() +func (p *PingHandler) PingResp() { + p.debug.Println("pingHandler resetting pingOutstanding") + atomic.StoreInt32(&p.pingOutstanding, 0) +} + +// SetDebug sets the logger l to be used for printing debug +// information for the pinger +func (p *PingHandler) SetDebug(l Logger) { + p.debug = l +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/router.go b/vendor/github.com/eclipse/paho.golang/paho/router.go new file mode 100644 index 000000000..05031596f --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/router.go @@ -0,0 +1,212 @@ +package paho + +import ( + "strings" + "sync" + + "github.com/eclipse/paho.golang/packets" +) + +// MessageHandler is a type for a function that is invoked +// by a Router when it has received a Publish. +type MessageHandler func(*Publish) + +// Router is an interface of the functions for a struct that is +// used to handle invoking MessageHandlers depending on the +// the topic the message was published on. 
+// RegisterHandler() takes a string of the topic, and a MessageHandler
+// to be invoked when Publishes are received that match that topic
+// UnregisterHandler() takes a string of the topic to remove
+// MessageHandlers for
+// Route() takes a Publish message and determines which MessageHandlers
+// should be invoked
+type Router interface {
+	RegisterHandler(string, MessageHandler)
+	UnregisterHandler(string)
+	Route(*packets.Publish)
+	SetDebugLogger(Logger)
+}
+
+// StandardRouter is a library provided implementation of a Router that
+// allows for unique and multiple MessageHandlers per topic
+type StandardRouter struct {
+	sync.RWMutex
+	subscriptions map[string][]MessageHandler
+	aliases       map[uint16]string
+	debug         Logger
+}
+
+// NewStandardRouter instantiates and returns an instance of a StandardRouter
+func NewStandardRouter() *StandardRouter {
+	return &StandardRouter{
+		subscriptions: make(map[string][]MessageHandler),
+		aliases:       make(map[uint16]string),
+		debug:         NOOPLogger{},
+	}
+}
+
+// RegisterHandler is the library provided StandardRouter's
+// implementation of the required interface function()
+func (r *StandardRouter) RegisterHandler(topic string, h MessageHandler) {
+	r.debug.Println("registering handler for:", topic)
+	r.Lock()
+	defer r.Unlock()
+
+	r.subscriptions[topic] = append(r.subscriptions[topic], h)
+}
+
+// UnregisterHandler is the library provided StandardRouter's
+// implementation of the required interface function()
+func (r *StandardRouter) UnregisterHandler(topic string) {
+	r.debug.Println("unregistering handler for:", topic)
+	r.Lock()
+	defer r.Unlock()
+
+	delete(r.subscriptions, topic)
+}
+
+// Route is the library provided StandardRouter's implementation
+// of the required interface function()
+func (r *StandardRouter) Route(pb *packets.Publish) {
+	r.debug.Println("routing message for:", pb.Topic)
+	r.Lock()
+	defer r.Unlock()
+
+	m := PublishFromPacketPublish(pb)
+
+	var topic string
+	if pb.Properties.TopicAlias != 
nil { + r.debug.Println("message is using topic aliasing") + if pb.Topic != "" { + //Register new alias + r.debug.Printf("registering new topic alias '%d' for topic '%s'", *pb.Properties.TopicAlias, m.Topic) + r.aliases[*pb.Properties.TopicAlias] = pb.Topic + } + if t, ok := r.aliases[*pb.Properties.TopicAlias]; ok { + r.debug.Printf("aliased topic '%d' translates to '%s'", *pb.Properties.TopicAlias, m.Topic) + topic = t + } + } else { + topic = m.Topic + } + + for route, handlers := range r.subscriptions { + if match(route, topic) { + r.debug.Println("found handler for:", route) + for _, handler := range handlers { + handler(m) + } + } + } +} + +// SetDebugLogger sets the logger l to be used for printing debug +// information for the router +func (r *StandardRouter) SetDebugLogger(l Logger) { + r.debug = l +} + +func match(route, topic string) bool { + return route == topic || routeIncludesTopic(route, topic) +} + +func matchDeep(route []string, topic []string) bool { + if len(route) == 0 { + return len(topic) == 0 + } + + if len(topic) == 0 { + return route[0] == "#" + } + + if route[0] == "#" { + return true + } + + if (route[0] == "+") || (route[0] == topic[0]) { + return matchDeep(route[1:], topic[1:]) + } + return false +} + +func routeIncludesTopic(route, topic string) bool { + return matchDeep(routeSplit(route), topicSplit(topic)) +} + +func routeSplit(route string) []string { + if len(route) == 0 { + return nil + } + var result []string + if strings.HasPrefix(route, "$share") { + result = strings.Split(route, "/")[2:] + } else { + result = strings.Split(route, "/") + } + return result +} + +func topicSplit(topic string) []string { + if len(topic) == 0 { + return nil + } + return strings.Split(topic, "/") +} + +// SingleHandlerRouter is a library provided implementation of a Router +// that stores only a single MessageHandler and invokes this MessageHandler +// for all received Publishes +type SingleHandlerRouter struct { + sync.Mutex + aliases 
map[uint16]string + handler MessageHandler + debug Logger +} + +// NewSingleHandlerRouter instantiates and returns an instance of a SingleHandlerRouter +func NewSingleHandlerRouter(h MessageHandler) *SingleHandlerRouter { + return &SingleHandlerRouter{ + aliases: make(map[uint16]string), + handler: h, + debug: NOOPLogger{}, + } +} + +// RegisterHandler is the library provided SingleHandlerRouter's +// implementation of the required interface function() +func (s *SingleHandlerRouter) RegisterHandler(topic string, h MessageHandler) { + s.debug.Println("registering handler for:", topic) + s.handler = h +} + +// UnregisterHandler is the library provided SingleHandlerRouter's +// implementation of the required interface function() +func (s *SingleHandlerRouter) UnregisterHandler(topic string) {} + +// Route is the library provided SingleHandlerRouter's +// implementation of the required interface function() +func (s *SingleHandlerRouter) Route(pb *packets.Publish) { + m := PublishFromPacketPublish(pb) + + s.debug.Println("routing message for:", m.Topic) + + if pb.Properties.TopicAlias != nil { + s.debug.Println("message is using topic aliasing") + if pb.Topic != "" { + //Register new alias + s.debug.Printf("registering new topic alias '%d' for topic '%s'", *pb.Properties.TopicAlias, m.Topic) + s.aliases[*pb.Properties.TopicAlias] = pb.Topic + } + if t, ok := s.aliases[*pb.Properties.TopicAlias]; ok { + s.debug.Printf("aliased topic '%d' translates to '%s'", *pb.Properties.TopicAlias, m.Topic) + m.Topic = t + } + } + s.handler(m) +} + +// SetDebugLogger sets the logger l to be used for printing debug +// information for the router +func (s *SingleHandlerRouter) SetDebugLogger(l Logger) { + s.debug = l +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/trace.go b/vendor/github.com/eclipse/paho.golang/paho/trace.go new file mode 100644 index 000000000..586c92398 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/trace.go @@ -0,0 +1,22 @@ +package paho + 
+type (
+	// Logger interface allows implementations to provide to this package any
+	// object that implements the methods defined in it.
+	Logger interface {
+		Println(v ...interface{})
+		Printf(format string, v ...interface{})
+	}
+
+	// NOOPLogger implements the logger that does not perform any operation
+	// by default. This allows us to efficiently discard the unwanted messages.
+	NOOPLogger struct{}
+)
+
+// Println is the library provided NOOPLogger's
+// implementation of the required interface function()
+func (NOOPLogger) Println(v ...interface{}) {}
+
+// Printf is the library provided NOOPLogger's
+// implementation of the required interface function()
+func (NOOPLogger) Printf(format string, v ...interface{}) {}
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
new file mode 100644
index 000000000..16686a655
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
@@ -0,0 +1,62 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/empty/empty.proto
+
+package empty
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	emptypb "google.golang.org/protobuf/types/known/emptypb"
+	reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/empty.proto. 
+ +type Empty = emptypb.Empty + +var File_github_com_golang_protobuf_ptypes_empty_empty_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = []byte{ + 0x0a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x3b, 0x65, 0x6d, + 0x70, 0x74, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() } +func file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() { + if File_github_com_golang_protobuf_ptypes_empty_empty_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + 
NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_empty_empty_proto = out.File + file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = nil +} diff --git a/vendor/github.com/gorilla/websocket/.editorconfig b/vendor/github.com/gorilla/websocket/.editorconfig new file mode 100644 index 000000000..2940ec92a --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.editorconfig @@ -0,0 +1,20 @@ +; https://editorconfig.org/ + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = space +indent_size = 2 + +[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +indent_style = tab +indent_size = 4 + +[*.md] +indent_size = 4 +trim_trailing_whitespace = false + +eclint_indent_style = unset diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore new file mode 100644 index 000000000..84039fec6 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.gitignore @@ -0,0 +1 @@ +coverage.coverprofile diff --git a/vendor/github.com/gorilla/websocket/.golangci.yml b/vendor/github.com/gorilla/websocket/.golangci.yml new file mode 100644 index 000000000..34882139e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.golangci.yml @@ -0,0 +1,3 @@ +run: + skip-dirs: + - examples/*.go diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE new file mode 100644 index 000000000..bb9d80bc9 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2023 The Gorilla Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/gorilla/websocket/Makefile b/vendor/github.com/gorilla/websocket/Makefile new file mode 100644 index 000000000..603a63f50 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/Makefile @@ -0,0 +1,34 @@ +GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') +GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +GO_SEC=$(shell which gosec 2> /dev/null || echo '') +GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest + +GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') +GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest + +.PHONY: golangci-lint +golangci-lint: + $(if $(GO_LINT), ,go install $(GO_LINT_URI)) + @echo "##### Running golangci-lint" + golangci-lint run -v + +.PHONY: gosec +gosec: + $(if $(GO_SEC), ,go install $(GO_SEC_URI)) + @echo "##### Running gosec" + gosec -exclude-dir examples ./... + +.PHONY: govulncheck +govulncheck: + $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) + @echo "##### Running govulncheck" + govulncheck ./... + +.PHONY: verify +verify: golangci-lint gosec govulncheck + +.PHONY: test +test: + @echo "##### Running tests" + go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... 
diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md new file mode 100644 index 000000000..1fd5e9c4e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/README.md @@ -0,0 +1,36 @@ +# gorilla/websocket + +![testing](https://github.com/gorilla/websocket/actions/workflows/test.yml/badge.svg) +[![codecov](https://codecov.io/github/gorilla/websocket/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/websocket) +[![godoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) +[![sourcegraph](https://sourcegraph.com/github.com/gorilla/websocket/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/websocket?badge) + +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. + +![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5) + + +### Documentation + +* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc) +* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) +* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) +* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) +* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) +* [Write buffer pool example](https://github.com/gorilla/websocket/tree/master/examples/bufferpool) + +### Status + +The Gorilla WebSocket package provides a complete and tested implementation of +the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The +package API is stable. 
+ +### Installation + + go get github.com/gorilla/websocket + +### Protocol Compliance + +The Gorilla WebSocket package passes the server tests in the [Autobahn Test +Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn +subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go new file mode 100644 index 000000000..815b0ca5c --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client.go @@ -0,0 +1,444 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "log" + + "net" + "net/http" + "net/http/httptrace" + "net/url" + "strings" + "time" + + "golang.org/x/net/proxy" +) + +// ErrBadHandshake is returned when the server response to opening handshake is +// invalid. +var ErrBadHandshake = errors.New("websocket: bad handshake") + +var errInvalidCompression = errors.New("websocket: invalid compression negotiation") + +// NewClient creates a new client connection using the given net connection. +// The URL u specifies the host and request URI. Use requestHeader to specify +// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies +// (Cookie). Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etc. +// +// Deprecated: Use Dialer instead. 
+func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { + d := Dialer{ + ReadBufferSize: readBufSize, + WriteBufferSize: writeBufSize, + NetDial: func(net, addr string) (net.Conn, error) { + return netConn, nil + }, + } + return d.Dial(u.String(), requestHeader) +} + +// A Dialer contains options for connecting to WebSocket server. +// +// It is safe to call Dialer's methods concurrently. +type Dialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, net.Dial is used. + NetDial func(network, addr string) (net.Conn, error) + + // NetDialContext specifies the dial function for creating TCP connections. If + // NetDialContext is nil, NetDial is used. + NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error) + + // NetDialTLSContext specifies the dial function for creating TLS/TCP connections. If + // NetDialTLSContext is nil, NetDialContext is used. + // If NetDialTLSContext is set, Dial assumes the TLS handshake is done there and + // TLSClientConfig is ignored. + NetDialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error) + + // Proxy specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxy func(*http.Request) (*url.URL, error) + + // TLSClientConfig specifies the TLS configuration to use with tls.Client. + // If nil, the default configuration is used. + // If either NetDialTLS or NetDialTLSContext are set, Dial assumes the TLS handshake + // is done there and TLSClientConfig is ignored. + TLSClientConfig *tls.Config + + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. 
If a buffer + // size is zero, then a useful default size is used. The I/O buffer sizes + // do not limit the size of the messages that can be sent or received. + ReadBufferSize, WriteBufferSize int + + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. + // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. + WriteBufferPool BufferPool + + // Subprotocols specifies the client's requested subprotocols. + Subprotocols []string + + // EnableCompression specifies if the client should attempt to negotiate + // per message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool + + // Jar specifies the cookie jar. + // If Jar is nil, cookies are not sent in requests and ignored + // in responses. + Jar http.CookieJar +} + +// Dial creates a new client connection by calling DialContext with a background context. +func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + return d.DialContext(context.Background(), urlStr, requestHeader) +} + +var errMalformedURL = errors.New("malformed ws or wss URL") + +func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { + hostPort = u.Host + hostNoPort = u.Host + if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { + hostNoPort = hostNoPort[:i] + } else { + switch u.Scheme { + case "wss": + hostPort += ":443" + case "https": + hostPort += ":443" + default: + hostPort += ":80" + } + } + return hostPort, hostNoPort +} + +// DefaultDialer is a dialer with all fields set to the default values. 
+var DefaultDialer = &Dialer{ + Proxy: http.ProxyFromEnvironment, + HandshakeTimeout: 45 * time.Second, +} + +// nilDialer is dialer to use when receiver is nil. +var nilDialer = *DefaultDialer + +// DialContext creates a new client connection. Use requestHeader to specify the +// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). +// Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// The context will be used in the request and in the Dialer. +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etcetera. The response body may not contain the entire response and does not +// need to be closed by the application. +func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + if d == nil { + d = &nilDialer + } + + challengeKey, err := generateChallengeKey() + if err != nil { + return nil, nil, err + } + + u, err := url.Parse(urlStr) + if err != nil { + return nil, nil, err + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + default: + return nil, nil, errMalformedURL + } + + if u.User != nil { + // User name and password are not allowed in websocket URIs. + return nil, nil, errMalformedURL + } + + req := &http.Request{ + Method: http.MethodGet, + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: u.Host, + } + req = req.WithContext(ctx) + + // Set the cookies present in the cookie jar of the dialer + if d.Jar != nil { + for _, cookie := range d.Jar.Cookies(u) { + req.AddCookie(cookie) + } + } + + // Set the request headers using the capitalization for names and values in + // RFC examples. Although the capitalization shouldn't matter, there are + // servers that depend on it. 
The Header.Set method is not used because the + // method canonicalizes the header names. + req.Header["Upgrade"] = []string{"websocket"} + req.Header["Connection"] = []string{"Upgrade"} + req.Header["Sec-WebSocket-Key"] = []string{challengeKey} + req.Header["Sec-WebSocket-Version"] = []string{"13"} + if len(d.Subprotocols) > 0 { + req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} + } + for k, vs := range requestHeader { + switch { + case k == "Host": + if len(vs) > 0 { + req.Host = vs[0] + } + case k == "Upgrade" || + k == "Connection" || + k == "Sec-Websocket-Key" || + k == "Sec-Websocket-Version" || + //#nosec G101 (CWE-798): Potential HTTP request smuggling via parameter pollution + k == "Sec-Websocket-Extensions" || + (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): + return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) + case k == "Sec-Websocket-Protocol": + req.Header["Sec-WebSocket-Protocol"] = vs + default: + req.Header[k] = vs + } + } + + if d.EnableCompression { + req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"} + } + + if d.HandshakeTimeout != 0 { + var cancel func() + ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout) + defer cancel() + } + + // Get network dial function. 
+ var netDial func(network, add string) (net.Conn, error) + + switch u.Scheme { + case "http": + if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial + } + case "https": + if d.NetDialTLSContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialTLSContext(ctx, network, addr) + } + } else if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial + } + default: + return nil, nil, errMalformedURL + } + + if netDial == nil { + netDialer := &net.Dialer{} + netDial = func(network, addr string) (net.Conn, error) { + return netDialer.DialContext(ctx, network, addr) + } + } + + // If needed, wrap the dial function to set the connection deadline. + if deadline, ok := ctx.Deadline(); ok { + forwardDial := netDial + netDial = func(network, addr string) (net.Conn, error) { + c, err := forwardDial(network, addr) + if err != nil { + return nil, err + } + err = c.SetDeadline(deadline) + if err != nil { + if err := c.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } + return c, nil + } + } + + // If needed, wrap the dial function to connect through a proxy. 
+ if d.Proxy != nil { + proxyURL, err := d.Proxy(req) + if err != nil { + return nil, nil, err + } + if proxyURL != nil { + dialer, err := proxy.FromURL(proxyURL, netDialerFunc(netDial)) + if err != nil { + return nil, nil, err + } + netDial = dialer.Dial + } + } + + hostPort, hostNoPort := hostPortNoPort(u) + trace := httptrace.ContextClientTrace(ctx) + if trace != nil && trace.GetConn != nil { + trace.GetConn(hostPort) + } + + netConn, err := netDial("tcp", hostPort) + if err != nil { + return nil, nil, err + } + if trace != nil && trace.GotConn != nil { + trace.GotConn(httptrace.GotConnInfo{ + Conn: netConn, + }) + } + + defer func() { + if netConn != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + } + }() + + if u.Scheme == "https" && d.NetDialTLSContext == nil { + // If NetDialTLSContext is set, assume that the TLS handshake has already been done + + cfg := cloneTLSConfig(d.TLSClientConfig) + if cfg.ServerName == "" { + cfg.ServerName = hostNoPort + } + tlsConn := tls.Client(netConn, cfg) + netConn = tlsConn + + if trace != nil && trace.TLSHandshakeStart != nil { + trace.TLSHandshakeStart() + } + err := doHandshake(ctx, tlsConn, cfg) + if trace != nil && trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(tlsConn.ConnectionState(), err) + } + + if err != nil { + return nil, nil, err + } + } + + conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil) + + if err := req.Write(netConn); err != nil { + return nil, nil, err + } + + if trace != nil && trace.GotFirstResponseByte != nil { + if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 { + trace.GotFirstResponseByte() + } + } + + resp, err := http.ReadResponse(conn.br, req) + if err != nil { + if d.TLSClientConfig != nil { + for _, proto := range d.TLSClientConfig.NextProtos { + if proto != "http/1.1" { + return nil, nil, fmt.Errorf( + "websocket: protocol %q was given but is not 
supported;"+ + "sharing tls.Config with net/http Transport can cause this error: %w", + proto, err, + ) + } + } + } + return nil, nil, err + } + + if d.Jar != nil { + if rc := resp.Cookies(); len(rc) > 0 { + d.Jar.SetCookies(u, rc) + } + } + + if resp.StatusCode != 101 || + !tokenListContainsValue(resp.Header, "Upgrade", "websocket") || + !tokenListContainsValue(resp.Header, "Connection", "upgrade") || + resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { + // Before closing the network connection on return from this + // function, slurp up some of the response to aid application + // debugging. + buf := make([]byte, 1024) + n, _ := io.ReadFull(resp.Body, buf) + resp.Body = io.NopCloser(bytes.NewReader(buf[:n])) + return nil, resp, ErrBadHandshake + } + + for _, ext := range parseExtensions(resp.Header) { + if ext[""] != "permessage-deflate" { + continue + } + _, snct := ext["server_no_context_takeover"] + _, cnct := ext["client_no_context_takeover"] + if !snct || !cnct { + return nil, resp, errInvalidCompression + } + conn.newCompressionWriter = compressNoContextTakeover + conn.newDecompressionReader = decompressNoContextTakeover + break + } + + resp.Body = io.NopCloser(bytes.NewReader([]byte{})) + conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") + + if err := netConn.SetDeadline(time.Time{}); err != nil { + return nil, nil, err + } + netConn = nil // to avoid close in defer. + return conn, resp, nil +} + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{MinVersion: tls.VersionTLS12} + } + return cfg.Clone() +} diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go new file mode 100644 index 000000000..9fed0ef52 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -0,0 +1,153 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "compress/flate" + "errors" + "io" + "log" + "strings" + "sync" +) + +const ( + minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 + maxCompressionLevel = flate.BestCompression + defaultCompressionLevel = 1 +) + +var ( + flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool + flateReaderPool = sync.Pool{New: func() interface{} { + return flate.NewReader(nil) + }} +) + +func decompressNoContextTakeover(r io.Reader) io.ReadCloser { + const tail = + // Add four bytes as specified in RFC + "\x00\x00\xff\xff" + + // Add final block to squelch unexpected EOF error from flate reader. + "\x01\x00\x00\xff\xff" + + fr, _ := flateReaderPool.Get().(io.ReadCloser) + if err := fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil); err != nil { + panic(err) + } + return &flateReadWrapper{fr} +} + +func isValidCompressionLevel(level int) bool { + return minCompressionLevel <= level && level <= maxCompressionLevel +} + +func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { + p := &flateWriterPools[level-minCompressionLevel] + tw := &truncWriter{w: w} + fw, _ := p.Get().(*flate.Writer) + if fw == nil { + fw, _ = flate.NewWriter(tw, level) + } else { + fw.Reset(tw) + } + return &flateWriteWrapper{fw: fw, tw: tw, p: p} +} + +// truncWriter is an io.Writer that writes all but the last four bytes of the +// stream to another io.Writer. +type truncWriter struct { + w io.WriteCloser + n int + p [4]byte +} + +func (w *truncWriter) Write(p []byte) (int, error) { + n := 0 + + // fill buffer first for simplicity. 
+ if w.n < len(w.p) { + n = copy(w.p[w.n:], p) + p = p[n:] + w.n += n + if len(p) == 0 { + return n, nil + } + } + + m := len(p) + if m > len(w.p) { + m = len(w.p) + } + + if nn, err := w.w.Write(w.p[:m]); err != nil { + return n + nn, err + } + + copy(w.p[:], w.p[m:]) + copy(w.p[len(w.p)-m:], p[len(p)-m:]) + nn, err := w.w.Write(p[:len(p)-m]) + return n + nn, err +} + +type flateWriteWrapper struct { + fw *flate.Writer + tw *truncWriter + p *sync.Pool +} + +func (w *flateWriteWrapper) Write(p []byte) (int, error) { + if w.fw == nil { + return 0, errWriteClosed + } + return w.fw.Write(p) +} + +func (w *flateWriteWrapper) Close() error { + if w.fw == nil { + return errWriteClosed + } + err1 := w.fw.Flush() + w.p.Put(w.fw) + w.fw = nil + if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { + return errors.New("websocket: internal error, unexpected bytes at end of flate stream") + } + err2 := w.tw.w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +type flateReadWrapper struct { + fr io.ReadCloser +} + +func (r *flateReadWrapper) Read(p []byte) (int, error) { + if r.fr == nil { + return 0, io.ErrClosedPipe + } + n, err := r.fr.Read(p) + if err == io.EOF { + // Preemptively place the reader back in the pool. This helps with + // scenarios where the application does not call NextReader() soon after + // this final read. + if err := r.Close(); err != nil { + log.Printf("websocket: flateReadWrapper.Close() returned error: %v", err) + } + } + return n, err +} + +func (r *flateReadWrapper) Close() error { + if r.fr == nil { + return io.ErrClosedPipe + } + err := r.fr.Close() + flateReaderPool.Put(r.fr) + r.fr = nil + return err +} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go new file mode 100644 index 000000000..221e6cf79 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -0,0 +1,1267 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "crypto/rand" + "encoding/binary" + "errors" + "io" + "log" + "net" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" +) + +const ( + // Frame header byte 0 bits from Section 5.2 of RFC 6455 + finalBit = 1 << 7 + rsv1Bit = 1 << 6 + rsv2Bit = 1 << 5 + rsv3Bit = 1 << 4 + + // Frame header byte 1 bits from Section 5.2 of RFC 6455 + maskBit = 1 << 7 + + maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask + maxControlFramePayloadSize = 125 + + writeWait = time.Second + + defaultReadBufferSize = 4096 + defaultWriteBufferSize = 4096 + + continuationFrame = 0 + noFrame = -1 +) + +// Close codes defined in RFC 6455, section 11.7. +const ( + CloseNormalClosure = 1000 + CloseGoingAway = 1001 + CloseProtocolError = 1002 + CloseUnsupportedData = 1003 + CloseNoStatusReceived = 1005 + CloseAbnormalClosure = 1006 + CloseInvalidFramePayloadData = 1007 + ClosePolicyViolation = 1008 + CloseMessageTooBig = 1009 + CloseMandatoryExtension = 1010 + CloseInternalServerErr = 1011 + CloseServiceRestart = 1012 + CloseTryAgainLater = 1013 + CloseTLSHandshake = 1015 +) + +// The message types are defined in RFC 6455, section 11.8. +const ( + // TextMessage denotes a text data message. The text message payload is + // interpreted as UTF-8 encoded text data. + TextMessage = 1 + + // BinaryMessage denotes a binary data message. + BinaryMessage = 2 + + // CloseMessage denotes a close control message. The optional message + // payload contains a numeric code and text. Use the FormatCloseMessage + // function to format a close message payload. + CloseMessage = 8 + + // PingMessage denotes a ping control message. The optional message payload + // is UTF-8 encoded text. + PingMessage = 9 + + // PongMessage denotes a pong control message. The optional message payload + // is UTF-8 encoded text. 
+ PongMessage = 10 +) + +// ErrCloseSent is returned when the application writes a message to the +// connection after sending a close message. +var ErrCloseSent = errors.New("websocket: close sent") + +// ErrReadLimit is returned when reading a message that is larger than the +// read limit set for the connection. +var ErrReadLimit = errors.New("websocket: read limit exceeded") + +// netError satisfies the net Error interface. +type netError struct { + msg string + temporary bool + timeout bool +} + +func (e *netError) Error() string { return e.msg } +func (e *netError) Temporary() bool { return e.temporary } +func (e *netError) Timeout() bool { return e.timeout } + +// CloseError represents a close message. +type CloseError struct { + // Code is defined in RFC 6455, section 11.7. + Code int + + // Text is the optional text payload. + Text string +} + +func (e *CloseError) Error() string { + s := []byte("websocket: close ") + s = strconv.AppendInt(s, int64(e.Code), 10) + switch e.Code { + case CloseNormalClosure: + s = append(s, " (normal)"...) + case CloseGoingAway: + s = append(s, " (going away)"...) + case CloseProtocolError: + s = append(s, " (protocol error)"...) + case CloseUnsupportedData: + s = append(s, " (unsupported data)"...) + case CloseNoStatusReceived: + s = append(s, " (no status)"...) + case CloseAbnormalClosure: + s = append(s, " (abnormal closure)"...) + case CloseInvalidFramePayloadData: + s = append(s, " (invalid payload data)"...) + case ClosePolicyViolation: + s = append(s, " (policy violation)"...) + case CloseMessageTooBig: + s = append(s, " (message too big)"...) + case CloseMandatoryExtension: + s = append(s, " (mandatory extension missing)"...) + case CloseInternalServerErr: + s = append(s, " (internal server error)"...) + case CloseTLSHandshake: + s = append(s, " (TLS handshake error)"...) + } + if e.Text != "" { + s = append(s, ": "...) + s = append(s, e.Text...) 
+ } + return string(s) +} + +// IsCloseError returns boolean indicating whether the error is a *CloseError +// with one of the specified codes. +func IsCloseError(err error, codes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range codes { + if e.Code == code { + return true + } + } + } + return false +} + +// IsUnexpectedCloseError returns boolean indicating whether the error is a +// *CloseError with a code not in the list of expected codes. +func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range expectedCodes { + if e.Code == code { + return false + } + } + return true + } + return false +} + +var ( + errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} + errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} + errBadWriteOpCode = errors.New("websocket: bad write message type") + errWriteClosed = errors.New("websocket: write closed") + errInvalidControlFrame = errors.New("websocket: invalid control frame") +) + +// maskRand is an io.Reader for generating mask bytes. The reader is initialized +// to crypto/rand Reader. Tests swap the reader to a math/rand reader for +// reproducible results. +var maskRand = rand.Reader + +// newMaskKey returns a new 32 bit value for masking client frames. 
+func newMaskKey() [4]byte { + var k [4]byte + _, _ = io.ReadFull(maskRand, k[:]) + return k +} + +func hideTempErr(err error) error { + if e, ok := err.(net.Error); ok { + err = &netError{msg: e.Error(), timeout: e.Timeout()} + } + return err +} + +func isControl(frameType int) bool { + return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage +} + +func isData(frameType int) bool { + return frameType == TextMessage || frameType == BinaryMessage +} + +var validReceivedCloseCodes = map[int]bool{ + // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number + + CloseNormalClosure: true, + CloseGoingAway: true, + CloseProtocolError: true, + CloseUnsupportedData: true, + CloseNoStatusReceived: false, + CloseAbnormalClosure: false, + CloseInvalidFramePayloadData: true, + ClosePolicyViolation: true, + CloseMessageTooBig: true, + CloseMandatoryExtension: true, + CloseInternalServerErr: true, + CloseServiceRestart: true, + CloseTryAgainLater: true, + CloseTLSHandshake: false, +} + +func isValidReceivedCloseCode(code int) bool { + return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) +} + +// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this +// interface. The type of the value stored in a pool is not specified. +type BufferPool interface { + // Get gets a value from the pool or returns nil if the pool is empty. + Get() interface{} + // Put adds a value to the pool. + Put(interface{}) +} + +// writePoolData is the type added to the write buffer pool. This wrapper is +// used to prevent applications from peeking at and depending on the values +// added to the pool. +type writePoolData struct{ buf []byte } + +// The Conn type represents a WebSocket connection. +type Conn struct { + conn net.Conn + isServer bool + subprotocol string + + // Write fields + mu chan struct{} // used as mutex to protect write to conn + writeBuf []byte // frame is constructed in this buffer. 
+ writePool BufferPool + writeBufSize int + writeDeadline time.Time + writer io.WriteCloser // the current writer returned to the application + isWriting bool // for best-effort concurrent write detection + + writeErrMu sync.Mutex + writeErr error + + enableWriteCompression bool + compressionLevel int + newCompressionWriter func(io.WriteCloser, int) io.WriteCloser + + // Read fields + reader io.ReadCloser // the current reader returned to the application + readErr error + br *bufio.Reader + // bytes remaining in current frame. + // set setReadRemaining to safely update this value and prevent overflow + readRemaining int64 + readFinal bool // true the current message has more frames. + readLength int64 // Message size. + readLimit int64 // Maximum message size. + readMaskPos int + readMaskKey [4]byte + handlePong func(string) error + handlePing func(string) error + handleClose func(int, string) error + readErrCount int + messageReader *messageReader // the current low-level reader + + readDecompress bool // whether last read frame had RSV1 set + newDecompressionReader func(io.Reader) io.ReadCloser +} + +func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn { + + if br == nil { + if readBufferSize == 0 { + readBufferSize = defaultReadBufferSize + } else if readBufferSize < maxControlFramePayloadSize { + // must be large enough for control frame + readBufferSize = maxControlFramePayloadSize + } + br = bufio.NewReaderSize(conn, readBufferSize) + } + + if writeBufferSize <= 0 { + writeBufferSize = defaultWriteBufferSize + } + writeBufferSize += maxFrameHeaderSize + + if writeBuf == nil && writeBufferPool == nil { + writeBuf = make([]byte, writeBufferSize) + } + + mu := make(chan struct{}, 1) + mu <- struct{}{} + c := &Conn{ + isServer: isServer, + br: br, + conn: conn, + mu: mu, + readFinal: true, + writeBuf: writeBuf, + writePool: writeBufferPool, + writeBufSize: 
writeBufferSize, + enableWriteCompression: true, + compressionLevel: defaultCompressionLevel, + } + c.SetCloseHandler(nil) + c.SetPingHandler(nil) + c.SetPongHandler(nil) + return c +} + +// setReadRemaining tracks the number of bytes remaining on the connection. If n +// overflows, an ErrReadLimit is returned. +func (c *Conn) setReadRemaining(n int64) error { + if n < 0 { + return ErrReadLimit + } + + c.readRemaining = n + return nil +} + +// Subprotocol returns the negotiated protocol for the connection. +func (c *Conn) Subprotocol() string { + return c.subprotocol +} + +// Close closes the underlying network connection without sending or waiting +// for a close message. +func (c *Conn) Close() error { + return c.conn.Close() +} + +// LocalAddr returns the local network address. +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. +func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +// Write methods + +func (c *Conn) writeFatal(err error) error { + err = hideTempErr(err) + c.writeErrMu.Lock() + if c.writeErr == nil { + c.writeErr = err + } + c.writeErrMu.Unlock() + return err +} + +func (c *Conn) read(n int) ([]byte, error) { + p, err := c.br.Peek(n) + if err == io.EOF { + err = errUnexpectedEOF + } + if _, err := c.br.Discard(len(p)); err != nil { + return p, err + } + return p, err +} + +func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error { + <-c.mu + defer func() { c.mu <- struct{}{} }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + if err := c.conn.SetWriteDeadline(deadline); err != nil { + return c.writeFatal(err) + } + if len(buf1) == 0 { + _, err = c.conn.Write(buf0) + } else { + err = c.writeBufs(buf0, buf1) + } + if err != nil { + return c.writeFatal(err) + } + if frameType == CloseMessage { + _ = c.writeFatal(ErrCloseSent) + } + return nil +} + +func (c *Conn) writeBufs(bufs 
...[]byte) error { + b := net.Buffers(bufs) + _, err := b.WriteTo(c.conn) + return err +} + +// WriteControl writes a control message with the given deadline. The allowed +// message types are CloseMessage, PingMessage and PongMessage. +func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { + if !isControl(messageType) { + return errBadWriteOpCode + } + if len(data) > maxControlFramePayloadSize { + return errInvalidControlFrame + } + + b0 := byte(messageType) | finalBit + b1 := byte(len(data)) + if !c.isServer { + b1 |= maskBit + } + + buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) + buf = append(buf, b0, b1) + + if c.isServer { + buf = append(buf, data...) + } else { + key := newMaskKey() + buf = append(buf, key[:]...) + buf = append(buf, data...) + maskBytes(key, 0, buf[6:]) + } + + d := 1000 * time.Hour + if !deadline.IsZero() { + d = time.Until(deadline) + if d < 0 { + return errWriteTimeout + } + } + + timer := time.NewTimer(d) + select { + case <-c.mu: + timer.Stop() + case <-timer.C: + return errWriteTimeout + } + defer func() { c.mu <- struct{}{} }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + if err := c.conn.SetWriteDeadline(deadline); err != nil { + return c.writeFatal(err) + } + _, err = c.conn.Write(buf) + if err != nil { + return c.writeFatal(err) + } + if messageType == CloseMessage { + _ = c.writeFatal(ErrCloseSent) + } + return err +} + +// beginMessage prepares a connection and message writer for a new message. +func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { + // Close previous writer if not already closed by the application. It's + // probably better to return an error in this situation, but we cannot + // change this without breaking existing applications. 
+ if c.writer != nil { + if err := c.writer.Close(); err != nil { + log.Printf("websocket: discarding writer close error: %v", err) + } + c.writer = nil + } + + if !isControl(messageType) && !isData(messageType) { + return errBadWriteOpCode + } + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + mw.c = c + mw.frameType = messageType + mw.pos = maxFrameHeaderSize + + if c.writeBuf == nil { + wpd, ok := c.writePool.Get().(writePoolData) + if ok { + c.writeBuf = wpd.buf + } else { + c.writeBuf = make([]byte, c.writeBufSize) + } + } + return nil +} + +// NextWriter returns a writer for the next message to send. The writer's Close +// method flushes the complete message to the network. +// +// There can be at most one open writer on a connection. NextWriter closes the +// previous writer if the application has not already done so. +// +// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and +// PongMessage) are supported. +func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { + return nil, err + } + c.writer = &mw + if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { + w := c.newCompressionWriter(c.writer, c.compressionLevel) + mw.compress = true + c.writer = w + } + return c.writer, nil +} + +type messageWriter struct { + c *Conn + compress bool // whether next call to flushFrame should set RSV1 + pos int // end of data in writeBuf. + frameType int // type of the current frame. + err error +} + +func (w *messageWriter) endMessage(err error) error { + if w.err != nil { + return err + } + c := w.c + w.err = err + c.writer = nil + if c.writePool != nil { + c.writePool.Put(writePoolData{buf: c.writeBuf}) + c.writeBuf = nil + } + return err +} + +// flushFrame writes buffered data and extra as a frame to the network. 
The +// final argument indicates that this is the last frame in the message. +func (w *messageWriter) flushFrame(final bool, extra []byte) error { + c := w.c + length := w.pos - maxFrameHeaderSize + len(extra) + + // Check for invalid control frames. + if isControl(w.frameType) && + (!final || length > maxControlFramePayloadSize) { + return w.endMessage(errInvalidControlFrame) + } + + b0 := byte(w.frameType) + if final { + b0 |= finalBit + } + if w.compress { + b0 |= rsv1Bit + } + w.compress = false + + b1 := byte(0) + if !c.isServer { + b1 |= maskBit + } + + // Assume that the frame starts at beginning of c.writeBuf. + framePos := 0 + if c.isServer { + // Adjust up if mask not included in the header. + framePos = 4 + } + + switch { + case length >= 65536: + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 127 + binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) + case length > 125: + framePos += 6 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 126 + binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) + default: + framePos += 8 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | byte(length) + } + + if !c.isServer { + key := newMaskKey() + copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) + if len(extra) > 0 { + return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode"))) + } + } + + // Write the buffers to the connection with best-effort detection of + // concurrent writes. See the concurrency section in the package + // documentation for more info. 
+ + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + + err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) + + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + + if err != nil { + return w.endMessage(err) + } + + if final { + _ = w.endMessage(errWriteClosed) + return nil + } + + // Setup for next frame. + w.pos = maxFrameHeaderSize + w.frameType = continuationFrame + return nil +} + +func (w *messageWriter) ncopy(max int) (int, error) { + n := len(w.c.writeBuf) - w.pos + if n <= 0 { + if err := w.flushFrame(false, nil); err != nil { + return 0, err + } + n = len(w.c.writeBuf) - w.pos + } + if n > max { + n = max + } + return n, nil +} + +func (w *messageWriter) Write(p []byte) (int, error) { + if w.err != nil { + return 0, w.err + } + + if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { + // Don't buffer large messages. + err := w.flushFrame(false, p) + if err != nil { + return 0, err + } + return len(p), nil + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) WriteString(p string) (int, error) { + if w.err != nil { + return 0, w.err + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if w.err != nil { + return 0, w.err + } + for { + if w.pos == len(w.c.writeBuf) { + err = w.flushFrame(false, nil) + if err != nil { + break + } + } + var n int + n, err = r.Read(w.c.writeBuf[w.pos:]) + w.pos += n + nn += int64(n) + if err != nil { + if err == io.EOF { + err = nil + } + break + } + } + return nn, err +} + +func (w *messageWriter) Close() error { + if w.err != nil { + return 
w.err + } + return w.flushFrame(true, nil) +} + +// WritePreparedMessage writes prepared message into connection. +func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { + frameType, frameData, err := pm.frame(prepareKey{ + isServer: c.isServer, + compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), + compressionLevel: c.compressionLevel, + }) + if err != nil { + return err + } + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + err = c.write(frameType, c.writeDeadline, frameData, nil) + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + return err +} + +// WriteMessage is a helper method for getting a writer using NextWriter, +// writing the message and closing the writer. +func (c *Conn) WriteMessage(messageType int, data []byte) error { + + if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { + // Fast path with no allocations and single frame. + + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { + return err + } + n := copy(c.writeBuf[mw.pos:], data) + mw.pos += n + data = data[n:] + return mw.flushFrame(true, data) + } + + w, err := c.NextWriter(messageType) + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + return w.Close() +} + +// SetWriteDeadline sets the write deadline on the underlying network +// connection. After a write has timed out, the websocket state is corrupt and +// all future writes will return an error. A zero value for t means writes will +// not time out. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = t + return nil +} + +// Read methods + +func (c *Conn) advanceFrame() (int, error) { + // 1. Skip remainder of previous frame. + + if c.readRemaining > 0 { + if _, err := io.CopyN(io.Discard, c.br, c.readRemaining); err != nil { + return noFrame, err + } + } + + // 2. 
Read and parse first two bytes of frame header. + // To aid debugging, collect and report all errors in the first two bytes + // of the header. + + var errors []string + + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + frameType := int(p[0] & 0xf) + final := p[0]&finalBit != 0 + rsv1 := p[0]&rsv1Bit != 0 + rsv2 := p[0]&rsv2Bit != 0 + rsv3 := p[0]&rsv3Bit != 0 + mask := p[1]&maskBit != 0 + if err := c.setReadRemaining(int64(p[1] & 0x7f)); err != nil { + return noFrame, err + } + + c.readDecompress = false + if rsv1 { + if c.newDecompressionReader != nil { + c.readDecompress = true + } else { + errors = append(errors, "RSV1 set") + } + } + + if rsv2 { + errors = append(errors, "RSV2 set") + } + + if rsv3 { + errors = append(errors, "RSV3 set") + } + + switch frameType { + case CloseMessage, PingMessage, PongMessage: + if c.readRemaining > maxControlFramePayloadSize { + errors = append(errors, "len > 125 for control") + } + if !final { + errors = append(errors, "FIN not set on control") + } + case TextMessage, BinaryMessage: + if !c.readFinal { + errors = append(errors, "data before FIN") + } + c.readFinal = final + case continuationFrame: + if c.readFinal { + errors = append(errors, "continuation after FIN") + } + c.readFinal = final + default: + errors = append(errors, "bad opcode "+strconv.Itoa(frameType)) + } + + if mask != c.isServer { + errors = append(errors, "bad MASK") + } + + if len(errors) > 0 { + return noFrame, c.handleProtocolError(strings.Join(errors, ", ")) + } + + // 3. Read and parse frame length as per + // https://tools.ietf.org/html/rfc6455#section-5.2 + // + // The length of the "Payload data", in bytes: if 0-125, that is the payload + // length. + // - If 126, the following 2 bytes interpreted as a 16-bit unsigned + // integer are the payload length. + // - If 127, the following 8 bytes interpreted as + // a 64-bit unsigned integer (the most significant bit MUST be 0) are the + // payload length. 
Multibyte length quantities are expressed in network byte + // order. + + switch c.readRemaining { + case 126: + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + if err := c.setReadRemaining(int64(binary.BigEndian.Uint16(p))); err != nil { + return noFrame, err + } + case 127: + p, err := c.read(8) + if err != nil { + return noFrame, err + } + + if err := c.setReadRemaining(int64(binary.BigEndian.Uint64(p))); err != nil { + return noFrame, err + } + } + + // 4. Handle frame masking. + + if mask { + c.readMaskPos = 0 + p, err := c.read(len(c.readMaskKey)) + if err != nil { + return noFrame, err + } + copy(c.readMaskKey[:], p) + } + + // 5. For text and binary messages, enforce read limit and return. + + if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { + + c.readLength += c.readRemaining + // Don't allow readLength to overflow in the presence of a large readRemaining + // counter. + if c.readLength < 0 { + return noFrame, ErrReadLimit + } + + if c.readLimit > 0 && c.readLength > c.readLimit { + if err := c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)); err != nil { + return noFrame, err + } + return noFrame, ErrReadLimit + } + + return frameType, nil + } + + // 6. Read control frame payload. + + var payload []byte + if c.readRemaining > 0 { + payload, err = c.read(int(c.readRemaining)) + if err := c.setReadRemaining(0); err != nil { + return noFrame, err + } + if err != nil { + return noFrame, err + } + if c.isServer { + maskBytes(c.readMaskKey, 0, payload) + } + } + + // 7. Process control frame payload. 
+ + switch frameType { + case PongMessage: + if err := c.handlePong(string(payload)); err != nil { + return noFrame, err + } + case PingMessage: + if err := c.handlePing(string(payload)); err != nil { + return noFrame, err + } + case CloseMessage: + closeCode := CloseNoStatusReceived + closeText := "" + if len(payload) >= 2 { + closeCode = int(binary.BigEndian.Uint16(payload)) + if !isValidReceivedCloseCode(closeCode) { + return noFrame, c.handleProtocolError("bad close code " + strconv.Itoa(closeCode)) + } + closeText = string(payload[2:]) + if !utf8.ValidString(closeText) { + return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") + } + } + if err := c.handleClose(closeCode, closeText); err != nil { + return noFrame, err + } + return noFrame, &CloseError{Code: closeCode, Text: closeText} + } + + return frameType, nil +} + +func (c *Conn) handleProtocolError(message string) error { + data := FormatCloseMessage(CloseProtocolError, message) + if len(data) > maxControlFramePayloadSize { + data = data[:maxControlFramePayloadSize] + } + if err := c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)); err != nil { + return err + } + return errors.New("websocket: " + message) +} + +// NextReader returns the next data message received from the peer. The +// returned messageType is either TextMessage or BinaryMessage. +// +// There can be at most one open reader on a connection. NextReader discards +// the previous message if the application has not already consumed it. +// +// Applications must break out of the application's read loop when this method +// returns a non-nil error value. Errors returned from this method are +// permanent. Once this method returns a non-nil error, all subsequent calls to +// this method return the same error. +func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { + // Close previous reader, only relevant for decompression. 
+ if c.reader != nil { + if err := c.reader.Close(); err != nil { + log.Printf("websocket: discarding reader close error: %v", err) + } + c.reader = nil + } + + c.messageReader = nil + c.readLength = 0 + + for c.readErr == nil { + frameType, err := c.advanceFrame() + if err != nil { + c.readErr = hideTempErr(err) + break + } + + if frameType == TextMessage || frameType == BinaryMessage { + c.messageReader = &messageReader{c} + c.reader = c.messageReader + if c.readDecompress { + c.reader = c.newDecompressionReader(c.reader) + } + return frameType, c.reader, nil + } + } + + // Applications that do handle the error returned from this method spin in + // tight loop on connection failure. To help application developers detect + // this error, panic on repeated reads to the failed connection. + c.readErrCount++ + if c.readErrCount >= 1000 { + panic("repeated read on failed websocket connection") + } + + return noFrame, nil, c.readErr +} + +type messageReader struct{ c *Conn } + +func (r *messageReader) Read(b []byte) (int, error) { + c := r.c + if c.messageReader != r { + return 0, io.EOF + } + + for c.readErr == nil { + + if c.readRemaining > 0 { + if int64(len(b)) > c.readRemaining { + b = b[:c.readRemaining] + } + n, err := c.br.Read(b) + c.readErr = hideTempErr(err) + if c.isServer { + c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) + } + rem := c.readRemaining + rem -= int64(n) + if err := c.setReadRemaining(rem); err != nil { + return 0, err + } + if c.readRemaining > 0 && c.readErr == io.EOF { + c.readErr = errUnexpectedEOF + } + return n, c.readErr + } + + if c.readFinal { + c.messageReader = nil + return 0, io.EOF + } + + frameType, err := c.advanceFrame() + switch { + case err != nil: + c.readErr = hideTempErr(err) + case frameType == TextMessage || frameType == BinaryMessage: + c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") + } + } + + err := c.readErr + if err == io.EOF && c.messageReader == r { + err 
= errUnexpectedEOF + } + return 0, err +} + +func (r *messageReader) Close() error { + return nil +} + +// ReadMessage is a helper method for getting a reader using NextReader and +// reading from that reader to a buffer. +func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { + var r io.Reader + messageType, r, err = c.NextReader() + if err != nil { + return messageType, nil, err + } + p, err = io.ReadAll(r) + return messageType, p, err +} + +// SetReadDeadline sets the read deadline on the underlying network connection. +// After a read has timed out, the websocket connection state is corrupt and +// all future reads will return an error. A zero value for t means reads will +// not time out. +func (c *Conn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a +// message exceeds the limit, the connection sends a close message to the peer +// and returns ErrReadLimit to the application. +func (c *Conn) SetReadLimit(limit int64) { + c.readLimit = limit +} + +// CloseHandler returns the current close handler +func (c *Conn) CloseHandler() func(code int, text string) error { + return c.handleClose +} + +// SetCloseHandler sets the handler for close messages received from the peer. +// The code argument to h is the received close code or CloseNoStatusReceived +// if the close message is empty. The default close handler sends a close +// message back to the peer. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// close messages as described in the section on Control Messages above. +// +// The connection read methods return a CloseError when a close message is +// received. Most applications should handle close messages as part of their +// normal error handling. 
Applications should only set a close handler when the +// application must perform some action before sending a close message back to +// the peer. +func (c *Conn) SetCloseHandler(h func(code int, text string) error) { + if h == nil { + h = func(code int, text string) error { + message := FormatCloseMessage(code, "") + if err := c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)); err != nil { + return err + } + return nil + } + } + c.handleClose = h +} + +// PingHandler returns the current ping handler +func (c *Conn) PingHandler() func(appData string) error { + return c.handlePing +} + +// SetPingHandler sets the handler for ping messages received from the peer. +// The appData argument to h is the PING message application data. The default +// ping handler sends a pong to the peer. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// ping messages as described in the section on Control Messages above. +func (c *Conn) SetPingHandler(h func(appData string) error) { + if h == nil { + h = func(message string) error { + err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) + if err == ErrCloseSent { + return nil + } else if _, ok := err.(net.Error); ok { + return nil + } + return err + } + } + c.handlePing = h +} + +// PongHandler returns the current pong handler +func (c *Conn) PongHandler() func(appData string) error { + return c.handlePong +} + +// SetPongHandler sets the handler for pong messages received from the peer. +// The appData argument to h is the PONG message application data. The default +// pong handler does nothing. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// pong messages as described in the section on Control Messages above. 
+func (c *Conn) SetPongHandler(h func(appData string) error) { + if h == nil { + h = func(string) error { return nil } + } + c.handlePong = h +} + +// NetConn returns the underlying connection that is wrapped by c. +// Note that writing to or reading from this connection directly will corrupt the +// WebSocket connection. +func (c *Conn) NetConn() net.Conn { + return c.conn +} + +// UnderlyingConn returns the internal net.Conn. This can be used to further +// modifications to connection specific flags. +// Deprecated: Use the NetConn method. +func (c *Conn) UnderlyingConn() net.Conn { + return c.conn +} + +// EnableWriteCompression enables and disables write compression of +// subsequent text and binary messages. This function is a noop if +// compression was not negotiated with the peer. +func (c *Conn) EnableWriteCompression(enable bool) { + c.enableWriteCompression = enable +} + +// SetCompressionLevel sets the flate compression level for subsequent text and +// binary messages. This function is a noop if compression was not negotiated +// with the peer. See the compress/flate package for a description of +// compression levels. +func (c *Conn) SetCompressionLevel(level int) error { + if !isValidCompressionLevel(level) { + return errors.New("websocket: invalid compression level") + } + c.compressionLevel = level + return nil +} + +// FormatCloseMessage formats closeCode and text as a WebSocket close message. +// An empty message is returned for code CloseNoStatusReceived. +func FormatCloseMessage(closeCode int, text string) []byte { + if closeCode == CloseNoStatusReceived { + // Return empty message because it's illegal to send + // CloseNoStatusReceived. Return non-nil value in case application + // checks for nil. 
+ return []byte{} + } + buf := make([]byte, 2+len(text)) + binary.BigEndian.PutUint16(buf, uint16(closeCode)) + copy(buf[2:], text) + return buf +} diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go new file mode 100644 index 000000000..8db0cef95 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/doc.go @@ -0,0 +1,227 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements the WebSocket protocol defined in RFC 6455. +// +// Overview +// +// The Conn type represents a WebSocket connection. A server application calls +// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn: +// +// var upgrader = websocket.Upgrader{ +// ReadBufferSize: 1024, +// WriteBufferSize: 1024, +// } +// +// func handler(w http.ResponseWriter, r *http.Request) { +// conn, err := upgrader.Upgrade(w, r, nil) +// if err != nil { +// log.Println(err) +// return +// } +// ... Use conn to send and receive messages. +// } +// +// Call the connection's WriteMessage and ReadMessage methods to send and +// receive messages as a slice of bytes. This snippet of code shows how to echo +// messages using these methods: +// +// for { +// messageType, p, err := conn.ReadMessage() +// if err != nil { +// log.Println(err) +// return +// } +// if err := conn.WriteMessage(messageType, p); err != nil { +// log.Println(err) +// return +// } +// } +// +// In above snippet of code, p is a []byte and messageType is an int with value +// websocket.BinaryMessage or websocket.TextMessage. +// +// An application can also send and receive messages using the io.WriteCloser +// and io.Reader interfaces. To send a message, call the connection NextWriter +// method to get an io.WriteCloser, write the message to the writer and close +// the writer when done. 
To receive a message, call the connection NextReader +// method to get an io.Reader and read until io.EOF is returned. This snippet +// shows how to echo messages using the NextWriter and NextReader methods: +// +// for { +// messageType, r, err := conn.NextReader() +// if err != nil { +// return +// } +// w, err := conn.NextWriter(messageType) +// if err != nil { +// return err +// } +// if _, err := io.Copy(w, r); err != nil { +// return err +// } +// if err := w.Close(); err != nil { +// return err +// } +// } +// +// Data Messages +// +// The WebSocket protocol distinguishes between text and binary data messages. +// Text messages are interpreted as UTF-8 encoded text. The interpretation of +// binary messages is left to the application. +// +// This package uses the TextMessage and BinaryMessage integer constants to +// identify the two data message types. The ReadMessage and NextReader methods +// return the type of the received message. The messageType argument to the +// WriteMessage and NextWriter methods specifies the type of a sent message. +// +// It is the application's responsibility to ensure that text messages are +// valid UTF-8 encoded text. +// +// Control Messages +// +// The WebSocket protocol defines three types of control messages: close, ping +// and pong. Call the connection WriteControl, WriteMessage or NextWriter +// methods to send a control message to the peer. +// +// Connections handle received close messages by calling the handler function +// set with the SetCloseHandler method and by returning a *CloseError from the +// NextReader, ReadMessage or the message Read method. The default close +// handler sends a close message to the peer. +// +// Connections handle received ping messages by calling the handler function +// set with the SetPingHandler method. The default ping handler sends a pong +// message to the peer. 
+// +// Connections handle received pong messages by calling the handler function +// set with the SetPongHandler method. The default pong handler does nothing. +// If an application sends ping messages, then the application should set a +// pong handler to receive the corresponding pong. +// +// The control message handler functions are called from the NextReader, +// ReadMessage and message reader Read methods. The default close and ping +// handlers can block these methods for a short time when the handler writes to +// the connection. +// +// The application must read the connection to process close, ping and pong +// messages sent from the peer. If the application is not otherwise interested +// in messages from the peer, then the application should start a goroutine to +// read and discard messages from the peer. A simple example is: +// +// func readLoop(c *websocket.Conn) { +// for { +// if _, _, err := c.NextReader(); err != nil { +// c.Close() +// break +// } +// } +// } +// +// Concurrency +// +// Connections support one concurrent reader and one concurrent writer. +// +// Applications are responsible for ensuring that no more than one goroutine +// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, +// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and +// that no more than one goroutine calls the read methods (NextReader, +// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) +// concurrently. +// +// The Close and WriteControl methods can be called concurrently with all other +// methods. +// +// Origin Considerations +// +// Web browsers allow Javascript applications to open a WebSocket connection to +// any host. It's up to the server to enforce an origin policy using the Origin +// request header sent by the browser. +// +// The Upgrader calls the function specified in the CheckOrigin field to check +// the origin. 
If the CheckOrigin function returns false, then the Upgrade +// method fails the WebSocket handshake with HTTP status 403. +// +// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail +// the handshake if the Origin request header is present and the Origin host is +// not equal to the Host request header. +// +// The deprecated package-level Upgrade function does not perform origin +// checking. The application is responsible for checking the Origin header +// before calling the Upgrade function. +// +// Buffers +// +// Connections buffer network input and output to reduce the number +// of system calls when reading or writing messages. +// +// Write buffers are also used for constructing WebSocket frames. See RFC 6455, +// Section 5 for a discussion of message framing. A WebSocket frame header is +// written to the network each time a write buffer is flushed to the network. +// Decreasing the size of the write buffer can increase the amount of framing +// overhead on the connection. +// +// The buffer sizes in bytes are specified by the ReadBufferSize and +// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default +// size of 4096 when a buffer size field is set to zero. The Upgrader reuses +// buffers created by the HTTP server when a buffer size field is set to zero. +// The HTTP server buffers have a size of 4096 at the time of this writing. +// +// The buffer sizes do not limit the size of a message that can be read or +// written by a connection. +// +// Buffers are held for the lifetime of the connection by default. If the +// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the +// write buffer only when writing a message. +// +// Applications should tune the buffer sizes to balance memory use and +// performance. Increasing the buffer size uses more memory, but can reduce the +// number of system calls to read or write the network. 
In the case of writing, +// increasing the buffer size can reduce the number of frame headers written to +// the network. +// +// Some guidelines for setting buffer parameters are: +// +// Limit the buffer sizes to the maximum expected message size. Buffers larger +// than the largest message do not provide any benefit. +// +// Depending on the distribution of message sizes, setting the buffer size to +// a value less than the maximum expected message size can greatly reduce memory +// use with a small impact on performance. Here's an example: If 99% of the +// messages are smaller than 256 bytes and the maximum message size is 512 +// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls +// than a buffer size of 512 bytes. The memory savings is 50%. +// +// A write buffer pool is useful when the application has a modest number +// writes over a large number of connections. when buffers are pooled, a larger +// buffer size has a reduced impact on total memory use and has the benefit of +// reducing system calls and frame overhead. +// +// Compression EXPERIMENTAL +// +// Per message compression extensions (RFC 7692) are experimentally supported +// by this package in a limited capacity. Setting the EnableCompression option +// to true in Dialer or Upgrader will attempt to negotiate per message deflate +// support. +// +// var upgrader = websocket.Upgrader{ +// EnableCompression: true, +// } +// +// If compression was successfully negotiated with the connection's peer, any +// message received in compressed form will be automatically decompressed. +// All Read methods will return uncompressed bytes. +// +// Per message compression of messages written to a connection can be enabled +// or disabled by calling the corresponding Conn method: +// +// conn.EnableWriteCompression(false) +// +// Currently this package does not support compression with "context takeover". 
+// This means that messages must be compressed and decompressed in isolation, +// without retaining sliding window or dictionary state across messages. For +// more details refer to RFC 7692. +// +// Use of compression is experimental and may result in decreased performance. +package websocket diff --git a/vendor/github.com/gorilla/websocket/join.go b/vendor/github.com/gorilla/websocket/join.go new file mode 100644 index 000000000..c64f8c829 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/join.go @@ -0,0 +1,42 @@ +// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "io" + "strings" +) + +// JoinMessages concatenates received messages to create a single io.Reader. +// The string term is appended to each message. The returned reader does not +// support concurrent calls to the Read method. +func JoinMessages(c *Conn, term string) io.Reader { + return &joinReader{c: c, term: term} +} + +type joinReader struct { + c *Conn + term string + r io.Reader +} + +func (r *joinReader) Read(p []byte) (int, error) { + if r.r == nil { + var err error + _, r.r, err = r.c.NextReader() + if err != nil { + return 0, err + } + if r.term != "" { + r.r = io.MultiReader(r.r, strings.NewReader(r.term)) + } + } + n, err := r.r.Read(p) + if err == io.EOF { + err = nil + r.r = nil + } + return n, err +} diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go new file mode 100644 index 000000000..dc2c1f641 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/json.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "encoding/json" + "io" +) + +// WriteJSON writes the JSON encoding of v as a message. 
+// +// Deprecated: Use c.WriteJSON instead. +func WriteJSON(c *Conn, v interface{}) error { + return c.WriteJSON(v) +} + +// WriteJSON writes the JSON encoding of v as a message. +// +// See the documentation for encoding/json Marshal for details about the +// conversion of Go values to JSON. +func (c *Conn) WriteJSON(v interface{}) error { + w, err := c.NextWriter(TextMessage) + if err != nil { + return err + } + err1 := json.NewEncoder(w).Encode(v) + err2 := w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// Deprecated: Use c.ReadJSON instead. +func ReadJSON(c *Conn, v interface{}) error { + return c.ReadJSON(v) +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// See the documentation for the encoding/json Unmarshal function for details +// about the conversion of JSON to a Go value. +func (c *Conn) ReadJSON(v interface{}) error { + _, r, err := c.NextReader() + if err != nil { + return err + } + err = json.NewDecoder(r).Decode(v) + if err == io.EOF { + // One value is expected in the message. + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go new file mode 100644 index 000000000..67d0968be --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -0,0 +1,59 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +//go:build !appengine +// +build !appengine + +package websocket + +import "unsafe" + +// #nosec G103 -- (CWE-242) Has been audited +const wordSize = int(unsafe.Sizeof(uintptr(0))) + +func maskBytes(key [4]byte, pos int, b []byte) int { + // Mask one byte at a time for small buffers. 
+ if len(b) < 2*wordSize { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 + } + + // Mask one byte at a time to word boundary. + //#nosec G103 -- (CWE-242) Has been audited + if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { + n = wordSize - n + for i := range b[:n] { + b[i] ^= key[pos&3] + pos++ + } + b = b[n:] + } + + // Create aligned word size key. + var k [wordSize]byte + for i := range k { + k[i] = key[(pos+i)&3] + } + //#nosec G103 -- (CWE-242) Has been audited + kw := *(*uintptr)(unsafe.Pointer(&k)) + + // Mask one word at a time. + n := (len(b) / wordSize) * wordSize + for i := 0; i < n; i += wordSize { + //#nosec G103 -- (CWE-242) Has been audited + *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw + } + + // Mask one byte at a time for remaining bytes. + b = b[n:] + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go new file mode 100644 index 000000000..36250ca7c --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask_safe.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +//go:build appengine +// +build appengine + +package websocket + +func maskBytes(key [4]byte, pos int, b []byte) int { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go new file mode 100644 index 000000000..c854225e9 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/prepared.go @@ -0,0 +1,102 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "bytes" + "net" + "sync" + "time" +) + +// PreparedMessage caches on the wire representations of a message payload. +// Use PreparedMessage to efficiently send a message payload to multiple +// connections. PreparedMessage is especially useful when compression is used +// because the CPU and memory expensive compression operation can be executed +// once for a given set of compression options. +type PreparedMessage struct { + messageType int + data []byte + mu sync.Mutex + frames map[prepareKey]*preparedFrame +} + +// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. +type prepareKey struct { + isServer bool + compress bool + compressionLevel int +} + +// preparedFrame contains data in wire representation. +type preparedFrame struct { + once sync.Once + data []byte +} + +// NewPreparedMessage returns an initialized PreparedMessage. You can then send +// it to connection using WritePreparedMessage method. Valid wire +// representation will be calculated lazily only once for a set of current +// connection options. +func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { + pm := &PreparedMessage{ + messageType: messageType, + frames: make(map[prepareKey]*preparedFrame), + data: data, + } + + // Prepare a plain server frame. + _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) + if err != nil { + return nil, err + } + + // To protect against caller modifying the data argument, remember the data + // copied to the plain server frame. + pm.data = frameData[len(frameData)-len(data):] + return pm, nil +} + +func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { + pm.mu.Lock() + frame, ok := pm.frames[key] + if !ok { + frame = &preparedFrame{} + pm.frames[key] = frame + } + pm.mu.Unlock() + + var err error + frame.once.Do(func() { + // Prepare a frame using a 'fake' connection. 
+ // TODO: Refactor code in conn.go to allow more direct construction of + // the frame. + mu := make(chan struct{}, 1) + mu <- struct{}{} + var nc prepareConn + c := &Conn{ + conn: &nc, + mu: mu, + isServer: key.isServer, + compressionLevel: key.compressionLevel, + enableWriteCompression: true, + writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize), + } + if key.compress { + c.newCompressionWriter = compressNoContextTakeover + } + err = c.WriteMessage(pm.messageType, pm.data) + frame.data = nc.buf.Bytes() + }) + return pm.messageType, frame.data, err +} + +type prepareConn struct { + buf bytes.Buffer + net.Conn +} + +func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) } +func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go new file mode 100644 index 000000000..80f55d1ea --- /dev/null +++ b/vendor/github.com/gorilla/websocket/proxy.go @@ -0,0 +1,86 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "bufio" + "encoding/base64" + "errors" + "log" + "net" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/proxy" +) + +type netDialerFunc func(network, addr string) (net.Conn, error) + +func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { + return fn(network, addr) +} + +func init() { + proxy.RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy.Dialer) (proxy.Dialer, error) { + return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil + }) +} + +type httpProxyDialer struct { + proxyURL *url.URL + forwardDial func(network, addr string) (net.Conn, error) +} + +func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) { + hostPort, _ := hostPortNoPort(hpd.proxyURL) + conn, err := hpd.forwardDial(network, hostPort) + if err != nil { + return nil, err + } + + connectHeader := make(http.Header) + if user := hpd.proxyURL.User; user != nil { + proxyUser := user.Username() + if proxyPassword, passwordSet := user.Password(); passwordSet { + credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) + connectHeader.Set("Proxy-Authorization", "Basic "+credential) + } + } + + connectReq := &http.Request{ + Method: http.MethodConnect, + URL: &url.URL{Opaque: addr}, + Host: addr, + Header: connectHeader, + } + + if err := connectReq.Write(conn); err != nil { + if err := conn.Close(); err != nil { + log.Printf("httpProxyDialer: failed to close connection: %v", err) + } + return nil, err + } + + // Read response. It's OK to use and discard buffered reader here becaue + // the remote server does not speak until spoken to. 
+ br := bufio.NewReader(conn) + resp, err := http.ReadResponse(br, connectReq) + if err != nil { + if err := conn.Close(); err != nil { + log.Printf("httpProxyDialer: failed to close connection: %v", err) + } + return nil, err + } + + if resp.StatusCode != 200 { + if err := conn.Close(); err != nil { + log.Printf("httpProxyDialer: failed to close connection: %v", err) + } + f := strings.SplitN(resp.Status, " ", 2) + return nil, errors.New(f[1]) + } + return conn, nil +} diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go new file mode 100644 index 000000000..1e720e1da --- /dev/null +++ b/vendor/github.com/gorilla/websocket/server.go @@ -0,0 +1,389 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "errors" + "io" + "log" + "net/http" + "net/url" + "strings" + "time" +) + +// HandshakeError describes an error with the handshake from the peer. +type HandshakeError struct { + message string +} + +func (e HandshakeError) Error() string { return e.message } + +// Upgrader specifies parameters for upgrading an HTTP connection to a +// WebSocket connection. +// +// It is safe to call Upgrader's methods concurrently. +type Upgrader struct { + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer + // size is zero, then buffers allocated by the HTTP server are used. The + // I/O buffer sizes do not limit the size of the messages that can be sent + // or received. + ReadBufferSize, WriteBufferSize int + + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. 
+ // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. + WriteBufferPool BufferPool + + // Subprotocols specifies the server's supported protocols in order of + // preference. If this field is not nil, then the Upgrade method negotiates a + // subprotocol by selecting the first match in this list with a protocol + // requested by the client. If there's no match, then no protocol is + // negotiated (the Sec-Websocket-Protocol header is not included in the + // handshake response). + Subprotocols []string + + // Error specifies the function for generating HTTP error responses. If Error + // is nil, then http.Error is used to generate the HTTP response. + Error func(w http.ResponseWriter, r *http.Request, status int, reason error) + + // CheckOrigin returns true if the request Origin header is acceptable. If + // CheckOrigin is nil, then a safe default is used: return false if the + // Origin request header is present and the origin host is not equal to + // request Host header. + // + // A CheckOrigin function should carefully validate the request origin to + // prevent cross-site request forgery. + CheckOrigin func(r *http.Request) bool + + // EnableCompression specify if the server should attempt to negotiate per + // message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. 
+ EnableCompression bool +} + +func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { + err := HandshakeError{reason} + if u.Error != nil { + u.Error(w, r, status, err) + } else { + w.Header().Set("Sec-Websocket-Version", "13") + http.Error(w, http.StatusText(status), status) + } + return nil, err +} + +// checkSameOrigin returns true if the origin is not set or is equal to the request host. +func checkSameOrigin(r *http.Request) bool { + origin := r.Header["Origin"] + if len(origin) == 0 { + return true + } + u, err := url.Parse(origin[0]) + if err != nil { + return false + } + return equalASCIIFold(u.Host, r.Host) +} + +func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { + if u.Subprotocols != nil { + clientProtocols := Subprotocols(r) + for _, serverProtocol := range u.Subprotocols { + for _, clientProtocol := range clientProtocols { + if clientProtocol == serverProtocol { + return clientProtocol + } + } + } + } else if responseHeader != nil { + return responseHeader.Get("Sec-Websocket-Protocol") + } + return "" +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie). To specify +// subprotocols supported by the server, set Upgrader.Subprotocols directly. +// +// If the upgrade fails, then Upgrade replies to the client with an HTTP error +// response. 
+func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { + const badHandshake = "websocket: the client is not using the websocket protocol: " + + if !tokenListContainsValue(r.Header, "Connection", "upgrade") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header") + } + + if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header") + } + + if r.Method != http.MethodGet { + return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET") + } + + if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") + } + + if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported") + } + + checkOrigin := u.CheckOrigin + if checkOrigin == nil { + checkOrigin = checkSameOrigin + } + if !checkOrigin(r) { + return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin") + } + + challengeKey := r.Header.Get("Sec-Websocket-Key") + if !isValidChallengeKey(challengeKey) { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header must be Base64 encoded value of 16-byte in length") + } + + subprotocol := u.selectSubprotocol(r, responseHeader) + + // Negotiate PMCE + var compress bool + if u.EnableCompression { + for _, ext := range parseExtensions(r.Header) { + if ext[""] != "permessage-deflate" { + continue + } + compress = true + break + } + } + + h, ok := w.(http.Hijacker) + if !ok { + return u.returnError(w, r, 
http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") + } + var brw *bufio.ReadWriter + netConn, brw, err := h.Hijack() + if err != nil { + return u.returnError(w, r, http.StatusInternalServerError, err.Error()) + } + + if brw.Reader.Buffered() > 0 { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, errors.New("websocket: client sent data before handshake is complete") + } + + var br *bufio.Reader + if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 { + // Reuse hijacked buffered reader as connection reader. + br = brw.Reader + } + + buf := bufioWriterBuffer(netConn, brw.Writer) + + var writeBuf []byte + if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 { + // Reuse hijacked write buffer as connection buffer. + writeBuf = buf + } + + c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf) + c.subprotocol = subprotocol + + if compress { + c.newCompressionWriter = compressNoContextTakeover + c.newDecompressionReader = decompressNoContextTakeover + } + + // Use larger of hijacked buffer and connection write buffer for header. + p := buf + if len(c.writeBuf) > len(p) { + p = c.writeBuf + } + p = p[:0] + + p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) + p = append(p, computeAcceptKey(challengeKey)...) + p = append(p, "\r\n"...) + if c.subprotocol != "" { + p = append(p, "Sec-WebSocket-Protocol: "...) + p = append(p, c.subprotocol...) + p = append(p, "\r\n"...) + } + if compress { + p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) + } + for k, vs := range responseHeader { + if k == "Sec-Websocket-Protocol" { + continue + } + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) 
+ for i := 0; i < len(v); i++ { + b := v[i] + if b <= 31 { + // prevent response splitting. + b = ' ' + } + p = append(p, b) + } + p = append(p, "\r\n"...) + } + } + p = append(p, "\r\n"...) + + // Clear deadlines set by HTTP server. + if err := netConn.SetDeadline(time.Time{}); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } + + if u.HandshakeTimeout > 0 { + if err := netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } + } + if _, err = netConn.Write(p); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } + if u.HandshakeTimeout > 0 { + if err := netConn.SetWriteDeadline(time.Time{}); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } + } + + return c, nil +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// Deprecated: Use websocket.Upgrader instead. +// +// Upgrade does not perform origin checking. The application is responsible for +// checking the Origin header before calling Upgrade. An example implementation +// of the same origin policy check is: +// +// if req.Header.Get("Origin") != "http://"+req.Host { +// http.Error(w, "Origin not allowed", http.StatusForbidden) +// return +// } +// +// If the endpoint supports subprotocols, then the application is responsible +// for negotiating the protocol used on the connection. Use the Subprotocols() +// function to get the subprotocols requested by the client. Use the +// Sec-Websocket-Protocol response header to specify the subprotocol selected +// by the application. 
+// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// negotiated subprotocol (Sec-Websocket-Protocol). +// +// The connection buffers IO to the underlying network connection. The +// readBufSize and writeBufSize parameters specify the size of the buffers to +// use. Messages can be larger than the buffers. +// +// If the request is not a valid WebSocket handshake, then Upgrade returns an +// error of type HandshakeError. Applications should handle this error by +// replying to the client with an HTTP error response. +func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { + u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} + u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { + // don't return errors to maintain backwards compatibility + } + u.CheckOrigin = func(r *http.Request) bool { + // allow all connections by default + return true + } + return u.Upgrade(w, r, responseHeader) +} + +// Subprotocols returns the subprotocols requested by the client in the +// Sec-Websocket-Protocol header. +func Subprotocols(r *http.Request) []string { + h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) + if h == "" { + return nil + } + protocols := strings.Split(h, ",") + for i := range protocols { + protocols[i] = strings.TrimSpace(protocols[i]) + } + return protocols +} + +// IsWebSocketUpgrade returns true if the client requested upgrade to the +// WebSocket protocol. +func IsWebSocketUpgrade(r *http.Request) bool { + return tokenListContainsValue(r.Header, "Connection", "upgrade") && + tokenListContainsValue(r.Header, "Upgrade", "websocket") +} + +// bufioReaderSize size returns the size of a bufio.Reader. 
+func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int { + // This code assumes that peek on a reset reader returns + // bufio.Reader.buf[:0]. + // TODO: Use bufio.Reader.Size() after Go 1.10 + br.Reset(originalReader) + if p, err := br.Peek(0); err == nil { + return cap(p) + } + return 0 +} + +// writeHook is an io.Writer that records the last slice passed to it vio +// io.Writer.Write. +type writeHook struct { + p []byte +} + +func (wh *writeHook) Write(p []byte) (int, error) { + wh.p = p + return len(p), nil +} + +// bufioWriterBuffer grabs the buffer from a bufio.Writer. +func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte { + // This code assumes that bufio.Writer.buf[:1] is passed to the + // bufio.Writer's underlying writer. + var wh writeHook + bw.Reset(&wh) + if err := bw.WriteByte(0); err != nil { + panic(err) + } + if err := bw.Flush(); err != nil { + log.Printf("websocket: bufioWriterBuffer: Flush: %v", err) + } + + bw.Reset(originalWriter) + + return wh.p[:cap(wh.p)] +} diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go new file mode 100644 index 000000000..7f3864534 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/tls_handshake.go @@ -0,0 +1,18 @@ +package websocket + +import ( + "context" + "crypto/tls" +) + +func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.HandshakeContext(ctx); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go new file mode 100644 index 000000000..9b1a629bf --- /dev/null +++ b/vendor/github.com/gorilla/websocket/util.go @@ -0,0 +1,298 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/rand" + "crypto/sha1" //#nosec G505 -- (CWE-327) https://datatracker.ietf.org/doc/html/rfc6455#page-54 + "encoding/base64" + "io" + "net/http" + "strings" + "unicode/utf8" +) + +var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +func computeAcceptKey(challengeKey string) string { + h := sha1.New() //#nosec G401 -- (CWE-326) https://datatracker.ietf.org/doc/html/rfc6455#page-54 + h.Write([]byte(challengeKey)) + h.Write(keyGUID) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func generateChallengeKey() (string, error) { + p := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, p); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(p), nil +} + +// Token octets per RFC 2616. +var isTokenOctet = [256]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +// skipSpace returns a slice of the string s with all leading RFC 2616 
linear +// whitespace removed. +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if b := s[i]; b != ' ' && b != '\t' { + break + } + } + return s[i:] +} + +// nextToken returns the leading RFC 2616 token of s and the string following +// the token. +func nextToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if !isTokenOctet[s[i]] { + break + } + } + return s[:i], s[i:] +} + +// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616 +// and the string following the token or quoted string. +func nextTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return nextToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} + +// equalASCIIFold returns true if s is equal to t with ASCII case folding as +// defined in RFC 4790. +func equalASCIIFold(s, t string) bool { + for s != "" && t != "" { + sr, size := utf8.DecodeRuneInString(s) + s = s[size:] + tr, size := utf8.DecodeRuneInString(t) + t = t[size:] + if sr == tr { + continue + } + if 'A' <= sr && sr <= 'Z' { + sr = sr + 'a' - 'A' + } + if 'A' <= tr && tr <= 'Z' { + tr = tr + 'a' - 'A' + } + if sr != tr { + return false + } + } + return s == t +} + +// tokenListContainsValue returns true if the 1#token header with the given +// name contains a token equal to value with ASCII case folding. 
+func tokenListContainsValue(header http.Header, name string, value string) bool { +headers: + for _, s := range header[name] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + s = skipSpace(s) + if s != "" && s[0] != ',' { + continue headers + } + if equalASCIIFold(t, value) { + return true + } + if s == "" { + continue headers + } + s = s[1:] + } + } + return false +} + +// parseExtensions parses WebSocket extensions from a header. +func parseExtensions(header http.Header) []map[string]string { + // From RFC 6455: + // + // Sec-WebSocket-Extensions = extension-list + // extension-list = 1#extension + // extension = extension-token *( ";" extension-param ) + // extension-token = registered-token + // registered-token = token + // extension-param = token [ "=" (token | quoted-string) ] + // ;When using the quoted-string syntax variant, the value + // ;after quoted-string unescaping MUST conform to the + // ;'token' ABNF. + + var result []map[string]string +headers: + for _, s := range header["Sec-Websocket-Extensions"] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + ext := map[string]string{"": t} + for { + s = skipSpace(s) + if !strings.HasPrefix(s, ";") { + break + } + var k string + k, s = nextToken(skipSpace(s[1:])) + if k == "" { + continue headers + } + s = skipSpace(s) + var v string + if strings.HasPrefix(s, "=") { + v, s = nextTokenOrQuoted(skipSpace(s[1:])) + s = skipSpace(s) + } + if s != "" && s[0] != ',' && s[0] != ';' { + continue headers + } + ext[k] = v + } + if s != "" && s[0] != ',' { + continue headers + } + result = append(result, ext) + if s == "" { + continue headers + } + s = s[1:] + } + } + return result +} + +// isValidChallengeKey checks if the argument meets RFC6455 specification. 
+func isValidChallengeKey(s string) bool { + // From RFC6455: + // + // A |Sec-WebSocket-Key| header field with a base64-encoded (see + // Section 4 of [RFC4648]) value that, when decoded, is 16 bytes in + // length. + + if s == "" { + return false + } + decoded, err := base64.StdEncoding.DecodeString(s) + return err == nil && len(decoded) == 16 +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/.gitignore b/vendor/github.com/mochi-mqtt/server/v2/.gitignore new file mode 100644 index 000000000..2381fa499 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/.gitignore @@ -0,0 +1,5 @@ +cmd/mqtt +.DS_Store +*.db +.idea +vendor \ No newline at end of file diff --git a/vendor/github.com/mochi-mqtt/server/v2/.golangci.yml b/vendor/github.com/mochi-mqtt/server/v2/.golangci.yml new file mode 100644 index 000000000..908393664 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/.golangci.yml @@ -0,0 +1,103 @@ +linters: + disable-all: false + fix: false # Fix found issues (if it's supported by the linter). 
+ enable: + # - asasalint + # - asciicheck + # - bidichk + # - bodyclose + # - containedctx + # - contextcheck + #- cyclop + # - deadcode + - decorder + # - depguard + # - dogsled + # - dupl + - durationcheck + # - errchkjson + # - errname + - errorlint + # - execinquery + # - exhaustive + # - exhaustruct + # - exportloopref + #- forcetypeassert + #- forbidigo + #- funlen + #- gci + # - gochecknoglobals + # - gochecknoinits + # - gocognit + # - goconst + # - gocritic + - gocyclo + - godot + # - godox + # - goerr113 + # - gofmt + # - gofumpt + # - goheader + - goimports + # - golint + # - gomnd + # - gomoddirectives + # - gomodguard + # - goprintffuncname + - gosec + - gosimple + - govet + # - grouper + # - ifshort + - importas + - ineffassign + # - interfacebloat + # - interfacer + # - ireturn + # - lll + # - maintidx + # - makezero + - maligned + - misspell + # - nakedret + # - nestif + # - nilerr + # - nilnil + # - nlreturn + # - noctx + # - nolintlint + # - nonamedreturns + # - nosnakecase + # - nosprintfhostport + # - paralleltest + # - prealloc + # - predeclared + # - promlinter + - reassign + # - revive + # - rowserrcheck + # - scopelint + # - sqlclosecheck + # - staticcheck + # - structcheck + # - stylecheck + # - tagliatelle + # - tenv + # - testpackage + # - thelper + - tparallel + # - typecheck + - unconvert + - unparam + - unused + - usestdlibvars + # - varcheck + # - varnamelen + - wastedassign + - whitespace + # - wrapcheck + # - wsl + disable: + - errcheck + + diff --git a/vendor/github.com/mochi-mqtt/server/v2/Dockerfile b/vendor/github.com/mochi-mqtt/server/v2/Dockerfile new file mode 100644 index 000000000..dc3d2bd89 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/Dockerfile @@ -0,0 +1,31 @@ +FROM golang:1.21.0-alpine3.18 AS builder + +RUN apk update +RUN apk add git + +WORKDIR /app + +COPY go.mod ./ +COPY go.sum ./ +RUN go mod download + +COPY . 
./ + +RUN go build -o /app/mochi ./cmd + + +FROM alpine + +WORKDIR / +COPY --from=builder /app/mochi . + +# tcp +EXPOSE 1883 + +# websockets +EXPOSE 1882 + +# dashboard +EXPOSE 8080 + +ENTRYPOINT [ "/mochi" ] diff --git a/vendor/github.com/mochi-mqtt/server/v2/LICENSE.md b/vendor/github.com/mochi-mqtt/server/v2/LICENSE.md new file mode 100644 index 000000000..25718aceb --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/LICENSE.md @@ -0,0 +1,23 @@ + +The MIT License (MIT) + +Copyright (c) 2023 Mochi-MQTT Organisation +Copyright (c) 2019, 2022, 2023 Jonathan Blake (mochi-co) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mochi-mqtt/server/v2/README-CN.md b/vendor/github.com/mochi-mqtt/server/v2/README-CN.md new file mode 100644 index 000000000..76807a8d7 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/README-CN.md @@ -0,0 +1,505 @@ +# Mochi-MQTT Server + +

+ +![build status](https://github.com/mochi-mqtt/server/actions/workflows/build.yml/badge.svg) +[![Coverage Status](https://coveralls.io/repos/github/mochi-mqtt/server/badge.svg?branch=master&v2)](https://coveralls.io/github/mochi-mqtt/server?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/mochi-mqtt/server)](https://goreportcard.com/report/github.com/mochi-mqtt/server/v2) +[![Go Reference](https://pkg.go.dev/badge/github.com/mochi-mqtt/server.svg)](https://pkg.go.dev/github.com/mochi-mqtt/server/v2) +[![contributions welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/mochi-mqtt/server/issues) + +

+ +[English](README.md) | [简体中文](README-CN.md) | [日本語](README-JP.md) | [招募翻译者!](https://github.com/orgs/mochi-mqtt/discussions/310) + + +🎆 **mochi-co/mqtt 现在已经是新的 mochi-mqtt 组织的一部分。** 详细信息请[阅读公告.](https://github.com/orgs/mochi-mqtt/discussions/271) + + +### Mochi-MQTT 是一个完全兼容的、可嵌入的高性能 Go MQTT v5(以及 v3.1.1)中间件/服务器。 + +Mochi MQTT 是一个[完全兼容 MQTT v5](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html) 的可嵌入的中间件/服务器,完全使用 Go 语言编写,旨在用于遥测和物联网项目的开发。它可以作为独立的二进制文件使用,也可以嵌入到你自己的应用程序中作为库来使用,经过精心设计以实现尽可能的轻量化和快速部署,同时也极为重视代码的质量和可维护性。 + +#### 什么是 MQTT? +MQTT 代表 MQ Telemetry Transport。它是一种发布/订阅、非常简单和轻量的消息传递协议,专为受限设备和低带宽、高延迟或不可靠网络设计而成([了解更多](https://mqtt.org/faq))。Mochi MQTT 实现了完整的 MQTT 协议的 5.0.0 版本。 + +#### Mochi-MQTT 特性 +- 完全兼容 MQTT v5 功能,与 MQTT v3.1.1 和 v3.0.0 兼容: + - MQTT v5 用户和数据包属性 + - 主题别名(Topic Aliases) + - 共享订阅(Shared Subscriptions) + - 订阅选项和订阅标识符(Identifiers) + - 消息过期(Message Expiry) + - 客户端会话过期(Client Session Expiry) + - 发送和接收 QoS 流量控制配额(Flow Control Quotas) + - 服务器端的断开连接和数据包的权限验证(Auth Packets) + - 遗愿消息延迟间隔(Will Delay Intervals) + - 还有 Mochi MQTT v1 的所有原始 MQTT 功能,例如完全的 QoS(0,1,2)、$SYS 主题、保留消息等。 +- 面向开发者: + - 核心代码都已开放并可访问,以便开发者完全控制。 + - 功能丰富且灵活的基于钩子(Hook)的接口系统,支持便捷的“插件(plugin)”开发。 + - 使用特殊的内联客户端(inline client)进行服务端的消息发布,也支持服务端伪装成现有的客户端。 +- 高性能且稳定: + - 基于经典前缀树 Trie 的主题-订阅模型。 + - 客户端特定的写入缓冲区,避免因读取速度慢或客户端不规范行为而产生的问题。 + - 通过所有 [Paho互操作性测试](https://github.com/eclipse/paho.mqtt.testing/tree/master/interoperability)(MQTT v5 和 MQTT v3)。 + - 超过一千多个经过仔细考虑的单元测试场景。 +- 支持 TCP、Websocket(包括 SSL/TLS)和$SYS 服务状态监控。 +- 内置 基于Redis、Badger 和 Bolt 的持久化(使用Hook钩子,你也可以自己创建)。 +- 内置基于规则的认证和 ACL 权限管理(使用Hook钩子,你也可以自己创建)。 + +### 兼容性说明(Compatibility Notes) +由于 v5 规范与 MQTT 的早期版本存在重叠,因此服务器可以接受 v5 和 v3 客户端,但在连接了 v5 和 v3 客户端的情况下,为 v5 客户端提供的属性和功能将会对 v3 客户端进行降级处理(例如用户属性)。 + +对于 MQTT v3.0.0 和 v3.1.1 的支持被视为混合兼容性。在 v3 规范中没有明确限制的情况下,将使用更新的和以安全为首要考虑的 v5 规范 - 例如保留的消息(retained messages)的过期处理,待发送消息(inflight messages)的过期处理、客户端过期处理以及QOS消息数量的限制等。 + +#### 版本更新时间 +除非涉及关键问题,新版本通常在周末发布。 + +## 规划路线图(Roadmap) +- 
请[提出问题](https://github.com/mochi-mqtt/server/issues)来请求新功能或新的hook钩子接口! +- 集群支持。 +- 统计度量支持。 +- 配置文件支持(支持 Docker)。 + +## 快速开始(Quick Start) +### 使用 Go 运行服务端 +Mochi MQTT 可以作为独立的中间件使用。只需拉取此仓库代码,然后在 [cmd](cmd) 文件夹中运行 [cmd/main.go](cmd/main.go) ,默认将开启下面几个服务端口, tcp (:1883)、websocket (:1882) 和服务状态监控 (:8080) 。 + +``` +cd cmd +go build -o mqtt && ./mqtt +``` + +### 使用 Docker + +你现在可以从 Docker Hub 仓库中拉取并运行Mochi MQTT[官方镜像](https://hub.docker.com/r/mochimqtt/server): +```sh +docker pull mochimqtt/server +或者 +docker run mochimqtt/server +``` + +我们还在积极完善这部分的工作,现在正在实现使用[配置文件的启动](https://github.com/orgs/mochi-mqtt/projects/2)方式。更多关于 Docker 的支持正在[这里](https://github.com/orgs/mochi-mqtt/discussions/281#discussion-5544545)和[这里](https://github.com/orgs/mochi-mqtt/discussions/209)进行讨论。如果你有在这个场景下使用 Mochi-MQTT,也可以参与到讨论中来。 + +我们提供了一个简单的 Dockerfile,用于运行 cmd/main.go 中的 Websocket(:1882)、TCP(:1883) 和服务端状态信息(:8080)这三个服务监听: + +```sh +docker build -t mochi:latest . +docker run -p 1883:1883 -p 1882:1882 -p 8080:8080 mochi:latest +``` + + +## 使用 Mochi MQTT 进行开发 +### 将Mochi MQTT作为包导入使用 +将 Mochi MQTT 作为一个包导入只需要几行代码即可开始使用。 +``` go +import ( + "log" + + mqtt "github.com/mochi-mqtt/server/v2" + "github.com/mochi-mqtt/server/v2/hooks/auth" + "github.com/mochi-mqtt/server/v2/listeners" +) + +func main() { + // 创建信号用于等待服务端关闭信号 + sigs := make(chan os.Signal, 1) + done := make(chan bool, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-sigs + done <- true + }() + + // 创建新的 MQTT 服务器。 + server := mqtt.New(nil) + + // 允许所有连接(权限)。 + _ = server.AddHook(new(auth.AllowHook), nil) + + // 在标1883端口上创建一个 TCP 服务端。 + tcp := listeners.NewTCP("t1", ":1883", nil) + err := server.AddListener(tcp) + if err != nil { + log.Fatal(err) + } + + + go func() { + err := server.Serve() + if err != nil { + log.Fatal(err) + } + }() + + // 服务端等待关闭信号 + <-done + + // 关闭服务端时需要做的一些清理工作 +} +``` + +在 [examples](examples) 文件夹中可以找到更多使用不同配置运行服务端的示例。 + +#### 网络监听器 (Network Listeners) + +服务端内置了一些已经实现的网络监听(Network 
Listeners),这些Listeners允许服务端接受不同协议的连接。当前的监听Listeners有这些: + +| Listener | Usage | +|------------------------------|----------------------------------------------------------------------------------------------| +| listeners.NewTCP | 一个 TCP 监听器,接收TCP连接 | +| listeners.NewUnixSock | 一个 Unix 套接字监听器 | +| listeners.NewNet | 一个 net.Listener 监听 | +| listeners.NewWebsocket | 一个 Websocket 监听器 | +| listeners.NewHTTPStats | 一个 HTTP $SYS 服务状态监听器 | +| listeners.NewHTTPHealthCheck | 一个 HTTP 健康检测监听器,用于为例如云基础设施提供健康检查响应 | + +> 可以使用listeners.Listener接口开发新的监听器。如果有兴趣,你可以实现自己的Listener,如果你在此期间你有更好的建议或疑问,你可以[提交问题](https://github.com/mochi-mqtt/server/issues)给我们。 + +可以在*listeners.Config 中配置TLS,传递给Listener使其支持TLS。 +我们提供了一些示例,可以在 [示例](examples) 文件夹或 [cmd/main.go](cmd/main.go) 中找到。 + +### 服务端选项和功能(Server Options and Capabilities) + +有许多可配置的选项(Options)可用于更改服务器的行为或限制对某些功能的访问。 +```go +server := mqtt.New(&mqtt.Options{ + Capabilities: mqtt.Capabilities{ + MaximumSessionExpiryInterval: 3600, + Compatibilities: mqtt.Compatibilities{ + ObscureNotAuthorized: true, + }, + }, + ClientNetWriteBufferSize: 4096, + ClientNetReadBufferSize: 4096, + SysTopicResendInterval: 10, + InlineClient: false, +}) +``` +请参考 mqtt.Options、mqtt.Capabilities 和 mqtt.Compatibilities 结构体,以查看完整的所有服务端选项。ClientNetWriteBufferSize 和 ClientNetReadBufferSize 可以根据你的需求配置调整每个客户端的内存使用状况。 + +### 默认配置说明(Default Configuration Notes) + +关于决定默认配置的值,在这里进行一些说明: + +- 默认情况下,server.Options.Capabilities.MaximumMessageExpiryInterval 的值被设置为 86400(24小时),以防止在使用默认配置时网络上暴露服务器而受到恶意DOS攻击(如果不配置到期时间将允许无限数量的保留retained/待发送inflight消息累积)。如果您在一个受信任的环境中运行,或者您有更大的保留期容量,您可以选择覆盖此设置(设置为0 以取消到期限制)。 + +## 事件钩子(Event Hooks) + +服务端有一个通用的事件钩子(Event Hooks)系统,它允许开发人员在服务器和客户端生命周期的各个阶段定制添加和修改服务端的功能。这些通用Hook钩子用于提供从认证(authentication)、持久性存储(persistent storage)到调试工具(debugging tools)等各种功能。 + +钩子(Hook)是可叠加的 - 你可以向服务器添加多个钩子(Hook),它们将按添加的顺序运行。一些钩子(Hook)修改值,这些修改后的值将在所有钩子返回之前传递给后续的钩子(Hook)。 + +| 类型 | 导入包 | 描述 | 
+|----------------|--------------------------------------------------------------------------|----------------------------------------------------------------------------| +| 访问控制 | [mochi-mqtt/server/hooks/auth . AllowHook](hooks/auth/allow_all.go) | AllowHook 允许所有客户端连接访问并读写所有主题。 | +| 访问控制 | [mochi-mqtt/server/hooks/auth . Auth](hooks/auth/auth.go) | 基于规则的访问权限控制。 | +| 数据持久性 | [mochi-mqtt/server/hooks/storage/bolt](hooks/storage/bolt/bolt.go) | 使用 [BoltDB](https://dbdb.io/db/boltdb) 进行持久性存储(已弃用)。 | +| 数据持久性 | [mochi-mqtt/server/hooks/storage/badger](hooks/storage/badger/badger.go) | 使用 [BadgerDB](https://github.com/dgraph-io/badger) 进行持久性存储。 | +| 数据持久性 | [mochi-mqtt/server/hooks/storage/redis](hooks/storage/redis/redis.go) | 使用 [Redis](https://redis.io) 进行持久性存储。 | +| 调试跟踪 | [mochi-mqtt/server/hooks/debug](hooks/debug/debug.go) | 调试输出以查看数据包在服务端的链路追踪。 | + +许多内部函数都已开放给开发者,你可以参考上述示例创建自己的Hook钩子。如果你有更好的关于Hook钩子方面的建议或者疑问,你可以[提交问题](https://github.com/mochi-mqtt/server/issues)给我们。 + +### 访问控制(Access Control) + +#### 允许所有(Allow Hook) + +默认情况下,Mochi MQTT 使用拒绝所有(DENY-ALL)的访问控制规则。要允许连接,必须实现一个访问控制的钩子(Hook)来替代默认的(DENY-ALL)钩子。其中最简单的钩子(Hook)是 auth.AllowAll 钩子(Hook),它为所有连接、订阅和发布提供允许所有(ALLOW-ALL)的规则。这也是使用最简单的钩子: + +```go +server := mqtt.New(nil) +_ = server.AddHook(new(auth.AllowHook), nil) +``` + +>如果你将服务器暴露在互联网或不受信任的网络上,请不要这样做 - 它真的应该仅用于开发、测试和调试。 + +#### 权限认证(Auth Ledger) + +权限认证钩子(Auth Ledger hook)使用结构化的定义来制定访问规则。认证规则分为两种形式:身份规则(连接时使用)和 ACL权限规则(发布订阅时使用)。 + +身份规则(Auth rules)有四个可选参数和一个是否允许参数: + +| 参数 | 说明 | +| -- | -- | +| Client | 客户端的客户端 ID | +| Username | 客户端的用户名 | +| Password | 客户端的密码 | +| Remote | 客户端的远程地址或 IP | +| Allow | true(允许此用户)或 false(拒绝此用户) | + +ACL权限规则(ACL rules)有三个可选参数和一个主题匹配参数: +| 参数 | 说明 | +| -- | -- | +| Client | 客户端的客户端 ID | +| Username | 客户端的用户名 | +| Remote | 客户端的远程地址或 IP | +| Filters | 用于匹配的主题数组 | + +规则按索引顺序(0,1,2,3)处理,并在匹配到第一个规则时返回。请查看 [hooks/auth/ledger.go](hooks/auth/ledger.go) 的具体实现。 + +```go +server := mqtt.New(nil) +err := server.AddHook(new(auth.Hook), 
&auth.Options{ + Ledger: &auth.Ledger{ + Auth: auth.AuthRules{ // Auth 默认情况下禁止所有连接 + {Username: "peach", Password: "password1", Allow: true}, + {Username: "melon", Password: "password2", Allow: true}, + {Remote: "127.0.0.1:*", Allow: true}, + {Remote: "localhost:*", Allow: true}, + }, + ACL: auth.ACLRules{ // ACL 默认情况下允许所有连接 + {Remote: "127.0.0.1:*"}, // 本地用户允许所有连接 + { + // 用户 melon 可以读取和写入自己的主题 + Username: "melon", Filters: auth.Filters{ + "melon/#": auth.ReadWrite, + "updates/#": auth.WriteOnly, // 可以写入 updates,但不能从其他人那里读取 updates + }, + }, + { + // 其他的客户端没有发布的权限 + Filters: auth.Filters{ + "#": auth.ReadOnly, + "updates/#": auth.Deny, + }, + }, + }, + } +}) +``` + +规则还可以存储为 JSON 或 YAML,并使用 Data 字段加载文件的二进制数据: + +```go +err := server.AddHook(new(auth.Hook), &auth.Options{ + Data: data, // 从字节数组(文件二进制)读取规则:yaml 或 json +}) +``` +详细信息请参阅 [examples/auth/encoded/main.go](examples/auth/encoded/main.go)。 + +### 持久化存储(Persistent Storage) + +#### Redis + +我们提供了一个基本的 Redis 存储钩子(Hook),用于为服务端提供数据持久性。你可以将这个Redis的钩子(Hook)添加到服务器中,Redis的一些参数也是可以配置的。这个钩子(Hook)里使用 github.com/go-redis/redis/v8 这个库,可以通过 Options 来配置一些参数。 + +```go +err := server.AddHook(new(redis.Hook), &redis.Options{ + Options: &rv8.Options{ + Addr: "localhost:6379", // Redis服务端地址 + Password: "", // Redis服务端的密码 + DB: 0, // Redis数据库的index + }, +}) +if err != nil { + log.Fatal(err) +} +``` +有关 Redis 钩子的工作原理或如何使用它的更多信息,请参阅 [examples/persistence/redis/main.go](examples/persistence/redis/main.go) 或 [hooks/storage/redis](hooks/storage/redis) 。 + +#### Badger DB + +如果您更喜欢基于文件的存储,还有一个 BadgerDB 存储钩子(Hook)可用。它可以以与其他钩子大致相同的方式添加和配置(具有较少的选项)。 + +```go +err := server.AddHook(new(badger.Hook), &badger.Options{ + Path: badgerPath, +}) +if err != nil { + log.Fatal(err) +} +``` + +有关 Badger 钩子(Hook)的工作原理或如何使用它的更多信息,请参阅 [examples/persistence/badger/main.go](examples/persistence/badger/main.go) 或 [hooks/storage/badger](hooks/storage/badger)。 + +还有一个 BoltDB 钩子(Hook),已被弃用,推荐使用 Badger,但如果你想使用它,请参考 
[examples/persistence/bolt/main.go](examples/persistence/bolt/main.go)。 + +## 使用事件钩子 Event Hooks 进行开发 + +在服务端和客户端生命周期中,开发者可以使用各种钩子(Hook)增加对服务端或客户端的一些自定义的处理。 +所有的钩子都定义在mqtt.Hook这个接口中了,可以在 [hooks.go](hooks.go) 中找到这些钩子(Hook)函数。 + +> 最灵活的事件钩子是 OnPacketRead、OnPacketEncode 和 OnPacketSent - 这些钩子可以用来控制和修改所有传入和传出的数据包。 + +| 钩子函数 | 说明 | +|------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| OnStarted | 在服务器成功启动时调 |用。 | +| OnStopped | 在服务器成功停止时调用。 | +| OnConnectAuthenticate | 当用户尝试与服务器进行身份验证时调用。必须实现此方法来允许或拒绝对服务器的访问(请参阅 hooks/auth/allow_all 或 basic)。它可以在自定义Hook钩子中使用,以检查连接的用户是否与现有用户数据库中的用户匹配。如果允许访问,则返回 true。 | +| OnACLCheck | 当用户尝试发布或订阅主题时调用,用来检测ACL规则。 | +| OnSysInfoTick | 当 $SYS 主题相关的消息被发布时调用。 | +| OnConnect | 当新客户端连接时调用,可能返回一个错误或错误码以中断客户端的连接。 | +| OnSessionEstablish | 在新客户端连接并进行身份验证后,会立即调用此方法,并在会话建立和发送CONNACK之前立即调用。 | +| OnSessionEstablished | 在新客户端成功建立会话(在OnConnect之后)时调用。 | +| OnDisconnect | 当客户端因任何原因断开连接时调用。 | +| OnAuthPacket | 当接收到认证数据包时调用。它旨在允许开发人员创建自己的 MQTT v5 认证数据包处理机制。在这里允许数据包的修改。 | +| OnPacketRead | 当从客户端接收到数据包时调用。允许对数据包进行修改。 | +| OnPacketEncode | 在数据包被编码并发送给客户端之前立即调用。允许修改数据包。 | +| OnPacketSent | 在数据包已发送给客户端后调用。 | +| OnPacketProcessed | 在数据包已接收并成功由服务端处理后调用。 | +| OnSubscribe | 当客户端订阅一个或多个主题时调用。允许修改数据包。 | +| OnSubscribed | 当客户端成功订阅一个或多个主题时调用。 | +| OnSelectSubscribers | 当订阅者已被关联到一个主题中,在选择共享订阅的订阅者之前调用。允许接收者修改。 | +| OnUnsubscribe | 当客户端取消订阅一个或多个主题时调用。允许包修改。 | +| OnUnsubscribed | 当客户端成功取消订阅一个或多个主题时调用。 | +| OnPublish | 当客户端发布消息时调用。允许修改数据包。 | +| OnPublished | 当客户端向订阅者发布消息后调用。| +| OnPublishDropped | 消息传递给客户端之前消息已被丢弃,将调用此方法。 例如当客户端响应时间过长需要丢弃消息时。 | +| OnRetainMessage | 当消息被保留时调用。 | +| OnRetainPublished | 当保留的消息被发布给客户端时调用。 | +| OnQosPublish | 当发出QoS >= 1 的消息给订阅者后调用。 | +| OnQosComplete | 在消息的QoS流程走完之后调用。 | +| 
OnQosDropped | 在消息的QoS流程未完成,同时消息到期时调用。 | +| OnPacketIDExhausted | 当packet ids已经用完后,没有可用的id可再分配时调用。 | +| OnWill | 当客户端断开连接并打算发布遗嘱消息时调用。允许修改数据包。 | +| OnWillSent | 遗嘱消息发送完成后被调用。 | +| OnClientExpired | 在客户端会话已过期并应删除时调用。 | +| OnRetainedExpired | 在保留的消息已过期并应删除时调用。| | +| StoredClients | 这个接口需要返回客户端列表,例如从持久化数据库中获取客户端列表。 | +| StoredSubscriptions | 返回客户端的所有订阅,例如从持久化数据库中获取客户端的订阅列表。 | +| StoredInflightMessages | 返回待发送消息(inflight messages),例如从持久化数据库中获取到还有哪些消息未完成传输。 | +| StoredRetainedMessages | 返回保留的消息,例如从持久化数据库获取保留的消息。 | +| StoredSysInfo | 返回存储的系统状态信息,例如从持久化数据库获取的系统状态信息。 | + +如果你想自己实现一个持久化存储的Hook钩子,请参考现有的持久存储Hook钩子以获取灵感和借鉴。如果您正在构建一个身份验证Hook钩子,您将需要实现OnACLCheck 和 OnConnectAuthenticate这两个函数接口。 + +### 内联客户端 (Inline Client v2.4.0+支持) + +现在可以通过使用内联客户端功能直接在服务端上订阅主题和发布消息。内联客户端是内置在服务端中的特殊的客户端,可以在服务端的配置中启用: + +```go +server := mqtt.New(&mqtt.Options{ + InlineClient: true, +}) +``` +启用上述配置后,你将能够使用 server.Publish、server.Subscribe 和 server.Unsubscribe 方法来在服务端中直接发布和接收消息。 + +具体如何使用请参考 [direct examples](examples/direct/main.go) 。 + +#### 内联发布(Inline Publish) +要想在服务端中直接发布Publish一个消息,可以使用 `server.Publish`方法。 + +```go +err := server.Publish("direct/publish", []byte("packet scheduled message"), false, 0) +``` +> 在这种情况下,QoS级别只对订阅者有效,按照 MQTT v5 规范。 + +#### 内联订阅(Inline Subscribe) +要想在服务端中直接订阅一个主题,可以使用 `server.Subscribe`方法并提供一个处理订阅消息的回调函数。内联订阅的 QoS默认都是0。如果您希望对相同的主题有多个回调,可以使用 MQTTv5 的 subscriptionId 属性进行区分。 + +```go +callbackFn := func(cl *mqtt.Client, sub packets.Subscription, pk packets.Packet) { + server.Log.Info("inline client received message from subscription", "client", cl.ID, "subscriptionId", sub.Identifier, "topic", pk.TopicName, "payload", string(pk.Payload)) +} +server.Subscribe("direct/#", 1, callbackFn) +``` + +#### 取消内联订阅(Inline Unsubscribe) +如果您使用内联客户端订阅了某个主题,如果需要取消订阅。您可以使用 `server.Unsubscribe` 方法取消内联订阅: + +```go +server.Unsubscribe("direct/#", 1) +``` + +### 注入数据包(Packet Injection) + +如果你想要更多的服务端控制,或者想要设置特定的MQTT v5属性或其他属性,你可以选择指定的客户端创建自己的发布包(publish 
packets)。这种方法允许你将MQTT数据包(packets)直接注入到运行中的服务端,相当于服务端直接自己模拟接收到了某个客户端的数据包。 + +数据包注入(Packet Injection)可用于任何MQTT数据包,包括ping请求、订阅等。你可以获取客户端的详细信息,因此你甚至可以直接在服务端模拟某个在线的客户端,发布一个数据包。 + +大多数情况下,您可能希望使用上面描述的内联客户端(Inline Client),因为它具有独特的特权:它可以绕过所有ACL和主题验证检查,这意味着它甚至可以发布到$SYS主题。你也可以自己从头开始制定一个自己的内联客户端,它将与内置的内联客户端行为相同。 + +```go +cl := server.NewClient(nil, "local", "inline", true) +server.InjectPacket(cl, packets.Packet{ + FixedHeader: packets.FixedHeader{ + Type: packets.Publish, + }, + TopicName: "direct/publish", + Payload: []byte("scheduled message"), +}) +``` + +> MQTT数据包仍然需要满足规范的结构,所以请参考[测试用例中数据包的定义](packets/tpackets.go) 和 [MQTTv5规范](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html) 以获取一些帮助。 + +具体如何使用请参考 [hooks example](examples/hooks/main.go) 。 + + +### 测试(Testing) +#### 单元测试(Unit Tests) + +Mochi MQTT 使用精心编写的单元测试,测试了一千多种场景,以确保每个函数都表现出我们期望的行为。您可以使用以下命令运行测试: +``` +go run --cover ./... +``` + +#### Paho 互操作性测试(Paho Interoperability Test) + +您可以使用 `examples/paho/main.go` 启动服务器,然后在 _interoperability_ 文件夹中运行 `python3 client_test5.py` 来检查代理是否符合 [Paho互操作性测试](https://github.com/eclipse/paho.mqtt.testing/tree/master/interoperability) 的要求,包括 MQTT v5 和 v3 的测试。 + +> 请注意,关于 paho 测试套件存在一些尚未解决的问题,因此在 `paho/main.go` 示例中启用了某些兼容性模式。 + + +## 基准测试(Performance Benchmarks) + +Mochi MQTT 的性能与其他的一些主流的mqtt中间件(如 Mosquitto、EMQX 等)不相上下。 + +基准测试是使用 [MQTT-Stresser](https://github.com/inovex/mqtt-stresser) 在 Apple Macbook Air M2 上进行的,使用 `cmd/main.go` 默认设置。考虑到高低吞吐量的突发情况,中位数分数是最有用的。数值越高越好。 + + +> 基准测试中呈现的数值不代表真实每秒消息吞吐量。它们依赖于 mqtt-stresser 的一种不寻常的计算方法,但它们在所有代理之间是一致的。性能基准测试的结果仅供参考。这些比较都是使用默认配置进行的。 + +`mqtt-stresser -broker tcp://localhost:1883 -num-clients=2 -num-messages=10000` +| Broker | publish fastest | median | slowest | receive fastest | median | slowest | +| -- | -- | -- | -- | -- | -- | -- | +| Mochi v2.2.10 | 124,772 | 125,456 | 124,614 | 314,461 | 313,186 | 311,910 | +| [Mosquitto v2.0.15](https://github.com/eclipse/mosquitto) | 155,920 | 155,919 | 155,918 | 185,485 | 185,097 | 184,709 
| +| [EMQX v5.0.11](https://github.com/emqx/emqx) | 156,945 | 156,257 | 155,568 | 17,918 | 17,783 | 17,649 | +| [Rumqtt v0.21.0](https://github.com/bytebeamio/rumqtt) | 112,208 | 108,480 | 104,753 | 135,784 | 126,446 | 117,108 | + +`mqtt-stresser -broker tcp://localhost:1883 -num-clients=10 -num-messages=10000` +| Broker | publish fastest | median | slowest | receive fastest | median | slowest | +| -- | -- | -- | -- | -- | -- | -- | +| Mochi v2.2.10 | 41,825 | 31,663| 23,008 | 144,058 | 65,903 | 37,618 | +| Mosquitto v2.0.15 | 42,729 | 38,633 | 29,879 | 23,241 | 19,714 | 18,806 | +| EMQX v5.0.11 | 21,553 | 17,418 | 14,356 | 4,257 | 3,980 | 3,756 | +| Rumqtt v0.21.0 | 42,213 | 23,153 | 20,814 | 49,465 | 36,626 | 19,283 | + +百万消息挑战(立即向服务器发送100万条消息): + +`mqtt-stresser -broker tcp://localhost:1883 -num-clients=100 -num-messages=10000` +| Broker | publish fastest | median | slowest | receive fastest | median | slowest | +| -- | -- | -- | -- | -- | -- | -- | +| Mochi v2.2.10 | 13,532 | 4,425 | 2,344 | 52,120 | 7,274 | 2,701 | +| Mosquitto v2.0.15 | 3,826 | 3,395 | 3,032 | 1,200 | 1,150 | 1,118 | +| EMQX v5.0.11 | 4,086 | 2,432 | 2,274 | 434 | 333 | 311 | +| Rumqtt v0.21.0 | 78,972 | 5,047 | 3,804 | 4,286 | 3,249 | 2,027 | + +> 这里还不确定EMQX是不是哪里出了问题,可能是因为 Docker 的默认配置优化不对,所以要持保留意见,因为我们确实知道它是一款可靠的软件。 + +## 贡献指南(Contribution Guidelines) + +我们欢迎代码贡献和反馈!如果你发现了漏洞(bug)或者有任何疑问,又或者是有新的需求,请[提交给我们](https://github.com/mochi-mqtt/server/issues)。如果您提交了一个PR(pull request)请求,请尽量遵循以下准则: + +- 在合理的情况下,尽量保持测试覆盖率。 +- 清晰地说明PR(pull request)请求的作用和原因。 +- 请不要忘记在你贡献的文件中添加 SPDX FileContributor 标签。 + +[SPDX 注释] (https://spdx.dev) 用于智能的识别每个文件的许可证、版权和贡献。如果您正在向本仓库添加一个新文件,请确保它具有以下 SPDX 头部: +```go +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2023 mochi-mqtt +// SPDX-FileContributor: Your name or alias + +package name +``` + +请确保为文件的每位贡献者添加一个新的SPDX-FileContributor 行。可以参考其他文件的示例。请务必记得这样做,你对这个项目的贡献是有价值且受到赞赏的 - 获得认可非常重要! 
+ +## 给我们星星的人数(Stargazers over time) 🥰 +[![Stargazers over time](https://starchart.cc/mochi-mqtt/server.svg)](https://starchart.cc/mochi-mqtt/server) + +您是否在项目中使用 Mochi MQTT?[请告诉我们!](https://github.com/mochi-mqtt/server/issues) + diff --git a/vendor/github.com/mochi-mqtt/server/v2/README-JP.md b/vendor/github.com/mochi-mqtt/server/v2/README-JP.md new file mode 100644 index 000000000..3cb9d8689 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/README-JP.md @@ -0,0 +1,494 @@ +# Mochi-MQTT Server + +

+ +![build status](https://github.com/mochi-mqtt/server/actions/workflows/build.yml/badge.svg) +[![Coverage Status](https://coveralls.io/repos/github/mochi-mqtt/server/badge.svg?branch=master&v2)](https://coveralls.io/github/mochi-mqtt/server?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/mochi-mqtt/server)](https://goreportcard.com/report/github.com/mochi-mqtt/server/v2) +[![Go Reference](https://pkg.go.dev/badge/github.com/mochi-mqtt/server.svg)](https://pkg.go.dev/github.com/mochi-mqtt/server/v2) +[![contributions welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/mochi-mqtt/server/issues) + +

+ +[English](README.md) | [简体中文](README-CN.md) | [日本語](README-JP.md) | [Translators Wanted!](https://github.com/orgs/mochi-mqtt/discussions/310) + +🎆 **mochi-co/mqtt は新しい mochi-mqtt organisation の一部です.** [このページをお読みください](https://github.com/orgs/mochi-mqtt/discussions/271) + + +### Mochi-MQTTは MQTT v5 (と v3.1.1)に完全に準拠しているアプリケーションに組み込み可能なハイパフォーマンスなbroker/serverです. + +Mochi MQTT は Goで書かれたMQTT v5に完全に[準拠](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html)しているMQTTブローカーで、IoTプロジェクトやテレメトリの開発プロジェクト向けに設計されています。 スタンドアロンのバイナリで使ったり、アプリケーションにライブラリとして組み込むことができ、プロジェクトのメンテナンス性と品質を確保できるように配慮しながら、 軽量で可能な限り速く動作するように設計されています。 + +#### MQTTとは? +MQTT は [MQ Telemetry Transport](https://en.wikipedia.org/wiki/MQTT)を意味します。 Pub/Sub型のシンプルで軽量なメッセージプロトコルで、低帯域、高遅延、不安定なネットワーク下での制約を考慮して設計されています([MQTTについて詳しくはこちら](https://mqtt.org/faq))。 Mochi MQTTはMQTTプロトコルv5.0.0に完全準拠した実装をしています。 + +#### Mochi-MQTTのもつ機能 + +- MQTTv5への完全な準拠とMQTT v3.1.1 および v3.0.0 との互換性: + - MQTT v5で拡張されたユーザープロパティ + - トピック・エイリアス + - 共有サブスクリプション + - サブスクリプションオプションとサブスクリプションID + - メッセージの有効期限 + - クライアントセッション + - 送受信QoSフロー制御クォータ + - サーバサイド切断と認証パケット + - Will遅延間隔 + - 上記に加えてQoS(0,1,2)、$SYSトピック、retain機能などすべてのMQTT v1の特徴を持ちます +- Developer-centric: + - 開発者が制御できるように、ほとんどのコアブローカーのコードをエクスポートにしてアクセスできるようにしました。 + - フル機能で柔軟なフックベースのインターフェイスにすることで簡単に'プラグイン'を開発できるようにしました。 + - 特別なインラインクライアントを利用することでパケットインジェクションを行うか、既存のクライアントとしてマスカレードすることができます。 +- パフォーマンスと安定性: + - 古典的なツリーベースのトピックサブスクリプションモデル + - クライアント固有に書き込みバッファーをもたせることにより、読み込みの遅さや不規則なクライアントの挙動の問題を回避しています。 + - MQTT v5 and MQTT v3のすべての[Paho互換性テスト](https://github.com/eclipse/paho.mqtt.testing/tree/master/interoperability)をpassしています。 + - 慎重に検討された多くのユニットテストシナリオでテストされています。 +- TCP, Websocket (SSL/TLSを含む), $SYSのダッシュボードリスナー +- フックを利用した保存機能としてRedis, Badger, Boltを使うことができます(自作のHookも可能です)。 +- フックを利用したルールベース認証機能とアクセス制御リストLedgerを使うことができます(自作のHookも可能です)。 + +### 互換性に関する注意事項 +MQTTv5とそれ以前との互換性から、サーバーはv5とv3両方のクライアントを受け入れることができますが、v5とv3のクライアントが接続された場合はv5でクライアント向けの特徴と機能はv3クライアントにダウングレードされます(ユーザープロパティなど)。 +MQTT v3.0.0 
と v3.1.1 のサポートはハイブリッド互換性があるとみなされます。それはv3と仕様に制限されていない場合、例えば、送信メッセージ、保持メッセージの有効期限とQoSフロー制御制限などについては、よりモダンで安全なv5の動作が使用されます + +#### リリースされる時期について +クリティカルなイシュー出ない限り、新しいリリースがされるのは週末です。 + +## Roadmap +- 新しい特徴やイベントフックのリクエストは [open an issue](https://github.com/mochi-mqtt/server/issues) へ! +- クラスターのサポート +- メトリックスサポートの強化 +- ファイルベースの設定(Dockerイメージのサポート) + +## Quick Start +### GoでのBrokerの動かし方 +Mochi MQTTはスタンドアロンのブローカーとして使うことができます。単純にこのレポジトリーをチェックアウトして、[cmd/main.go](cmd/main.go) を起動すると内部の [cmd](cmd) フォルダのエントリポイントにしてtcp (:1883), websocket (:1882), dashboard (:8080)のポートを外部にEXPOSEします。 + +``` +cd cmd +go build -o mqtt && ./mqtt +``` + +### Dockerで利用する +Dockerレポジトリの [official Mochi MQTT image](https://hub.docker.com/r/mochimqtt/server) から Pullして起動することができます。 + +```sh +docker pull mochimqtt/server +or +docker run mochimqtt/server +``` + +これは実装途中です。[file-based configuration](https://github.com/orgs/mochi-mqtt/projects/2) は、この実装をよりよくサポートするために開発中です。 +より実質的なdockerのサポートが議論されています。_Docker環境で使っている方は是非この議論に参加してください。_ [ここ](https://github.com/orgs/mochi-mqtt/discussions/281#discussion-5544545) や [ここ](https://github.com/orgs/mochi-mqtt/discussions/209)。 + +[cmd/main.go](cmd/main.go)の Websocket, TCP, Statsサーバを実行するために、シンプルなDockerfileが提供されます。 + + +```sh +docker build -t mochi:latest . +docker run -p 1883:1883 -p 1882:1882 -p 8080:8080 mochi:latest +``` + +## Mochi MQTTを使って開発するには +### パッケージをインポート +Mochi MQTTをパッケージとしてインポートするにはほんの数行のコードで始めることができます。 +``` go +import ( + "log" + + mqtt "github.com/mochi-mqtt/server/v2" + "github.com/mochi-mqtt/server/v2/hooks/auth" + "github.com/mochi-mqtt/server/v2/listeners" +) + +func main() { + // Create signals channel to run server until interrupted + sigs := make(chan os.Signal, 1) + done := make(chan bool, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-sigs + done <- true + }() + + // Create the new MQTT Server. + server := mqtt.New(nil) + + // Allow all connections. 
+ _ = server.AddHook(new(auth.AllowHook), nil) + + // Create a TCP listener on a standard port. + tcp := listeners.NewTCP("t1", ":1883", nil) + err := server.AddListener(tcp) + if err != nil { + log.Fatal(err) + } + + + go func() { + err := server.Serve() + if err != nil { + log.Fatal(err) + } + }() + + // Run server until interrupted + <-done + + // Cleanup +} +``` + +ブローカーの動作例は [examples](examples)フォルダにあります。 + +#### Network Listeners +サーバは様々なプロトコルのコネクションのリスナーに対応しています。現在の対応リスナーは、 + +| Listener | Usage | +|------------------------------|----------------------------------------------------------------------------------------------| +| listeners.NewTCP | TCPリスナー | +| listeners.NewUnixSock | Unixソケットリスナー | +| listeners.NewNet | net.Listenerリスナー | +| listeners.NewWebsocket | Websocketリスナー | +| listeners.NewHTTPStats | HTTP $SYSダッシュボード | +| listeners.NewHTTPHealthCheck | ヘルスチェック応答を提供するためのHTTPヘルスチェックリスナー(クラウドインフラ) | + +> 新しいリスナーを開発するためには `listeners.Listener` を使ってください。使ったら是非教えてください! + +TLSを設定するには`*listeners.Config`を渡すことができます。 + +[examples](examples) フォルダと [cmd/main.go](cmd/main.go)に使用例があります。 + + +## 設定できるオプションと機能 +たくさんのオプションが利用可能です。サーバーの動作を変更したり、特定の機能へのアクセスを制限することができます。 + +```go +server := mqtt.New(&mqtt.Options{ + Capabilities: mqtt.Capabilities{ + MaximumSessionExpiryInterval: 3600, + Compatibilities: mqtt.Compatibilities{ + ObscureNotAuthorized: true, + }, + }, + ClientNetWriteBufferSize: 4096, + ClientNetReadBufferSize: 4096, + SysTopicResendInterval: 10, + InlineClient: false, +}) +``` + +mqtt.Options、mqtt.Capabilities、mqtt.Compatibilitiesの構造体はオプションの理解に役立ちます。 +必要に応じて`ClientNetWriteBufferSize`と`ClientNetReadBufferSize`はクライアントの使用するメモリに合わせて設定できます。 + +### デフォルト設定に関する注意事項 + +いくつかのデフォルトの設定を決める際にいくつかの決定がなされましたのでここに記しておきます: +- デフォルトとして、敵対的なネットワーク上のDoSアタックにさらされるのを防ぐために `server.Options.Capabilities.MaximumMessageExpiryInterval`は86400 (24時間)に、とセットされています。有効期限を無限にすると、保持、送信メッセージが無限に蓄積されるからです。もし信頼できる環境であったり、より大きな保存期間が可能であれば、この設定はオーバーライドできます(`0` を設定すると有効期限はなくなります。) + +## Event Hooks 
+ユニバーサルイベントフックシステムは、開発者にサーバとクライアントの様々なライフサイクルをフックすることができ、ブローカーの機能を追加/変更することができます。それらのユニバーサルフックは認証、永続ストレージ、デバッグツールなど、あらゆるものに使用されています。 +フックは複数重ねることができ、サーバに複数のフックを設定することができます。それらは追加した順番に動作します。いくつかのフックは値を変えて、その値は動作コードに返される前にあとに続くフックに渡されます。 + + +| Type | Import | Info | +|----------------|--------------------------------------------------------------------------|----------------------------------------------------------------------------| +| Access Control | [mochi-mqtt/server/hooks/auth . AllowHook](hooks/auth/allow_all.go) | すべてのトピックに対しての読み書きをすべてのクライアントに対して許可します。 | +| Access Control | [mochi-mqtt/server/hooks/auth . Auth](hooks/auth/auth.go) | ルールベースのアクセスコントロール台帳です。 | +| Persistence | [mochi-mqtt/server/hooks/storage/bolt](hooks/storage/bolt/bolt.go) | [BoltDB](https://dbdb.io/db/boltdb) を使った永続ストレージ (非推奨). | +| Persistence | [mochi-mqtt/server/hooks/storage/badger](hooks/storage/badger/badger.go) | [BadgerDB](https://github.com/dgraph-io/badger)を使った永続ストレージ | +| Persistence | [mochi-mqtt/server/hooks/storage/redis](hooks/storage/redis/redis.go) | [Redis](https://redis.io)を使った永続ストレージ | +| Debugging | [mochi-mqtt/server/hooks/debug](hooks/debug/debug.go) | パケットフローを可視化するデバッグ用のフック | + +たくさんの内部関数が開発者に公開されています、なので、上記の例を使って自分でフックを作ることができます。もし作ったら是非[Open an issue](https://github.com/mochi-mqtt/server/issues)に投稿して教えてください! 
+ +### アクセスコントロール +#### Allow Hook +デフォルトで、Mochi MQTTはアクセスコントロールルールにDENY-ALLを使用しています。コネクションを許可するためには、アクセスコントロールフックを上書きする必要があります。一番単純なのは`auth.AllowAll`フックで、ALLOW-ALLルールがすべてのコネクション、サブスクリプション、パブリッシュに適用されます。使い方は下記のようにするだけです: + +```go +server := mqtt.New(nil) +_ = server.AddHook(new(auth.AllowHook), nil) +``` + +> もしインターネットや信頼できないネットワークにさらされる場合は行わないでください。これは開発・テスト・デバッグ用途のみであるべきです。 + +#### Auth Ledger +Auth Ledgerは構造体で定義したアクセスルールの洗練された仕組みを提供します。Auth Ledgerルール2つの形式から成ります、認証ルール(コネクション)とACLルール(パブリッシュ、サブスクライブ)です。 + +認証ルールは4つのクライテリアとアサーションフラグがあります: +| Criteria | Usage | +| -- | -- | +| Client | 接続クライアントのID | +| Username | 接続クライアントのユーザー名 | +| Password | 接続クライアントのパスワード | +| Remote | クライアントのリモートアドレスもしくはIP | +| Allow | true(このユーザーを許可する)もしくはfalse(このユーザを拒否する) | + +アクセスコントロールルールは3つのクライテリアとフィルターマッチがあります: +| Criteria | Usage | +| -- | -- | +| Client | 接続クライアントのID | +| Username | 接続クライアントのユーザー名 | +| Remote | クライアントのリモートアドレスもしくはIP | +| Filters | 合致するフィルターの配列 | + +ルールはインデックス順(0,1,2,3)に処理され、はじめに合致したルールが適用されます。 [hooks/auth/ledger.go](hooks/auth/ledger.go) の構造体を見てください。 + + +```go +server := mqtt.New(nil) +err := server.AddHook(new(auth.Hook), &auth.Options{ + Ledger: &auth.Ledger{ + Auth: auth.AuthRules{ // Auth disallows all by default + {Username: "peach", Password: "password1", Allow: true}, + {Username: "melon", Password: "password2", Allow: true}, + {Remote: "127.0.0.1:*", Allow: true}, + {Remote: "localhost:*", Allow: true}, + }, + ACL: auth.ACLRules{ // ACL allows all by default + {Remote: "127.0.0.1:*"}, // local superuser allow all + { + // user melon can read and write to their own topic + Username: "melon", Filters: auth.Filters{ + "melon/#": auth.ReadWrite, + "updates/#": auth.WriteOnly, // can write to updates, but can't read updates from others + }, + }, + { + // Otherwise, no clients have publishing permissions + Filters: auth.Filters{ + "#": auth.ReadOnly, + "updates/#": auth.Deny, + }, + }, + }, + } +}) +``` + +ledgeはデータフィールドを使用してJSONもしくはYAML形式で保存したものを使用することもできます。 +```go 
+err := server.AddHook(new(auth.Hook), &auth.Options{ + Data: data, // build ledger from byte slice: yaml or json +}) +``` +より詳しくは[examples/auth/encoded/main.go](examples/auth/encoded/main.go)を見てください。 + +### 永続ストレージ +#### Redis +ブローカーに永続性を提供する基本的な Redis ストレージフックが利用可能です。他のフックと同じ方法で、いくつかのオプションを使用してサーバーに追加できます。それはフック内部で github.com/go-redis/redis/v8 を使用し、Optionsの値で詳しい設定を行うことができます。 +```go +err := server.AddHook(new(redis.Hook), &redis.Options{ + Options: &rv8.Options{ + Addr: "localhost:6379", // default redis address + Password: "", // your password + DB: 0, // your redis db + }, +}) +if err != nil { + log.Fatal(err) +} +``` +Redisフックがどのように動くか、どのように使用するかについての詳しくは、[examples/persistence/redis/main.go](examples/persistence/redis/main.go) か [hooks/storage/redis](hooks/storage/redis) のソースコードを見てください。 + +#### Badger DB +もしファイルベースのストレージのほうが適しているのであれば、BadgerDBストレージも使用することができます。それもまた、他のフックと同様に追加、設定することができます(オプションは若干少ないです)。 + +```go +err := server.AddHook(new(badger.Hook), &badger.Options{ + Path: badgerPath, +}) +if err != nil { + log.Fatal(err) +} +``` + +badgerフックがどのように動くか、どのように使用するかについての詳しくは、[examples/persistence/badger/main.go](examples/persistence/badger/main.go) か [hooks/storage/badger](hooks/storage/badger) のソースコードを見てください。 + +BoltDBフックはBadgerに代わって非推奨となりましたが、もし必要ならば [examples/persistence/bolt/main.go](examples/persistence/bolt/main.go)をチェックしてください。 + +## イベントフックを利用した開発 + +ブローカーとクライアントのライフサイクルに関わるたくさんのフックが利用できます。 +そのすべてのフックと`mqtt.Hook`インターフェイスの関数シグネチャは[hooks.go](hooks.go)に記載されています。 + +> もっと柔軟なイベントフックはOnPacketRead、OnPacketEncodeとOnPacketSentです。それらは、すべての流入パケットと流出パケットをコントロール及び変更に使用されるフックです。 + + +| Function | Usage | +|------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| OnStarted | サーバーが正常にスタートした際に呼ばれます。 | 
+| OnStopped | サーバーが正常に終了した際に呼ばれます。 | +| OnConnectAuthenticate | ユーザーがサーバと認証を試みた際に呼ばれます。このメソッドはサーバーへのアクセス許可もしくは拒否するためには必ず使用する必要があります(hooks/auth/allow_all or basicを見てください)。これは、データベースにユーザーが存在するか照合してチェックするカスタムフックに利用できます。許可する場合はtrueを返す実装をします。| +| OnACLCheck | ユーザーがあるトピックフィルタにpublishかsubscribeした際に呼ばれます。上と同様です | +| OnSysInfoTick | $SYSトピック値がpublishされた場合に呼ばれます。 | +| OnConnect | 新しいクライアントが接続した際によばれます、エラーかパケットコードを返して切断する場合があります。 | +| OnSessionEstablish | 新しいクライアントが接続された後すぐ、セッションが確立されてCONNACKが送信される前に呼ばれます。 | +| OnSessionEstablished | 新しいクライアントがセッションを確立した際(OnConnectの後)に呼ばれます。 | +| OnDisconnect | クライアントが何らかの理由で切断された場合に呼ばれます。 | +| OnAuthPacket | 認証パケットを受け取ったときに呼ばれます。これは開発者にmqtt v5の認証パケットを取り扱う仕組みを作成すること意図しています。パケットを変更することができます。 | +| OnPacketRead | クライアントからパケットを受け取った際に呼ばれます。パケットを変更することができます。 | +| OnPacketEncode | エンコードされたパケットがクライアントに送信する直前に呼ばれます。パケットを変更することができます。 | +| OnPacketSent | クライアントにパケットが送信された際に呼ばれます。 | +| OnPacketProcessed | パケットが届いてブローカーが正しく処理できた場合に呼ばれます。 | +| OnSubscribe | クライアントが1つ以上のフィルタをsubscribeした場合に呼ばれます。パケットの変更ができます。 | +| OnSubscribed | クライアントが1つ以上のフィルタをsubscribeに成功した場合に呼ばれます。 | +| OnSelectSubscribers | サブスクライバーがトピックに収集されたとき、共有サブスクライバーが選択される前に呼ばれる。受信者は変更可能。 | +| OnUnsubscribe | 1つ以上のあんサブスクライブが呼ばれた場合。パケットの変更は可能。 | +| OnUnsubscribed | クライアントが正常に1つ以上のトピックフィルタをサブスクライブ解除した場合。 | +| OnPublish | クライアントがメッセージをパブリッシュした場合。パケットの変更は可能。 | +| OnPublished | クライアントがサブスクライバーにメッセージをパブリッシュし終わった場合。 | +| OnPublishDropped | あるクライアントが反応に時間がかかった場合等のようにクライアントに到達する前にメッセージが失われた場合に呼ばれる。 | +| OnRetainMessage | パブリッシュされたメッセージが保持された場合に呼ばれる。 | +| OnRetainPublished | 保持されたメッセージがクライアントに到達した場合に呼ばれる。 | +| OnQosPublish | QoSが1以上のパケットがサブスクライバーに発行された場合。 | +| OnQosComplete | そのメッセージQoSフローが完了した場合に呼ばれる。 | +| OnQosDropped | インフライトメッセージが完了前に期限切れになった場合に呼ばれる。 | +| OnPacketIDExhausted | クライアントがパケットに割り当てるIDが枯渇した場合に呼ばれる。 | +| OnWill | クライアントが切断し、WILLメッセージを発行しようとした場合に呼ばれる。パケットの変更が可能。 | +| OnWillSent | LWTメッセージが切断されたクライアントから発行された場合に呼ばれる | +| OnClientExpired | クライアントセッションが期限切れで削除するべき場合に呼ばれる。 | +| OnRetainedExpired | 
保持メッセージが期限切れで削除すべき場合に呼ばれる。 | +| StoredClients | クライアントを返す。例えば永続ストレージから。 | +| StoredSubscriptions | クライアントのサブスクリプションを返す。例えば永続ストレージから。 | +| StoredInflightMessages | インフライトメッセージを返す。例えば永続ストレージから。 | +| StoredRetainedMessages | 保持されたメッセージを返す。例えば永続ストレージから。 | +| StoredSysInfo | システム情報の値を返す。例えば永続ストレージから。 | + +もし永続ストレージフックを作成しようとしているのであれば、すでに存在する永続的なフックを見てインスピレーションとどのようなパターンがあるか見てみてください。もし認証フックを作成しようとしているのであれば、`OnACLCheck`と`OnConnectAuthenticate`が役立つでしょう。 + +### Inline Client (v2.4.0+) +トピックに対して埋め込まれたコードから直接サブスクライブとパブリッシュできます。そうするには`inline client`機能を使うことができます。インラインクライアント機能はサーバの一部として組み込まれているクライアントでサーバーのオプションとしてEnableにできます。 +```go +server := mqtt.New(&mqtt.Options{ + InlineClient: true, +}) +``` +Enableにすると、`server.Publish`, `server.Subscribe`, `server.Unsubscribe`のメソッドを利用できて、ブローカーから直接メッセージを送受信できます。 +> 実際の使用例は[direct examples](examples/direct/main.go)を見てください。 + +#### Inline Publish +組み込まれたアプリケーションからメッセージをパブリッシュするには`server.Publish(topic string, payload []byte, retain bool, qos byte) error`メソッドを利用します。 + +```go +err := server.Publish("direct/publish", []byte("packet scheduled message"), false, 0) +``` +> このケースでのQoSはサブスクライバーに設定できる上限でしか使用されません。これはMQTTv5の仕様に従っています。 + +#### Inline Subscribe +組み込まれたアプリケーション内部からトピックフィルタをサブスクライブするには、`server.Subscribe(filter string, subscriptionId int, handler InlineSubFn) error`メソッドがコールバックも含めて使用できます。 +インラインサブスクリプションではQoS0のみが適用されます。もし複数のコールバックを同じフィルタに設定したい場合は、MQTTv5の`subscriptionId`のプロパティがその区別に使用できます。 + +```go +callbackFn := func(cl *mqtt.Client, sub packets.Subscription, pk packets.Packet) { + server.Log.Info("inline client received message from subscription", "client", cl.ID, "subscriptionId", sub.Identifier, "topic", pk.TopicName, "payload", string(pk.Payload)) +} +server.Subscribe("direct/#", 1, callbackFn) +``` + +#### Inline Unsubscribe +インラインクライアントでサブスクリプション解除をしたい場合は、`server.Unsubscribe(filter string, subscriptionId int) error` メソッドで行うことができます。 + +```go +server.Unsubscribe("direct/#", 1) +``` + +### Packet Injection 
+もし、より制御したい場合や、特定のMQTTv5のプロパティやその他の値をセットしたい場合は、クライアントからのパブリッシュパケットを自ら作成することができます。この方法は単なるパブリッシュではなく、MQTTパケットをまるで特定のクライアントから受け取ったかのようにランタイムに直接インジェクションすることができます。
+
+このパケットインジェクションは例えばPING ReqやサブスクリプションなどのどんなMQTTパケットでも使用できます。そしてクライアントの構造体とメソッドはエクスポートされているので、(もし、非常にカスタマイズ性の高い要求がある場合には)まるで接続されたクライアントに代わってパケットをインジェクションすることさえできます。
+
+たいていの場合は上記のインラインクライアントを使用するのが良いでしょう、それはACLとトピックバリデーションをバイパスできる特権があるからです。これは$SYSトピックにさえパブリッシュできることも意味します。ビルトインのクライアントと同様に振る舞うインラインクライアントを作成できます。
+
+```go
+cl := server.NewClient(nil, "local", "inline", true)
+server.InjectPacket(cl, packets.Packet{
+  FixedHeader: packets.FixedHeader{
+    Type: packets.Publish,
+  },
+  TopicName: "direct/publish",
+  Payload: []byte("scheduled message"),
+})
+```
+
+> MQTTのパケットは正しく構成する必要があり、なので[the test packets catalogue](packets/tpackets.go)と[MQTTv5 Specification](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html)を参照してください。
+
+この機能の動作を確認するには[hooks example](examples/hooks/main.go) を見てください。
+
+
+### Testing
+#### ユニットテスト
+それぞれの関数が期待通りの動作をするように考えられてMochi MQTTテストが作成されています。テストを走らせるには:
+```
+go test --cover ./...
+``` + +#### Paho相互運用性テスト +`examples/paho/main.go`を使用してブローカーを起動し、_interoperability_フォルダの`python3 client_test5.py`のmqttv5とv3のテストを実行することで、[Paho Interoperability Test](https://github.com/eclipse/paho.mqtt.testing/tree/master/interoperability)を確認することができます。 + +> pahoスイートには現在は何個かの偽陰性に関わるissueがあるので、`paho/main.go`の例ではいくつかの互換性モードがオンになっていることに注意してください。 + + + +## ベンチマーク +Mochi MQTTのパフォーマンスはMosquitto、EMQX、その他などの有名なブローカーに匹敵します。 + +ベンチマークはApple Macbook Air M2上で[MQTT-Stresser](https://github.com/inovex/mqtt-stresser)、セッティングとして`cmd/main.go`のデフォルト設定を使用しています。高スループットと低スループットのバーストを考慮すると、中央値のスコアが最も信頼できます。この値は高いほど良いです。 + +> ベンチマークの値は1秒あたりのメッセージ数のスループットのそのものを表しているわけではありません。これは、mqtt-stresserによる固有の計算に依存するものではありますが、すべてのブローカーに渡って一貫性のある値として利用しています。 +> ベンチマークは一般的なパフォーマンス予測ガイドラインとしてのみ提供されます。比較はそのまま使用したデフォルトの設定値で実行しています。 + +`mqtt-stresser -broker tcp://localhost:1883 -num-clients=2 -num-messages=10000` +| Broker | publish fastest | median | slowest | receive fastest | median | slowest | +| -- | -- | -- | -- | -- | -- | -- | +| Mochi v2.2.10 | 124,772 | 125,456 | 124,614 | 314,461 | 313,186 | 311,910 | +| [Mosquitto v2.0.15](https://github.com/eclipse/mosquitto) | 155,920 | 155,919 | 155,918 | 185,485 | 185,097 | 184,709 | +| [EMQX v5.0.11](https://github.com/emqx/emqx) | 156,945 | 156,257 | 155,568 | 17,918 | 17,783 | 17,649 | +| [Rumqtt v0.21.0](https://github.com/bytebeamio/rumqtt) | 112,208 | 108,480 | 104,753 | 135,784 | 126,446 | 117,108 | + +`mqtt-stresser -broker tcp://localhost:1883 -num-clients=10 -num-messages=10000` +| Broker | publish fastest | median | slowest | receive fastest | median | slowest | +| -- | -- | -- | -- | -- | -- | -- | +| Mochi v2.2.10 | 41,825 | 31,663| 23,008 | 144,058 | 65,903 | 37,618 | +| Mosquitto v2.0.15 | 42,729 | 38,633 | 29,879 | 23,241 | 19,714 | 18,806 | +| EMQX v5.0.11 | 21,553 | 17,418 | 14,356 | 4,257 | 3,980 | 3,756 | +| Rumqtt v0.21.0 | 42,213 | 23,153 | 20,814 | 49,465 | 36,626 | 19,283 | + +100万メッセージ試験 (100 万メッセージを一斉にサーバーに送信します): + +`mqtt-stresser 
-broker tcp://localhost:1883 -num-clients=100 -num-messages=10000` +| Broker | publish fastest | median | slowest | receive fastest | median | slowest | +| -- | -- | -- | -- | -- | -- | -- | +| Mochi v2.2.10 | 13,532 | 4,425 | 2,344 | 52,120 | 7,274 | 2,701 | +| Mosquitto v2.0.15 | 3,826 | 3,395 | 3,032 | 1,200 | 1,150 | 1,118 | +| EMQX v5.0.11 | 4,086 | 2,432 | 2,274 | 434 | 333 | 311 | +| Rumqtt v0.21.0 | 78,972 | 5,047 | 3,804 | 4,286 | 3,249 | 2,027 | + +> EMQXのここでの結果は何が起きているのかわかりませんが、おそらくDockerのそのままの設定が最適ではなかったのでしょう、なので、この結果はソフトウェアのひとつの側面にしか過ぎないと捉えてください。 + + +## Contribution Guidelines +コントリビューションとフィードバックは両方とも歓迎しています![Open an issue](https://github.com/mochi-mqtt/server/issues)でバグを報告したり、質問したり、新機能のリクエストをしてください。もしプルリクエストするならば下記のガイドラインに従うようにしてください。 +- 合理的で可能な限りテストカバレッジを維持してください +- なぜPRをしたのかとそのPRの内容について明確にしてください。 +- 有意義な貢献をした場合はSPDX FileContributorタグをファイルにつけてください。 + +[SPDX Annotations](https://spdx.dev)はそのライセンス、著作権表記、コントリビューターについて明確するのために、それぞれのファイルに機械可読な形式で記されています。もし、新しいファイルをレポジトリに追加した場合は、下記のようなSPDXヘッダーを付与していることを確かめてください。 + +```go +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2023 mochi-mqtt +// SPDX-FileContributor: Your name or alias + +package name +``` + +ファイルにそれぞれのコントリビューターの`SPDX-FileContributor`が追加されていることを確認してください、他のファイルを参考にしてください。あなたのこのプロジェクトへのコントリビュートは価値があり、高く評価されます! + + +## Stargazers over time 🥰 +[![Stargazers over time](https://starchart.cc/mochi-mqtt/server.svg)](https://starchart.cc/mochi-mqtt/server) +Mochi MQTTをプロジェクトで使用していますか? [是非私達に教えてください!](https://github.com/mochi-mqtt/server/issues) + diff --git a/vendor/github.com/mochi-mqtt/server/v2/README.md b/vendor/github.com/mochi-mqtt/server/v2/README.md new file mode 100644 index 000000000..41ef4be2e --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/README.md @@ -0,0 +1,484 @@ +# Mochi-MQTT Server + +

+ +![build status](https://github.com/mochi-mqtt/server/actions/workflows/build.yml/badge.svg) +[![Coverage Status](https://coveralls.io/repos/github/mochi-mqtt/server/badge.svg?branch=master&v2)](https://coveralls.io/github/mochi-mqtt/server?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/mochi-mqtt/server)](https://goreportcard.com/report/github.com/mochi-mqtt/server/v2) +[![Go Reference](https://pkg.go.dev/badge/github.com/mochi-mqtt/server.svg)](https://pkg.go.dev/github.com/mochi-mqtt/server/v2) +[![contributions welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/mochi-mqtt/server/issues) + +

+ +[English](README.md) | [简体中文](README-CN.md) | [日本語](README-JP.md) | [Translators Wanted!](https://github.com/orgs/mochi-mqtt/discussions/310) + +🎆 **mochi-co/mqtt is now part of the new mochi-mqtt organisation.** [Read about this announcement here.](https://github.com/orgs/mochi-mqtt/discussions/271) + + +### Mochi-MQTT is a fully compliant, embeddable high-performance Go MQTT v5 (and v3.1.1) broker/server + +Mochi MQTT is an embeddable [fully compliant](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html) MQTT v5 broker server written in Go, designed for the development of telemetry and internet-of-things projects. The server can be used either as a standalone binary or embedded as a library in your own applications, and has been designed to be as lightweight and fast as possible, with great care taken to ensure the quality and maintainability of the project. + +#### What is MQTT? +MQTT stands for [MQ Telemetry Transport](https://en.wikipedia.org/wiki/MQTT). It is a publish/subscribe, extremely simple and lightweight messaging protocol, designed for constrained devices and low-bandwidth, high-latency or unreliable networks ([Learn more](https://mqtt.org/faq)). Mochi MQTT fully implements version 5.0.0 of the MQTT protocol. + +#### Mochi-MQTT Features + +- Full MQTTv5 Feature Compliance, compatibility for MQTT v3.1.1 and v3.0.0: + - User and MQTTv5 Packet Properties + - Topic Aliases + - Shared Subscriptions + - Subscription Options and Subscription Identifiers + - Message Expiry + - Client Session Expiry + - Send and Receive QoS Flow Control Quotas + - Server-side Disconnect and Auth Packets + - Will Delay Intervals + - Plus all the original MQTT features of Mochi MQTT v1, such as Full QoS(0,1,2), $SYS topics, retained messages, etc. +- Developer-centric: + - Most core broker code is now exported and accessible, for total developer control. + - Full-featured and flexible Hook-based interfacing system to provide easy 'plugin' development. 
+  - Direct Packet Injection using special inline client, or masquerade as existing clients.
+- Performant and Stable:
+  - Our classic trie-based Topic-Subscription model.
+  - Client-specific write buffers to avoid issues with slow-reading or irregular client behaviour.
+  - Passes all [Paho Interoperability Tests](https://github.com/eclipse/paho.mqtt.testing/tree/master/interoperability) for MQTT v5 and MQTT v3.
+  - Over a thousand carefully considered unit test scenarios.
+- TCP, Websocket (including SSL/TLS), and $SYS Dashboard listeners.
+- Built-in Redis, Badger, and Bolt Persistence using Hooks (but you can also make your own).
+- Built-in Rule-based Authentication and ACL Ledger using Hooks (also make your own).
+
+### Compatibility Notes
+Because of the overlap between the v5 specification and previous versions of mqtt, the server can accept both v5 and v3 clients, but note that in cases where both v5 and v3 clients are connected, properties and features provided for v5 clients will be downgraded for v3 clients (such as user properties).
+
+Support for MQTT v3.0.0 and v3.1.1 is considered hybrid-compatibility. Where not specifically restricted in the v3 specification, more modern and safety-first v5 behaviours are used instead - such as expiry for inflight and retained messages, and clients - and quality-of-service flow control limits.
+
+#### When is this repo updated?
+Unless it's a critical issue, new releases typically go out over the weekend.
+
+## Roadmap
+- Please [open an issue](https://github.com/mochi-mqtt/server/issues) to request new features or event hooks!
+- Cluster support.
+- Enhanced Metrics support.
+- File-based server configuration (supporting docker).
+
+## Quick Start
+### Running the Broker with Go
+Mochi MQTT can be used as a standalone broker. Simply checkout this repository and run the [cmd/main.go](cmd/main.go) entrypoint in the [cmd](cmd) folder which will expose tcp (:1883), websocket (:1882), and dashboard (:8080) listeners.
+ +``` +cd cmd +go build -o mqtt && ./mqtt +``` + +### Using Docker +You can now pull and run the [official Mochi MQTT image](https://hub.docker.com/r/mochimqtt/server) from our Docker repo: + +```sh +docker pull mochimqtt/server +or +docker run mochimqtt/server +``` + +This is a work in progress, and a [file-based configuration](https://github.com/orgs/mochi-mqtt/projects/2) is being developed to better support this implementation. _More substantial docker support is being discussed [here](https://github.com/orgs/mochi-mqtt/discussions/281#discussion-5544545) and [here](https://github.com/orgs/mochi-mqtt/discussions/209). Please join the discussion if you use Mochi-MQTT in this environment._ + +A simple Dockerfile is provided for running the [cmd/main.go](cmd/main.go) Websocket, TCP, and Stats server: + +```sh +docker build -t mochi:latest . +docker run -p 1883:1883 -p 1882:1882 -p 8080:8080 mochi:latest +``` + +## Developing with Mochi MQTT +### Importing as a package +Importing Mochi MQTT as a package requires just a few lines of code to get started. +``` go +import ( + "log" + + mqtt "github.com/mochi-mqtt/server/v2" + "github.com/mochi-mqtt/server/v2/hooks/auth" + "github.com/mochi-mqtt/server/v2/listeners" +) + +func main() { + // Create signals channel to run server until interrupted + sigs := make(chan os.Signal, 1) + done := make(chan bool, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-sigs + done <- true + }() + + // Create the new MQTT Server. + server := mqtt.New(nil) + + // Allow all connections. + _ = server.AddHook(new(auth.AllowHook), nil) + + // Create a TCP listener on a standard port. 
+ tcp := listeners.NewTCP("t1", ":1883", nil) + err := server.AddListener(tcp) + if err != nil { + log.Fatal(err) + } + + + go func() { + err := server.Serve() + if err != nil { + log.Fatal(err) + } + }() + + // Run server until interrupted + <-done + + // Cleanup +} +``` + +Examples of running the broker with various configurations can be found in the [examples](examples) folder. + +#### Network Listeners +The server comes with a variety of pre-packaged network listeners which allow the broker to accept connections on different protocols. The current listeners are: + +| Listener | Usage | +|------------------------------|----------------------------------------------------------------------------------------------| +| listeners.NewTCP | A TCP listener | +| listeners.NewUnixSock | A Unix Socket listener | +| listeners.NewNet | A net.Listener listener | +| listeners.NewWebsocket | A Websocket listener | +| listeners.NewHTTPStats | An HTTP $SYS info dashboard | +| listeners.NewHTTPHealthCheck | An HTTP healthcheck listener to provide health check responses for e.g. cloud infrastructure | + +> Use the `listeners.Listener` interface to develop new listeners. If you do, please let us know! + +A `*listeners.Config` may be passed to configure TLS. + +Examples of usage can be found in the [examples](examples) folder or [cmd/main.go](cmd/main.go). + + +## Server Options and Capabilities +A number of configurable options are available which can be used to alter the behaviour or restrict access to certain features in the server. + +```go +server := mqtt.New(&mqtt.Options{ + Capabilities: mqtt.Capabilities{ + MaximumSessionExpiryInterval: 3600, + Compatibilities: mqtt.Compatibilities{ + ObscureNotAuthorized: true, + }, + }, + ClientNetWriteBufferSize: 4096, + ClientNetReadBufferSize: 4096, + SysTopicResendInterval: 10, + InlineClient: false, +}) +``` + +Review the mqtt.Options, mqtt.Capabilities, and mqtt.Compatibilities structs for a comprehensive list of options. 
`ClientNetWriteBufferSize` and `ClientNetReadBufferSize` can be configured to adjust memory usage per client, based on your needs. + +### Default Configuration Notes + +Some choices were made when deciding the default configuration that need to be mentioned here: + +- By default, the value of `server.Options.Capabilities.MaximumMessageExpiryInterval` is set to 86400 (24 hours), in order to prevent exposing the broker to DOS attacks on hostile networks when using the out-of-the-box configuration (as an infinite expiry would allow an infinite number of retained/inflight messages to accumulate). If you are operating in a trusted environment, or you have capacity for a larger retention period, you may wish to override this (set to `0` for no expiry). + +## Event Hooks +A universal event hooks system allows developers to hook into various parts of the server and client life cycle to add and modify functionality of the broker. These universal hooks are used to provide everything from authentication, persistent storage, to debugging tools. + +Hooks are stackable - you can add multiple hooks to a server, and they will be run in the order they were added. Some hooks modify values, and these modified values will be passed to the subsequent hooks before being returned to the runtime code. + +| Type | Import | Info | +|----------------|--------------------------------------------------------------------------|----------------------------------------------------------------------------| +| Access Control | [mochi-mqtt/server/hooks/auth . AllowHook](hooks/auth/allow_all.go) | Allow access to all connecting clients and read/write to all topics. | +| Access Control | [mochi-mqtt/server/hooks/auth . Auth](hooks/auth/auth.go) | Rule-based access control ledger. | +| Persistence | [mochi-mqtt/server/hooks/storage/bolt](hooks/storage/bolt/bolt.go) | Persistent storage using [BoltDB](https://dbdb.io/db/boltdb) (deprecated). 
|
+| Persistence    | [mochi-mqtt/server/hooks/storage/badger](hooks/storage/badger/badger.go) | Persistent storage using [BadgerDB](https://github.com/dgraph-io/badger).  |
+| Persistence    | [mochi-mqtt/server/hooks/storage/redis](hooks/storage/redis/redis.go)    | Persistent storage using [Redis](https://redis.io).                        |
+| Debugging      | [mochi-mqtt/server/hooks/debug](hooks/debug/debug.go)                    | Additional debugging output to visualise packet flow.                      |
+
+Many of the internal server functions are now exposed to developers, so you can make your own Hooks by using the above as examples. If you do, please [Open an issue](https://github.com/mochi-mqtt/server/issues) and let everyone know!
+
+### Access Control
+#### Allow Hook
+By default, Mochi MQTT uses a DENY-ALL access control rule. To allow connections, this must be overwritten using an Access Control hook. The simplest of these hooks is the `auth.AllowAll` hook, which provides ALLOW-ALL rules to all connections, subscriptions, and publishing. It's also the simplest hook to use:
+
+```go
+server := mqtt.New(nil)
+_ = server.AddHook(new(auth.AllowHook), nil)
+```
+
+> Don't do this if you are exposing your server to the internet or untrusted networks - it should really be used for development, testing, and debugging only.
+
+#### Auth Ledger
+The Auth Ledger hook provides a sophisticated mechanism for defining access rules in a struct format. Auth ledger rules come in two forms: Auth rules (connection), and ACL rules (publish subscribe).
+
+Auth rules have 4 optional criteria and an assertion flag:
+| Criteria | Usage |
+| -- | -- |
+| Client | client id of the connecting client |
+| Username | username of the connecting client |
+| Password | password of the connecting client |
+| Remote | the remote address or ip of the client |
+| Allow | true (allow this user) or false (deny this user) |
+
+ACL rules have 3 optional criteria and a filter match:
+| Criteria | Usage |
+| -- | -- |
+| Client | client id of the connecting client |
+| Username | username of the connecting client |
+| Remote | the remote address or ip of the client |
+| Filters | an array of filters to match |
+
+Rules are processed in index order (0,1,2,3), returning on the first matching rule. See [hooks/auth/ledger.go](hooks/auth/ledger.go) to review the structs.
+
+```go
+server := mqtt.New(nil)
+err := server.AddHook(new(auth.Hook), &auth.Options{
+  Ledger: &auth.Ledger{
+    Auth: auth.AuthRules{ // Auth disallows all by default
+      {Username: "peach", Password: "password1", Allow: true},
+      {Username: "melon", Password: "password2", Allow: true},
+      {Remote: "127.0.0.1:*", Allow: true},
+      {Remote: "localhost:*", Allow: true},
+    },
+    ACL: auth.ACLRules{ // ACL allows all by default
+      {Remote: "127.0.0.1:*"}, // local superuser allow all
+      {
+        // user melon can read and write to their own topic
+        Username: "melon", Filters: auth.Filters{
+          "melon/#":   auth.ReadWrite,
+          "updates/#": auth.WriteOnly, // can write to updates, but can't read updates from others
+        },
+      },
+      {
+        // Otherwise, no clients have publishing permissions
+        Filters: auth.Filters{
+          "#":         auth.ReadOnly,
+          "updates/#": auth.Deny,
+        },
+      },
+    },
+  }
+})
+```
+
+The ledger can also be stored as JSON or YAML and loaded using the Data field:
+```go
+err := server.AddHook(new(auth.Hook), &auth.Options{
+  Data: data, // build ledger from byte slice: yaml or json
+})
+```
+See [examples/auth/encoded/main.go](examples/auth/encoded/main.go) for more information.
+
+### Persistent Storage
+#### Redis
+A basic Redis storage hook is available which provides persistence for the broker. It can be added to the server in the same fashion as any other hook, with several options. It uses github.com/go-redis/redis/v8 under the hook, and is completely configurable through the Options value.
+```go
+err := server.AddHook(new(redis.Hook), &redis.Options{
+  Options: &rv8.Options{
+    Addr:     "localhost:6379", // default redis address
+    Password: "",               // your password
+    DB:       0,                // your redis db
+  },
+})
+if err != nil {
+  log.Fatal(err)
+}
+```
+For more information on how the redis hook works, or how to use it, see the [examples/persistence/redis/main.go](examples/persistence/redis/main.go) or [hooks/storage/redis](hooks/storage/redis) code.
+
+#### Badger DB
+There's also a BadgerDB storage hook if you prefer file based storage. It can be added and configured in much the same way as the other hooks (with somewhat fewer options).
+```go
+err := server.AddHook(new(badger.Hook), &badger.Options{
+  Path: badgerPath,
+})
+if err != nil {
+  log.Fatal(err)
+}
+```
+For more information on how the badger hook works, or how to use it, see the [examples/persistence/badger/main.go](examples/persistence/badger/main.go) or [hooks/storage/badger](hooks/storage/badger) code.
+
+There is also a BoltDB hook which has been deprecated in favour of Badger, but if you need it, check [examples/persistence/bolt/main.go](examples/persistence/bolt/main.go).
+
+## Developing with Event Hooks
+Many hooks are available for interacting with the broker and client lifecycle.
+The function signatures for all the hooks and `mqtt.Hook` interface can be found in [hooks.go](hooks.go).
+
+> The most flexible event hooks are OnPacketRead, OnPacketEncode, and OnPacketSent - these hooks can be used to control and modify all incoming and outgoing packets.
+ +| Function | Usage | +|------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| OnStarted | Called when the server has successfully started. | +| OnStopped | Called when the server has successfully stopped. | +| OnConnectAuthenticate | Called when a user attempts to authenticate with the server. An implementation of this method MUST be used to allow or deny access to the server (see hooks/auth/allow_all or basic). It can be used in custom hooks to check connecting users against an existing user database. Returns true if allowed. | +| OnACLCheck | Called when a user attempts to publish or subscribe to a topic filter. As above. | +| OnSysInfoTick | Called when the $SYS topic values are published out. | +| OnConnect | Called when a new client connects, may return an error or packet code to halt the client connection process. | +| OnSessionEstablish | Called immediately after a new client connects and authenticates and immediately before the session is established and CONNACK is sent. | +| OnSessionEstablished | Called when a new client successfully establishes a session (after OnConnect) | +| OnDisconnect | Called when a client is disconnected for any reason. | +| OnAuthPacket | Called when an auth packet is received. It is intended to allow developers to create their own mqtt v5 Auth Packet handling mechanisms. Allows packet modification. | +| OnPacketRead | Called when a packet is received from a client. Allows packet modification. | +| OnPacketEncode | Called immediately before a packet is encoded to be sent to a client. Allows packet modification. | +| OnPacketSent | Called when a packet has been sent to a client. 
|
+| OnPacketProcessed      | Called when a packet has been received and successfully handled by the broker. |
+| OnSubscribe            | Called when a client subscribes to one or more filters. Allows packet modification. |
+| OnSubscribed           | Called when a client successfully subscribes to one or more filters. |
+| OnSelectSubscribers    | Called when subscribers have been collected for a topic, but before shared subscription subscribers have been selected. Allows recipient modification. |
+| OnUnsubscribe          | Called when a client unsubscribes from one or more filters. Allows packet modification. |
+| OnUnsubscribed         | Called when a client successfully unsubscribes from one or more filters. |
+| OnPublish              | Called when a client publishes a message. Allows packet modification. |
+| OnPublished            | Called when a client has published a message to subscribers. |
+| OnPublishDropped       | Called when a message to a client is dropped before delivery, such as if the client is taking too long to respond. |
+| OnRetainMessage        | Called when a published message is retained. |
+| OnRetainPublished      | Called when a retained message is published to a client. |
+| OnQosPublish           | Called when a publish packet with Qos >= 1 is issued to a subscriber. |
+| OnQosComplete          | Called when the Qos flow for a message has been completed. |
+| OnQosDropped           | Called when an inflight message expires before completion. |
+| OnPacketIDExhausted    | Called when a client runs out of unused packet ids to assign. |
+| OnWill                 | Called when a client disconnects and intends to issue a will message. Allows packet modification. |
+| OnWillSent             | Called when an LWT message has been issued from a disconnecting client. |
+| OnClientExpired        | Called when a client session has expired and should be deleted. |
+| OnRetainedExpired      | Called when a retained message has expired and should be deleted. |
+| StoredClients          | Returns clients, eg. from a persistent store. |
+| StoredSubscriptions    | Returns client subscriptions, eg. 
from a persistent store. |
+| StoredInflightMessages | Returns inflight messages, eg. from a persistent store. |
+| StoredRetainedMessages | Returns retained messages, eg. from a persistent store. |
+| StoredSysInfo | Returns stored system info values, eg. from a persistent store. |
+
+If you are building a persistent storage hook, see the existing persistent hooks for inspiration and patterns. If you are building an auth hook, you will need `OnACLCheck` and `OnConnectAuthenticate`.
+
+### Inline Client (v2.4.0+)
+It's now possible to subscribe and publish to topics directly from the embedding code, by using the `inline client` feature. The Inline Client is an embedded client which operates as part of the server, and can be enabled in the server options:
+```go
+server := mqtt.New(&mqtt.Options{
+  InlineClient: true,
+})
+```
+Once enabled, you will be able to use the `server.Publish`, `server.Subscribe`, and `server.Unsubscribe` methods to issue and receive messages from broker-adjacent code.
+
+> See [direct examples](examples/direct/main.go) for real-life usage examples.
+
+#### Inline Publish
+To publish a basic message to a topic from within the embedding application, you can use the `server.Publish(topic string, payload []byte, retain bool, qos byte) error` method.
+
+```go
+err := server.Publish("direct/publish", []byte("packet scheduled message"), false, 0)
+```
+> The Qos byte in this case is only used to set the upper qos limit available for subscribers, as per MQTT v5 spec.
+
+#### Inline Subscribe
+To subscribe to a topic filter from within the embedding application, you can use the `server.Subscribe(filter string, subscriptionId int, handler InlineSubFn) error` method with a callback function. Note that only QoS 0 is supported for inline subscriptions. If you wish to have multiple callbacks for the same filter, you can use the MQTTv5 `subscriptionId` property to differentiate. 
+
+```go
+callbackFn := func(cl *mqtt.Client, sub packets.Subscription, pk packets.Packet) {
+    server.Log.Info("inline client received message from subscription", "client", cl.ID, "subscriptionId", sub.Identifier, "topic", pk.TopicName, "payload", string(pk.Payload))
+}
+server.Subscribe("direct/#", 1, callbackFn)
+```
+
+#### Inline Unsubscribe
+You may wish to unsubscribe if you have subscribed to a filter using the inline client. You can do this easily with the `server.Unsubscribe(filter string, subscriptionId int) error` method:
+
+```go
+server.Unsubscribe("direct/#", 1)
+```
+
+### Packet Injection
+If you want more control, or want to set specific MQTT v5 properties and other values, you can create your own publish packets from a client of your choice. This method allows you to inject MQTT packets (not just publish) directly into the runtime as though they had been received by a specific client.
+
+Packet injection can be used for any MQTT packet, including ping requests, subscriptions, etc. And because the Clients structs and methods are now exported, you can even inject packets on behalf of a connected client (if you have very custom requirements).
+
+Most of the time you'll want to use the Inline Client described above, as it has unique privileges: it bypasses all ACL and topic validation checks, meaning it can even publish to $SYS topics. In this case, you can create an inline client from scratch which will behave the same as the built-in inline client.
+
+```go
+cl := server.NewClient(nil, "local", "inline", true)
+server.InjectPacket(cl, packets.Packet{
+  FixedHeader: packets.FixedHeader{
+    Type: packets.Publish,
+  },
+  TopicName: "direct/publish",
+  Payload: []byte("scheduled message"),
+})
+```
+
+> MQTT packets still need to be correctly formed, so refer to [the test packets catalogue](packets/tpackets.go) and [MQTTv5 Specification](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html) for inspiration. 
+
+See the [hooks example](examples/hooks/main.go) to see this feature in action.
+
+
+### Testing
+#### Unit Tests
+Mochi MQTT tests over a thousand scenarios with thoughtfully hand-written unit tests to ensure each function does exactly what we expect. You can run the tests using go:
+```
+go test --cover ./...
+```
+
+#### Paho Interoperability Test
+You can check the broker against the [Paho Interoperability Test](https://github.com/eclipse/paho.mqtt.testing/tree/master/interoperability) by starting the broker using `examples/paho/main.go`, and then running the mqtt v5 and v3 tests with `python3 client_test5.py` from the _interoperability_ folder.
+
+> Note that there are currently a number of outstanding issues regarding false negatives in the paho suite, and as such, certain compatibility modes are enabled in the `paho/main.go` example.
+
+
+## Performance Benchmarks
+Mochi MQTT performance is comparable with popular brokers such as Mosquitto, EMQX, and others.
+
+Performance benchmarks were tested using [MQTT-Stresser](https://github.com/inovex/mqtt-stresser) on an Apple MacBook Air M2, using `cmd/main.go` default settings. Taking into account bursts of high and low throughput, the median scores are the most useful. Higher is better.
+
+> The values presented in the benchmark are not representative of true messages per second throughput. They rely on an unusual calculation by mqtt-stresser, but are usable as they are consistent across all brokers.
+> Benchmarks are provided as a general performance expectation guideline only. Comparisons are performed using out-of-the-box default configurations. 
+ +`mqtt-stresser -broker tcp://localhost:1883 -num-clients=2 -num-messages=10000` +| Broker | publish fastest | median | slowest | receive fastest | median | slowest | +| -- | -- | -- | -- | -- | -- | -- | +| Mochi v2.2.10 | 124,772 | 125,456 | 124,614 | 314,461 | 313,186 | 311,910 | +| [Mosquitto v2.0.15](https://github.com/eclipse/mosquitto) | 155,920 | 155,919 | 155,918 | 185,485 | 185,097 | 184,709 | +| [EMQX v5.0.11](https://github.com/emqx/emqx) | 156,945 | 156,257 | 155,568 | 17,918 | 17,783 | 17,649 | +| [Rumqtt v0.21.0](https://github.com/bytebeamio/rumqtt) | 112,208 | 108,480 | 104,753 | 135,784 | 126,446 | 117,108 | + +`mqtt-stresser -broker tcp://localhost:1883 -num-clients=10 -num-messages=10000` +| Broker | publish fastest | median | slowest | receive fastest | median | slowest | +| -- | -- | -- | -- | -- | -- | -- | +| Mochi v2.2.10 | 41,825 | 31,663| 23,008 | 144,058 | 65,903 | 37,618 | +| Mosquitto v2.0.15 | 42,729 | 38,633 | 29,879 | 23,241 | 19,714 | 18,806 | +| EMQX v5.0.11 | 21,553 | 17,418 | 14,356 | 4,257 | 3,980 | 3,756 | +| Rumqtt v0.21.0 | 42,213 | 23,153 | 20,814 | 49,465 | 36,626 | 19,283 | + +Million Message Challenge (hit the server with 1 million messages immediately): + +`mqtt-stresser -broker tcp://localhost:1883 -num-clients=100 -num-messages=10000` +| Broker | publish fastest | median | slowest | receive fastest | median | slowest | +| -- | -- | -- | -- | -- | -- | -- | +| Mochi v2.2.10 | 13,532 | 4,425 | 2,344 | 52,120 | 7,274 | 2,701 | +| Mosquitto v2.0.15 | 3,826 | 3,395 | 3,032 | 1,200 | 1,150 | 1,118 | +| EMQX v5.0.11 | 4,086 | 2,432 | 2,274 | 434 | 333 | 311 | +| Rumqtt v0.21.0 | 78,972 | 5,047 | 3,804 | 4,286 | 3,249 | 2,027 | + +> Not sure what's going on with EMQX here, perhaps the docker out-of-the-box settings are not optimal, so take it with a pinch of salt as we know for a fact it's a solid piece of software. + +## Contribution Guidelines +Contributions and feedback are both welcomed and encouraged! 
[Open an issue](https://github.com/mochi-mqtt/server/issues) to report a bug, ask a question, or make a feature request. If you open a pull request, please try to follow the following guidelines: +- Try to maintain test coverage where reasonably possible. +- Clearly state what the PR does and why. +- Please remember to add your SPDX FileContributor tag to files where you have made a meaningful contribution. + +[SPDX Annotations](https://spdx.dev) are used to clearly indicate the license, copyright, and contributions of each file in a machine-readable format. If you are adding a new file to the repository, please ensure it has the following SPDX header: +```go +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2023 mochi-mqtt +// SPDX-FileContributor: Your name or alias + +package name +``` + +Please ensure to add a new `SPDX-FileContributor` line for each contributor to the file. Refer to other files for examples. Please remember to do this, your contributions to this project are valuable and appreciated - it's important to receive credit! + +## Stargazers over time 🥰 +[![Stargazers over time](https://starchart.cc/mochi-mqtt/server.svg)](https://starchart.cc/mochi-mqtt/server) +Are you using Mochi MQTT in a project? [Let us know!](https://github.com/mochi-mqtt/server/issues) + diff --git a/vendor/github.com/mochi-mqtt/server/v2/clients.go b/vendor/github.com/mochi-mqtt/server/v2/clients.go new file mode 100644 index 000000000..dca822541 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/clients.go @@ -0,0 +1,644 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2023 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package mqtt + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/rs/xid" + + "github.com/mochi-mqtt/server/v2/packets" +) + +const ( + defaultKeepalive uint16 = 10 // the default connection keepalive value in seconds. 
+ defaultClientProtocolVersion byte = 4 // the default mqtt protocol version of connecting clients (if somehow unspecified). + minimumKeepalive uint16 = 5 // the minimum recommended keepalive - values under with display a warning. +) + +var ( + ErrMinimumKeepalive = errors.New("client keepalive is below minimum recommended value and may exhibit connection instability") +) + +// ReadFn is the function signature for the function used for reading and processing new packets. +type ReadFn func(*Client, packets.Packet) error + +// Clients contains a map of the clients known by the broker. +type Clients struct { + internal map[string]*Client // clients known by the broker, keyed on client id. + sync.RWMutex +} + +// NewClients returns an instance of Clients. +func NewClients() *Clients { + return &Clients{ + internal: make(map[string]*Client), + } +} + +// Add adds a new client to the clients map, keyed on client id. +func (cl *Clients) Add(val *Client) { + cl.Lock() + defer cl.Unlock() + cl.internal[val.ID] = val +} + +// GetAll returns all the clients. +func (cl *Clients) GetAll() map[string]*Client { + cl.RLock() + defer cl.RUnlock() + m := map[string]*Client{} + for k, v := range cl.internal { + m[k] = v + } + return m +} + +// Get returns the value of a client if it exists. +func (cl *Clients) Get(id string) (*Client, bool) { + cl.RLock() + defer cl.RUnlock() + val, ok := cl.internal[id] + return val, ok +} + +// Len returns the length of the clients map. +func (cl *Clients) Len() int { + cl.RLock() + defer cl.RUnlock() + val := len(cl.internal) + return val +} + +// Delete removes a client from the internal map. +func (cl *Clients) Delete(id string) { + cl.Lock() + defer cl.Unlock() + delete(cl.internal, id) +} + +// GetByListener returns clients matching a listener id. 
+func (cl *Clients) GetByListener(id string) []*Client { + cl.RLock() + defer cl.RUnlock() + clients := make([]*Client, 0, cl.Len()) + for _, client := range cl.internal { + if client.Net.Listener == id && !client.Closed() { + clients = append(clients, client) + } + } + return clients +} + +// Client contains information about a client known by the broker. +type Client struct { + Properties ClientProperties // client properties + State ClientState // the operational state of the client. + Net ClientConnection // network connection state of the client + ID string // the client id. + ops *ops // ops provides a reference to server ops. + sync.RWMutex // mutex +} + +// ClientConnection contains the connection transport and metadata for the client. +type ClientConnection struct { + Conn net.Conn // the net.Conn used to establish the connection + bconn *bufio.Reader // a buffered net.Conn for reading packets + outbuf *bytes.Buffer // a buffer for writing packets + Remote string // the remote address of the client + Listener string // listener id of the client + Inline bool // if true, the client is the built-in 'inline' embedded client +} + +// ClientProperties contains the properties which define the client behaviour. +type ClientProperties struct { + Props packets.Properties + Will Will + Username []byte + ProtocolVersion byte + Clean bool +} + +// Will contains the last will and testament details for a client connection. +type Will struct { + Payload []byte // - + User []packets.UserProperty // - + TopicName string // - + Flag uint32 // 0,1 + WillDelayInterval uint32 // - + Qos byte // - + Retain bool // - +} + +// ClientState tracks the state of the client. 
+type ClientState struct { + TopicAliases TopicAliases // a map of topic aliases + stopCause atomic.Value // reason for stopping + Inflight *Inflight // a map of in-flight qos messages + Subscriptions *Subscriptions // a map of the subscription filters a client maintains + disconnected int64 // the time the client disconnected in unix time, for calculating expiry + outbound chan *packets.Packet // queue for pending outbound packets + endOnce sync.Once // only end once + isTakenOver uint32 // used to identify orphaned clients + packetID uint32 // the current highest packetID + open context.Context // indicate that the client is open for packet exchange + cancelOpen context.CancelFunc // cancel function for open context + outboundQty int32 // number of messages currently in the outbound queue + Keepalive uint16 // the number of seconds the connection can wait + ServerKeepalive bool // keepalive was set by the server +} + +// newClient returns a new instance of Client. This is almost exclusively used by Server +// for creating new clients, but it lives here because it's not dependent. +func newClient(c net.Conn, o *ops) *Client { + ctx, cancel := context.WithCancel(context.Background()) + cl := &Client{ + State: ClientState{ + Inflight: NewInflights(), + Subscriptions: NewSubscriptions(), + TopicAliases: NewTopicAliases(o.options.Capabilities.TopicAliasMaximum), + open: ctx, + cancelOpen: cancel, + Keepalive: defaultKeepalive, + outbound: make(chan *packets.Packet, o.options.Capabilities.MaximumClientWritesPending), + }, + Properties: ClientProperties{ + ProtocolVersion: defaultClientProtocolVersion, // default protocol version + }, + ops: o, + } + + if c != nil { + cl.Net = ClientConnection{ + Conn: c, + bconn: bufio.NewReaderSize(c, o.options.ClientNetReadBufferSize), + Remote: c.RemoteAddr().String(), + } + } + + return cl +} + +// WriteLoop ranges over pending outbound messages and writes them to the client connection. 
+func (cl *Client) WriteLoop() { + for { + select { + case pk := <-cl.State.outbound: + if err := cl.WritePacket(*pk); err != nil { + // TODO : Figure out what to do with error + cl.ops.log.Debug("failed publishing packet", "error", err, "client", cl.ID, "packet", pk) + } + atomic.AddInt32(&cl.State.outboundQty, -1) + case <-cl.State.open.Done(): + return + } + } +} + +// ParseConnect parses the connect parameters and properties for a client. +func (cl *Client) ParseConnect(lid string, pk packets.Packet) { + cl.Net.Listener = lid + + cl.Properties.ProtocolVersion = pk.ProtocolVersion + cl.Properties.Username = pk.Connect.Username + cl.Properties.Clean = pk.Connect.Clean + cl.Properties.Props = pk.Properties.Copy(false) + + if cl.Properties.Props.ReceiveMaximum > cl.ops.options.Capabilities.MaximumInflight { // 3.3.4 Non-normative + cl.Properties.Props.ReceiveMaximum = cl.ops.options.Capabilities.MaximumInflight + } + + if pk.Connect.Keepalive <= minimumKeepalive { + cl.ops.log.Warn( + ErrMinimumKeepalive.Error(), + "client", cl.ID, + "keepalive", pk.Connect.Keepalive, + "recommended", minimumKeepalive, + ) + } + + cl.State.Keepalive = pk.Connect.Keepalive // [MQTT-3.2.2-22] + cl.State.Inflight.ResetReceiveQuota(int32(cl.ops.options.Capabilities.ReceiveMaximum)) // server receive max per client + cl.State.Inflight.ResetSendQuota(int32(cl.Properties.Props.ReceiveMaximum)) // client receive max + cl.State.TopicAliases.Outbound = NewOutboundTopicAliases(cl.Properties.Props.TopicAliasMaximum) + + cl.ID = pk.Connect.ClientIdentifier + if cl.ID == "" { + cl.ID = xid.New().String() // [MQTT-3.1.3-6] [MQTT-3.1.3-7] + cl.Properties.Props.AssignedClientID = cl.ID + } + + if pk.Connect.WillFlag { + cl.Properties.Will = Will{ + Qos: pk.Connect.WillQos, + Retain: pk.Connect.WillRetain, + Payload: pk.Connect.WillPayload, + TopicName: pk.Connect.WillTopic, + WillDelayInterval: pk.Connect.WillProperties.WillDelayInterval, + User: pk.Connect.WillProperties.User, + } + if 
pk.Properties.SessionExpiryIntervalFlag && + pk.Properties.SessionExpiryInterval < pk.Connect.WillProperties.WillDelayInterval { + cl.Properties.Will.WillDelayInterval = pk.Properties.SessionExpiryInterval + } + if pk.Connect.WillFlag { + cl.Properties.Will.Flag = 1 // atomic for checking + } + } +} + +// refreshDeadline refreshes the read/write deadline for the net.Conn connection. +func (cl *Client) refreshDeadline(keepalive uint16) { + var expiry time.Time // nil time can be used to disable deadline if keepalive = 0 + if keepalive > 0 { + expiry = time.Now().Add(time.Duration(keepalive+(keepalive/2)) * time.Second) // [MQTT-3.1.2-22] + } + + if cl.Net.Conn != nil { + _ = cl.Net.Conn.SetDeadline(expiry) // [MQTT-3.1.2-22] + } +} + +// NextPacketID returns the next available (unused) packet id for the client. +// If no unused packet ids are available, an error is returned and the client +// should be disconnected. +func (cl *Client) NextPacketID() (i uint32, err error) { + cl.Lock() + defer cl.Unlock() + + i = atomic.LoadUint32(&cl.State.packetID) + started := i + overflowed := false + for { + if overflowed && i == started { + return 0, packets.ErrQuotaExceeded + } + + if i >= cl.ops.options.Capabilities.maximumPacketID { + overflowed = true + i = 0 + continue + } + + i++ + + if _, ok := cl.State.Inflight.Get(uint16(i)); !ok { + atomic.StoreUint32(&cl.State.packetID, i) + return i, nil + } + } +} + +// ResendInflightMessages attempts to resend any pending inflight messages to connected clients. 
+func (cl *Client) ResendInflightMessages(force bool) error { + if cl.State.Inflight.Len() == 0 { + return nil + } + + for _, tk := range cl.State.Inflight.GetAll(false) { + if tk.FixedHeader.Type == packets.Publish { + tk.FixedHeader.Dup = true // [MQTT-3.3.1-1] [MQTT-3.3.1-3] + } + + cl.ops.hooks.OnQosPublish(cl, tk, tk.Created, 0) + err := cl.WritePacket(tk) + if err != nil { + return err + } + + if tk.FixedHeader.Type == packets.Puback || tk.FixedHeader.Type == packets.Pubcomp { + if ok := cl.State.Inflight.Delete(tk.PacketID); ok { + cl.ops.hooks.OnQosComplete(cl, tk) + atomic.AddInt64(&cl.ops.info.Inflight, -1) + } + } + } + + return nil +} + +// ClearInflights deletes all inflight messages for the client, e.g. for a disconnected user with a clean session. +func (cl *Client) ClearInflights() { + for _, tk := range cl.State.Inflight.GetAll(false) { + if ok := cl.State.Inflight.Delete(tk.PacketID); ok { + cl.ops.hooks.OnQosDropped(cl, tk) + atomic.AddInt64(&cl.ops.info.Inflight, -1) + } + } +} + +// ClearExpiredInflights deletes any inflight messages which have expired. +func (cl *Client) ClearExpiredInflights(now, maximumExpiry int64) []uint16 { + deleted := []uint16{} + for _, tk := range cl.State.Inflight.GetAll(false) { + expired := tk.ProtocolVersion == 5 && tk.Expiry > 0 && tk.Expiry < now // [MQTT-3.3.2-5] + + // If the maximum message expiry interval is set (greater than 0), and the message + // retention period exceeds the maximum expiry, the message will be forcibly removed. + enforced := maximumExpiry > 0 && now-tk.Created > maximumExpiry + + if expired || enforced { + if ok := cl.State.Inflight.Delete(tk.PacketID); ok { + cl.ops.hooks.OnQosDropped(cl, tk) + atomic.AddInt64(&cl.ops.info.Inflight, -1) + deleted = append(deleted, tk.PacketID) + } + } + } + + return deleted +} + +// Read reads incoming packets from the connected client and transforms them into +// packets to be handled by the packetHandler. 
+func (cl *Client) Read(packetHandler ReadFn) error { + var err error + + for { + if cl.Closed() { + return nil + } + + cl.refreshDeadline(cl.State.Keepalive) + fh := new(packets.FixedHeader) + err = cl.ReadFixedHeader(fh) + if err != nil { + return err + } + + pk, err := cl.ReadPacket(fh) + if err != nil { + return err + } + + err = packetHandler(cl, pk) // Process inbound packet. + if err != nil { + return err + } + } +} + +// Stop instructs the client to shut down all processing goroutines and disconnect. +func (cl *Client) Stop(err error) { + cl.State.endOnce.Do(func() { + + if cl.Net.Conn != nil { + _ = cl.Net.Conn.Close() // omit close error + } + + if err != nil { + cl.State.stopCause.Store(err) + } + + if cl.State.cancelOpen != nil { + cl.State.cancelOpen() + } + + atomic.StoreInt64(&cl.State.disconnected, time.Now().Unix()) + }) +} + +// StopCause returns the reason the client connection was stopped, if any. +func (cl *Client) StopCause() error { + if cl.State.stopCause.Load() == nil { + return nil + } + return cl.State.stopCause.Load().(error) +} + +// Closed returns true if client connection is closed. +func (cl *Client) Closed() bool { + return cl.State.open == nil || cl.State.open.Err() != nil +} + +// ReadFixedHeader reads in the values of the next packet's fixed header. 
+func (cl *Client) ReadFixedHeader(fh *packets.FixedHeader) error { + if cl.Net.bconn == nil { + return ErrConnectionClosed + } + + b, err := cl.Net.bconn.ReadByte() + if err != nil { + return err + } + + err = fh.Decode(b) + if err != nil { + return err + } + + var bu int + fh.Remaining, bu, err = packets.DecodeLength(cl.Net.bconn) + if err != nil { + return err + } + + if cl.ops.options.Capabilities.MaximumPacketSize > 0 && uint32(fh.Remaining+1) > cl.ops.options.Capabilities.MaximumPacketSize { + return packets.ErrPacketTooLarge // [MQTT-3.2.2-15] + } + + atomic.AddInt64(&cl.ops.info.BytesReceived, int64(bu+1)) + return nil +} + +// ReadPacket reads the remaining buffer into an MQTT packet. +func (cl *Client) ReadPacket(fh *packets.FixedHeader) (pk packets.Packet, err error) { + atomic.AddInt64(&cl.ops.info.PacketsReceived, 1) + + pk.ProtocolVersion = cl.Properties.ProtocolVersion // inherit client protocol version for decoding + pk.FixedHeader = *fh + p := make([]byte, pk.FixedHeader.Remaining) + n, err := io.ReadFull(cl.Net.bconn, p) + if err != nil { + return pk, err + } + + atomic.AddInt64(&cl.ops.info.BytesReceived, int64(n)) + + // Decode the remaining packet values using a fresh copy of the bytes, + // otherwise the next packet will change the data of this one. + px := append([]byte{}, p[:]...) 
+ switch pk.FixedHeader.Type { + case packets.Connect: + err = pk.ConnectDecode(px) + case packets.Disconnect: + err = pk.DisconnectDecode(px) + case packets.Connack: + err = pk.ConnackDecode(px) + case packets.Publish: + err = pk.PublishDecode(px) + if err == nil { + atomic.AddInt64(&cl.ops.info.MessagesReceived, 1) + } + case packets.Puback: + err = pk.PubackDecode(px) + case packets.Pubrec: + err = pk.PubrecDecode(px) + case packets.Pubrel: + err = pk.PubrelDecode(px) + case packets.Pubcomp: + err = pk.PubcompDecode(px) + case packets.Subscribe: + err = pk.SubscribeDecode(px) + case packets.Suback: + err = pk.SubackDecode(px) + case packets.Unsubscribe: + err = pk.UnsubscribeDecode(px) + case packets.Unsuback: + err = pk.UnsubackDecode(px) + case packets.Pingreq: + case packets.Pingresp: + case packets.Auth: + err = pk.AuthDecode(px) + default: + err = fmt.Errorf("invalid packet type; %v", pk.FixedHeader.Type) + } + + if err != nil { + return pk, err + } + + pk, err = cl.ops.hooks.OnPacketRead(cl, pk) + return +} + +// WritePacket encodes and writes a packet to the client. 
+func (cl *Client) WritePacket(pk packets.Packet) error { + if cl.Closed() { + return ErrConnectionClosed + } + + if cl.Net.Conn == nil { + return nil + } + + if pk.Expiry > 0 { + pk.Properties.MessageExpiryInterval = uint32(pk.Expiry - time.Now().Unix()) // [MQTT-3.3.2-6] + } + + pk.ProtocolVersion = cl.Properties.ProtocolVersion + if pk.Mods.MaxSize == 0 { // NB we use this statement to embed client packet sizes in tests + pk.Mods.MaxSize = cl.Properties.Props.MaximumPacketSize + } + + if cl.Properties.Props.RequestProblemInfoFlag && cl.Properties.Props.RequestProblemInfo == 0x0 { + pk.Mods.DisallowProblemInfo = true // [MQTT-3.1.2-29] strict, no problem info on any packet if set + } + + if pk.FixedHeader.Type != packets.Connack || cl.Properties.Props.RequestResponseInfo == 0x1 || cl.ops.options.Capabilities.Compatibilities.AlwaysReturnResponseInfo { + pk.Mods.AllowResponseInfo = true // [MQTT-3.1.2-28] we need to know which properties we can encode + } + + pk = cl.ops.hooks.OnPacketEncode(cl, pk) + + var err error + buf := new(bytes.Buffer) + switch pk.FixedHeader.Type { + case packets.Connect: + err = pk.ConnectEncode(buf) + case packets.Connack: + err = pk.ConnackEncode(buf) + case packets.Publish: + err = pk.PublishEncode(buf) + case packets.Puback: + err = pk.PubackEncode(buf) + case packets.Pubrec: + err = pk.PubrecEncode(buf) + case packets.Pubrel: + err = pk.PubrelEncode(buf) + case packets.Pubcomp: + err = pk.PubcompEncode(buf) + case packets.Subscribe: + err = pk.SubscribeEncode(buf) + case packets.Suback: + err = pk.SubackEncode(buf) + case packets.Unsubscribe: + err = pk.UnsubscribeEncode(buf) + case packets.Unsuback: + err = pk.UnsubackEncode(buf) + case packets.Pingreq: + err = pk.PingreqEncode(buf) + case packets.Pingresp: + err = pk.PingrespEncode(buf) + case packets.Disconnect: + err = pk.DisconnectEncode(buf) + case packets.Auth: + err = pk.AuthEncode(buf) + default: + err = fmt.Errorf("%w: %v", packets.ErrNoValidPacketAvailable, 
pk.FixedHeader.Type) + } + if err != nil { + return err + } + + if pk.Mods.MaxSize > 0 && uint32(buf.Len()) > pk.Mods.MaxSize { + return packets.ErrPacketTooLarge // [MQTT-3.1.2-24] [MQTT-3.1.2-25] + } + + n, err := func() (int64, error) { + cl.Lock() + defer cl.Unlock() + if len(cl.State.outbound) == 0 { + if cl.Net.outbuf == nil { + return buf.WriteTo(cl.Net.Conn) + } + + // first write to buffer, then flush buffer + n, _ := cl.Net.outbuf.Write(buf.Bytes()) // will always be successful + err = cl.flushOutbuf() + return int64(n), err + } + + // there are more writes in the queue + if cl.Net.outbuf == nil { + if buf.Len() >= cl.ops.options.ClientNetWriteBufferSize { + return buf.WriteTo(cl.Net.Conn) + } + cl.Net.outbuf = new(bytes.Buffer) + } + + n, _ := cl.Net.outbuf.Write(buf.Bytes()) // will always be successful + if cl.Net.outbuf.Len() < cl.ops.options.ClientNetWriteBufferSize { + return int64(n), nil + } + + err = cl.flushOutbuf() + return int64(n), err + }() + if err != nil { + return err + } + + atomic.AddInt64(&cl.ops.info.BytesSent, n) + atomic.AddInt64(&cl.ops.info.PacketsSent, 1) + if pk.FixedHeader.Type == packets.Publish { + atomic.AddInt64(&cl.ops.info.MessagesSent, 1) + } + + cl.ops.hooks.OnPacketSent(cl, pk, buf.Bytes()) + + return err +} + +func (cl *Client) flushOutbuf() (err error) { + if cl.Net.outbuf == nil { + return + } + + _, err = cl.Net.outbuf.WriteTo(cl.Net.Conn) + if err == nil { + cl.Net.outbuf = nil + } + return +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/hooks.go b/vendor/github.com/mochi-mqtt/server/v2/hooks.go new file mode 100644 index 000000000..25b625dca --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/hooks.go @@ -0,0 +1,854 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co, thedevop, dgduncan + +package mqtt + +import ( + "errors" + "fmt" + "log/slog" + "sync" + "sync/atomic" + + "github.com/mochi-mqtt/server/v2/hooks/storage" + 
"github.com/mochi-mqtt/server/v2/packets" + "github.com/mochi-mqtt/server/v2/system" +) + +const ( + SetOptions byte = iota + OnSysInfoTick + OnStarted + OnStopped + OnConnectAuthenticate + OnACLCheck + OnConnect + OnSessionEstablish + OnSessionEstablished + OnDisconnect + OnAuthPacket + OnPacketRead + OnPacketEncode + OnPacketSent + OnPacketProcessed + OnSubscribe + OnSubscribed + OnSelectSubscribers + OnUnsubscribe + OnUnsubscribed + OnPublish + OnPublished + OnPublishDropped + OnRetainMessage + OnRetainPublished + OnQosPublish + OnQosComplete + OnQosDropped + OnPacketIDExhausted + OnWill + OnWillSent + OnClientExpired + OnRetainedExpired + StoredClients + StoredSubscriptions + StoredInflightMessages + StoredRetainedMessages + StoredSysInfo +) + +var ( + // ErrInvalidConfigType indicates a different Type of config value was expected to what was received. + ErrInvalidConfigType = errors.New("invalid config type provided") +) + +// Hook provides an interface of handlers for different events which occur +// during the lifecycle of the broker. 
+type Hook interface { + ID() string + Provides(b byte) bool + Init(config any) error + Stop() error + SetOpts(l *slog.Logger, o *HookOptions) + OnStarted() + OnStopped() + OnConnectAuthenticate(cl *Client, pk packets.Packet) bool + OnACLCheck(cl *Client, topic string, write bool) bool + OnSysInfoTick(*system.Info) + OnConnect(cl *Client, pk packets.Packet) error + OnSessionEstablish(cl *Client, pk packets.Packet) + OnSessionEstablished(cl *Client, pk packets.Packet) + OnDisconnect(cl *Client, err error, expire bool) + OnAuthPacket(cl *Client, pk packets.Packet) (packets.Packet, error) + OnPacketRead(cl *Client, pk packets.Packet) (packets.Packet, error) // triggers when a new packet is received by a client, but before packet validation + OnPacketEncode(cl *Client, pk packets.Packet) packets.Packet // modify a packet before it is byte-encoded and written to the client + OnPacketSent(cl *Client, pk packets.Packet, b []byte) // triggers when packet bytes have been written to the client + OnPacketProcessed(cl *Client, pk packets.Packet, err error) // triggers after a packet from the client been processed (handled) + OnSubscribe(cl *Client, pk packets.Packet) packets.Packet + OnSubscribed(cl *Client, pk packets.Packet, reasonCodes []byte) + OnSelectSubscribers(subs *Subscribers, pk packets.Packet) *Subscribers + OnUnsubscribe(cl *Client, pk packets.Packet) packets.Packet + OnUnsubscribed(cl *Client, pk packets.Packet) + OnPublish(cl *Client, pk packets.Packet) (packets.Packet, error) + OnPublished(cl *Client, pk packets.Packet) + OnPublishDropped(cl *Client, pk packets.Packet) + OnRetainMessage(cl *Client, pk packets.Packet, r int64) + OnRetainPublished(cl *Client, pk packets.Packet) + OnQosPublish(cl *Client, pk packets.Packet, sent int64, resends int) + OnQosComplete(cl *Client, pk packets.Packet) + OnQosDropped(cl *Client, pk packets.Packet) + OnPacketIDExhausted(cl *Client, pk packets.Packet) + OnWill(cl *Client, will Will) (Will, error) + OnWillSent(cl *Client, pk 
packets.Packet) + OnClientExpired(cl *Client) + OnRetainedExpired(filter string) + StoredClients() ([]storage.Client, error) + StoredSubscriptions() ([]storage.Subscription, error) + StoredInflightMessages() ([]storage.Message, error) + StoredRetainedMessages() ([]storage.Message, error) + StoredSysInfo() (storage.SystemInfo, error) +} + +// HookOptions contains values which are inherited from the server on initialisation. +type HookOptions struct { + Capabilities *Capabilities +} + +// Hooks is a slice of Hook interfaces to be called in sequence. +type Hooks struct { + Log *slog.Logger // a logger for the hook (from the server) + internal atomic.Value // a slice of []Hook + wg sync.WaitGroup // a waitgroup for syncing hook shutdown + qty int64 // the number of hooks in use + sync.Mutex // a mutex for locking when adding hooks +} + +// Len returns the number of hooks added. +func (h *Hooks) Len() int64 { + return atomic.LoadInt64(&h.qty) +} + +// Provides returns true if any one hook provides any of the requested hook methods. +func (h *Hooks) Provides(b ...byte) bool { + for _, hook := range h.GetAll() { + for _, hb := range b { + if hook.Provides(hb) { + return true + } + } + } + + return false +} + +// Add adds and initializes a new hook. +func (h *Hooks) Add(hook Hook, config any) error { + h.Lock() + defer h.Unlock() + + err := hook.Init(config) + if err != nil { + return fmt.Errorf("failed initialising %s hook: %w", hook.ID(), err) + } + + i, ok := h.internal.Load().([]Hook) + if !ok { + i = []Hook{} + } + + i = append(i, hook) + h.internal.Store(i) + atomic.AddInt64(&h.qty, 1) + h.wg.Add(1) + + return nil +} + +// GetAll returns a slice of all the hooks. +func (h *Hooks) GetAll() []Hook { + i, ok := h.internal.Load().([]Hook) + if !ok { + return []Hook{} + } + + return i +} + +// Stop indicates all attached hooks to gracefully end. 
func (h *Hooks) Stop() {
	go func() {
		for _, hook := range h.GetAll() {
			h.Log.Info("stopping hook", "hook", hook.ID())
			if err := hook.Stop(); err != nil {
				// Best-effort: a hook which fails to stop must not block the rest.
				h.Log.Debug("problem stopping hook", "error", err, "hook", hook.ID())
			}

			h.wg.Done() // matches the wg.Add(1) performed in Add.
		}
	}()

	// Block until every attached hook has been stopped.
	h.wg.Wait()
}

// OnSysInfoTick is called when the $SYS topic values are published out.
func (h *Hooks) OnSysInfoTick(sys *system.Info) {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnSysInfoTick) {
			hook.OnSysInfoTick(sys)
		}
	}
}

// OnStarted is called when the server has successfully started.
func (h *Hooks) OnStarted() {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnStarted) {
			hook.OnStarted()
		}
	}
}

// OnStopped is called when the server has successfully stopped.
func (h *Hooks) OnStopped() {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnStopped) {
			hook.OnStopped()
		}
	}
}

// OnConnect is called when a new client connects, and may return a packets.Code as an error to halt the connection.
// The first hook to return an error aborts the remaining hooks.
func (h *Hooks) OnConnect(cl *Client, pk packets.Packet) error {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnConnect) {
			err := hook.OnConnect(cl, pk)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

// OnSessionEstablish is called right after a new client connects and authenticates and right before
// the session is established and CONNACK is sent.
func (h *Hooks) OnSessionEstablish(cl *Client, pk packets.Packet) {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnSessionEstablish) {
			hook.OnSessionEstablish(cl, pk)
		}
	}
}

// OnSessionEstablished is called when a new client establishes a session (after OnConnect).
func (h *Hooks) OnSessionEstablished(cl *Client, pk packets.Packet) {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnSessionEstablished) {
			hook.OnSessionEstablished(cl, pk)
		}
	}
}

// OnDisconnect is called when a client is disconnected for any reason.
func (h *Hooks) OnDisconnect(cl *Client, err error, expire bool) {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnDisconnect) {
			hook.OnDisconnect(cl, err, expire)
		}
	}
}

// OnPacketRead is called when a packet is received from a client. Hooks are applied
// in order, each receiving the previous hook's (possibly modified) packet. A
// packets.ErrRejectPacket error aborts processing and returns the original packet;
// any other hook error only skips that hook's modification.
func (h *Hooks) OnPacketRead(cl *Client, pk packets.Packet) (pkx packets.Packet, err error) {
	pkx = pk
	for _, hook := range h.GetAll() {
		if hook.Provides(OnPacketRead) {
			npk, err := hook.OnPacketRead(cl, pkx)
			if err != nil && errors.Is(err, packets.ErrRejectPacket) {
				h.Log.Debug("packet rejected", "hook", hook.ID(), "packet", pkx)
				return pk, err
			} else if err != nil {
				continue
			}

			pkx = npk
		}
	}

	return
}

// OnAuthPacket is called when an auth packet is received. It is intended to allow developers
// to create their own auth packet handling mechanisms. Any hook error aborts processing
// and returns the original, unmodified packet.
func (h *Hooks) OnAuthPacket(cl *Client, pk packets.Packet) (pkx packets.Packet, err error) {
	pkx = pk
	for _, hook := range h.GetAll() {
		if hook.Provides(OnAuthPacket) {
			npk, err := hook.OnAuthPacket(cl, pkx)
			if err != nil {
				return pk, err
			}

			pkx = npk
		}
	}

	return
}

// OnPacketEncode is called immediately before a packet is encoded to be sent to a client.
func (h *Hooks) OnPacketEncode(cl *Client, pk packets.Packet) packets.Packet {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnPacketEncode) {
			pk = hook.OnPacketEncode(cl, pk)
		}
	}

	return pk
}

// OnPacketProcessed is called when a packet has been received and successfully handled by the broker.
func (h *Hooks) OnPacketProcessed(cl *Client, pk packets.Packet, err error) {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnPacketProcessed) {
			hook.OnPacketProcessed(cl, pk, err)
		}
	}
}

// OnPacketSent is called when a packet has been sent to a client. It takes a bytes parameter
// containing the bytes sent.
func (h *Hooks) OnPacketSent(cl *Client, pk packets.Packet, b []byte) {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnPacketSent) {
			hook.OnPacketSent(cl, pk, b)
		}
	}
}

// OnSubscribe is called when a client subscribes to one or more filters. This method
// differs from OnSubscribed in that it allows you to modify the subscription values
// before the packet is processed. The return values of the hook methods are passed-through
// in the order the hooks were attached.
func (h *Hooks) OnSubscribe(cl *Client, pk packets.Packet) packets.Packet {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnSubscribe) {
			pk = hook.OnSubscribe(cl, pk)
		}
	}
	return pk
}

// OnSubscribed is called when a client subscribes to one or more filters.
func (h *Hooks) OnSubscribed(cl *Client, pk packets.Packet, reasonCodes []byte) {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnSubscribed) {
			hook.OnSubscribed(cl, pk, reasonCodes)
		}
	}
}

// OnSelectSubscribers is called when subscribers have been collected for a topic, but before
// shared subscription subscribers have been selected. This hook can be used to programmatically
// remove or add clients to a publish to subscribers process, or to select the subscriber for a shared
// group in a custom manner (such as based on client id, ip, etc).
func (h *Hooks) OnSelectSubscribers(subs *Subscribers, pk packets.Packet) *Subscribers {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnSelectSubscribers) {
			subs = hook.OnSelectSubscribers(subs, pk)
		}
	}
	return subs
}

// OnUnsubscribe is called when a client unsubscribes from one or more filters. This method
// differs from OnUnsubscribed in that it allows you to modify the unsubscription values
// before the packet is processed. The return values of the hook methods are passed-through
// in the order the hooks were attached.
func (h *Hooks) OnUnsubscribe(cl *Client, pk packets.Packet) packets.Packet {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnUnsubscribe) {
			pk = hook.OnUnsubscribe(cl, pk)
		}
	}
	return pk
}

// OnUnsubscribed is called when a client unsubscribes from one or more filters.
func (h *Hooks) OnUnsubscribed(cl *Client, pk packets.Packet) {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnUnsubscribed) {
			hook.OnUnsubscribed(cl, pk)
		}
	}
}

// OnPublish is called when a client publishes a message. This method differs from OnPublished
// in that it allows you to modify the incoming packet before it is processed.
// The return values of the hook methods are passed-through in the order the hooks were attached.
func (h *Hooks) OnPublish(cl *Client, pk packets.Packet) (pkx packets.Packet, err error) {
	pkx = pk
	for _, hook := range h.GetAll() {
		if hook.Provides(OnPublish) {
			npk, err := hook.OnPublish(cl, pkx)
			if err != nil {
				// Any hook error aborts processing and returns the original packet;
				// a deliberate rejection is logged at debug, other errors at error.
				if errors.Is(err, packets.ErrRejectPacket) {
					h.Log.Debug("publish packet rejected",
						"error", err,
						"hook", hook.ID(),
						"packet", pkx)
					return pk, err
				}
				h.Log.Error("publish packet error",
					"error", err,
					"hook", hook.ID(),
					"packet", pkx)
				return pk, err
			}
			pkx = npk
		}
	}

	return
}

// OnPublished is called when a client has published a message to subscribers.
func (h *Hooks) OnPublished(cl *Client, pk packets.Packet) {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnPublished) {
			hook.OnPublished(cl, pk)
		}
	}
}

// OnPublishDropped is called when a message to a client was dropped instead of delivered
// such as when a client is too slow to respond.
func (h *Hooks) OnPublishDropped(cl *Client, pk packets.Packet) {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnPublishDropped) {
			hook.OnPublishDropped(cl, pk)
		}
	}
}

// OnRetainMessage is called when a published message is retained.
func (h *Hooks) OnRetainMessage(cl *Client, pk packets.Packet, r int64) {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnRetainMessage) {
			hook.OnRetainMessage(cl, pk, r)
		}
	}
}

// OnRetainPublished is called when a retained message is published.
func (h *Hooks) OnRetainPublished(cl *Client, pk packets.Packet) {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnRetainPublished) {
			hook.OnRetainPublished(cl, pk)
		}
	}
}

// OnQosPublish is called when a publish packet with Qos >= 1 is issued to a subscriber.
// In other words, this method is called when a new inflight message is created or resent.
// It is typically used to store a new inflight message.
func (h *Hooks) OnQosPublish(cl *Client, pk packets.Packet, sent int64, resends int) {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnQosPublish) {
			hook.OnQosPublish(cl, pk, sent, resends)
		}
	}
}

// OnQosComplete is called when the Qos flow for a message has been completed.
// In other words, when an inflight message is resolved.
// It is typically used to delete an inflight message from a store.
func (h *Hooks) OnQosComplete(cl *Client, pk packets.Packet) {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnQosComplete) {
			hook.OnQosComplete(cl, pk)
		}
	}
}

// OnQosDropped is called when the Qos flow for a message expires. In other words, when
// an inflight message expires or is abandoned. It is typically used to delete an
// inflight message from a store.
func (h *Hooks) OnQosDropped(cl *Client, pk packets.Packet) {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnQosDropped) {
			hook.OnQosDropped(cl, pk)
		}
	}
}

// OnPacketIDExhausted is called when the client runs out of unused packet ids to
// assign to a packet.
func (h *Hooks) OnPacketIDExhausted(cl *Client, pk packets.Packet) {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnPacketIDExhausted) {
			hook.OnPacketIDExhausted(cl, pk)
		}
	}
}

// OnWill is called when a client disconnects and publishes an LWT message. This method
// differs from OnWillSent in that it allows you to modify the LWT message before it is
// published. The return values of the hook methods are passed-through in the order
// the hooks were attached.
func (h *Hooks) OnWill(cl *Client, will Will) Will {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnWill) {
			mlwt, err := hook.OnWill(cl, will)
			if err != nil {
				// A failing hook is logged and skipped; the will so far is kept.
				h.Log.Error("parse will error",
					"error", err,
					"hook", hook.ID(),
					"will", will)
				continue
			}
			will = mlwt
		}
	}

	return will
}

// OnWillSent is called when an LWT message has been issued from a disconnecting client.
func (h *Hooks) OnWillSent(cl *Client, pk packets.Packet) {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnWillSent) {
			hook.OnWillSent(cl, pk)
		}
	}
}

// OnClientExpired is called when a client session has expired and should be deleted.
func (h *Hooks) OnClientExpired(cl *Client) {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnClientExpired) {
			hook.OnClientExpired(cl)
		}
	}
}

// OnRetainedExpired is called when a retained message has expired and should be deleted.
func (h *Hooks) OnRetainedExpired(filter string) {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnRetainedExpired) {
			hook.OnRetainedExpired(filter)
		}
	}
}

// StoredClients returns all clients, e.g. from a persistent store, and is used to
// populate the server clients list before start.
func (h *Hooks) StoredClients() (v []storage.Client, err error) {
	for _, hook := range h.GetAll() {
		if hook.Provides(StoredClients) {
			v, err := hook.StoredClients()
			if err != nil {
				h.Log.Error("failed to load clients", "error", err, "hook", hook.ID())
				return v, err
			}

			// First hook to return a non-empty result wins; empty results fall through.
			if len(v) > 0 {
				return v, nil
			}
		}
	}

	return
}

// StoredSubscriptions returns all subscriptions, e.g. from a persistent store, and is
// used to populate the server subscriptions list before start.
func (h *Hooks) StoredSubscriptions() (v []storage.Subscription, err error) {
	for _, hook := range h.GetAll() {
		if hook.Provides(StoredSubscriptions) {
			v, err := hook.StoredSubscriptions()
			if err != nil {
				h.Log.Error("failed to load subscriptions", "error", err, "hook", hook.ID())
				return v, err
			}

			// First hook to return a non-empty result wins; empty results fall through.
			if len(v) > 0 {
				return v, nil
			}
		}
	}

	return
}

// StoredInflightMessages returns all inflight messages, e.g. from a persistent store,
// and is used to populate the restored clients with inflight messages before start.
func (h *Hooks) StoredInflightMessages() (v []storage.Message, err error) {
	for _, hook := range h.GetAll() {
		if hook.Provides(StoredInflightMessages) {
			v, err := hook.StoredInflightMessages()
			if err != nil {
				h.Log.Error("failed to load inflight messages", "error", err, "hook", hook.ID())
				return v, err
			}

			// First hook to return a non-empty result wins; empty results fall through.
			if len(v) > 0 {
				return v, nil
			}
		}
	}

	return
}

// StoredRetainedMessages returns all retained messages, e.g. from a persistent store,
// and is used to populate the server topics with retained messages before start.
func (h *Hooks) StoredRetainedMessages() (v []storage.Message, err error) {
	for _, hook := range h.GetAll() {
		if hook.Provides(StoredRetainedMessages) {
			v, err := hook.StoredRetainedMessages()
			if err != nil {
				h.Log.Error("failed to load retained messages", "error", err, "hook", hook.ID())
				return v, err
			}

			// First hook to return a non-empty result wins; empty results fall through.
			if len(v) > 0 {
				return v, nil
			}
		}
	}

	return
}

// StoredSysInfo returns a set of system info values. The first hook which
// returns info with a non-empty Version wins.
func (h *Hooks) StoredSysInfo() (v storage.SystemInfo, err error) {
	for _, hook := range h.GetAll() {
		if hook.Provides(StoredSysInfo) {
			v, err := hook.StoredSysInfo()
			if err != nil {
				h.Log.Error("failed to load $SYS info", "error", err, "hook", hook.ID())
				return v, err
			}

			if v.Version != "" {
				return v, nil
			}
		}
	}

	return
}

// OnConnectAuthenticate is called when a user attempts to authenticate with the server.
// An implementation of this method MUST be used to allow or deny access to the
// server (see hooks/auth/allow_all or basic). It can be used in custom hooks to
// check connecting users against an existing user database.
// Access is granted if any single hook approves it; the default is deny.
func (h *Hooks) OnConnectAuthenticate(cl *Client, pk packets.Packet) bool {
	for _, hook := range h.GetAll() {
		if hook.Provides(OnConnectAuthenticate) {
			if ok := hook.OnConnectAuthenticate(cl, pk); ok {
				return true
			}
		}
	}

	return false
}

// OnACLCheck is called when a user attempts to publish or subscribe to a topic filter.
// An implementation of this method MUST be used to allow or deny access to the
// server (see hooks/auth/allow_all or basic). It can be used in custom hooks to
// check publishing and subscribing users against an existing permissions or roles database.
func (h *Hooks) OnACLCheck(cl *Client, topic string, write bool) bool {
	// Access is granted if any single hook approves it; the default is deny.
	for _, hook := range h.GetAll() {
		if hook.Provides(OnACLCheck) {
			if ok := hook.OnACLCheck(cl, topic, write); ok {
				return true
			}
		}
	}

	return false
}

// HookBase provides a set of default methods for each hook. It should be embedded in
// all hooks.
type HookBase struct {
	Hook
	Log  *slog.Logger
	Opts *HookOptions
}

// ID returns the ID of the hook.
func (h *HookBase) ID() string {
	return "base"
}

// Provides indicates which methods a hook provides. The default is none - this method
// should be overridden by the embedding hook.
func (h *HookBase) Provides(b byte) bool {
	return false
}

// Init performs any pre-start initializations for the hook, such as connecting to databases
// or opening files.
func (h *HookBase) Init(config any) error {
	return nil
}

// SetOpts is called by the server to propagate internal values and generally should
// not be called manually.
func (h *HookBase) SetOpts(l *slog.Logger, opts *HookOptions) {
	h.Log = l
	h.Opts = opts
}

// Stop is called to gracefully shut down the hook.
func (h *HookBase) Stop() error {
	return nil
}

// OnStarted is called when the server starts.
func (h *HookBase) OnStarted() {}

// OnStopped is called when the server stops.
func (h *HookBase) OnStopped() {}

// OnSysInfoTick is called when the server publishes system info.
func (h *HookBase) OnSysInfoTick(*system.Info) {}

// OnConnectAuthenticate is called when a user attempts to authenticate with the server.
// The default is deny; override in an auth hook to grant access.
func (h *HookBase) OnConnectAuthenticate(cl *Client, pk packets.Packet) bool {
	return false
}

// OnACLCheck is called when a user attempts to subscribe or publish to a topic.
// The default is deny; override in an auth hook to grant access.
func (h *HookBase) OnACLCheck(cl *Client, topic string, write bool) bool {
	return false
}

// OnConnect is called when a new client connects.
func (h *HookBase) OnConnect(cl *Client, pk packets.Packet) error {
	return nil
}

// OnSessionEstablish is called right after a new client connects and authenticates and right before
// the session is established and CONNACK is sent.
func (h *HookBase) OnSessionEstablish(cl *Client, pk packets.Packet) {}

// OnSessionEstablished is called when a new client establishes a session (after OnConnect).
func (h *HookBase) OnSessionEstablished(cl *Client, pk packets.Packet) {}

// OnDisconnect is called when a client is disconnected for any reason.
func (h *HookBase) OnDisconnect(cl *Client, err error, expire bool) {}

// OnAuthPacket is called when an auth packet is received from the client.
// The default passes the packet through unmodified.
func (h *HookBase) OnAuthPacket(cl *Client, pk packets.Packet) (packets.Packet, error) {
	return pk, nil
}

// OnPacketRead is called when a packet is received.
// The default passes the packet through unmodified.
func (h *HookBase) OnPacketRead(cl *Client, pk packets.Packet) (packets.Packet, error) {
	return pk, nil
}

// OnPacketEncode is called before a packet is byte-encoded and written to the client.
// The default passes the packet through unmodified.
func (h *HookBase) OnPacketEncode(cl *Client, pk packets.Packet) packets.Packet {
	return pk
}

// OnPacketSent is called immediately after a packet is written to a client.
func (h *HookBase) OnPacketSent(cl *Client, pk packets.Packet, b []byte) {}

// OnPacketProcessed is called immediately after a packet from a client is processed.
func (h *HookBase) OnPacketProcessed(cl *Client, pk packets.Packet, err error) {}

// OnSubscribe is called when a client subscribes to one or more filters.
// The default passes the packet through unmodified.
func (h *HookBase) OnSubscribe(cl *Client, pk packets.Packet) packets.Packet {
	return pk
}

// OnSubscribed is called when a client subscribes to one or more filters.
func (h *HookBase) OnSubscribed(cl *Client, pk packets.Packet, reasonCodes []byte) {}

// OnSelectSubscribers is called when selecting subscribers to receive a message.
func (h *HookBase) OnSelectSubscribers(subs *Subscribers, pk packets.Packet) *Subscribers {
	return subs
}

// OnUnsubscribe is called when a client unsubscribes from one or more filters.
// The default passes the packet through unmodified.
func (h *HookBase) OnUnsubscribe(cl *Client, pk packets.Packet) packets.Packet {
	return pk
}

// OnUnsubscribed is called when a client unsubscribes from one or more filters.
func (h *HookBase) OnUnsubscribed(cl *Client, pk packets.Packet) {}

// OnPublish is called when a client publishes a message.
// The default passes the packet through unmodified.
func (h *HookBase) OnPublish(cl *Client, pk packets.Packet) (packets.Packet, error) {
	return pk, nil
}

// OnPublished is called when a client has published a message to subscribers.
func (h *HookBase) OnPublished(cl *Client, pk packets.Packet) {}

// OnPublishDropped is called when a message to a client is dropped instead of being delivered.
func (h *HookBase) OnPublishDropped(cl *Client, pk packets.Packet) {}

// OnRetainMessage is called when a published message is retained.
func (h *HookBase) OnRetainMessage(cl *Client, pk packets.Packet, r int64) {}

// OnRetainPublished is called when a retained message is published.
func (h *HookBase) OnRetainPublished(cl *Client, pk packets.Packet) {}

// OnQosPublish is called when a publish packet with Qos >= 1 is issued to a subscriber.
func (h *HookBase) OnQosPublish(cl *Client, pk packets.Packet, sent int64, resends int) {}

// OnQosComplete is called when the Qos flow for a message has been completed.
func (h *HookBase) OnQosComplete(cl *Client, pk packets.Packet) {}

// OnQosDropped is called when the Qos flow for a message expires.
func (h *HookBase) OnQosDropped(cl *Client, pk packets.Packet) {}

// OnPacketIDExhausted is called when the client runs out of unused packet ids to assign to a packet.
func (h *HookBase) OnPacketIDExhausted(cl *Client, pk packets.Packet) {}

// OnWill is called when a client disconnects and publishes an LWT message.
func (h *HookBase) OnWill(cl *Client, will Will) (Will, error) {
	return will, nil
}

// OnWillSent is called when an LWT message has been issued from a disconnecting client.
func (h *HookBase) OnWillSent(cl *Client, pk packets.Packet) {}

// OnClientExpired is called when a client session has expired.
func (h *HookBase) OnClientExpired(cl *Client) {}

// OnRetainedExpired is called when a retained message for a topic has expired.
func (h *HookBase) OnRetainedExpired(topic string) {}

// StoredClients returns all clients from a store (none by default).
func (h *HookBase) StoredClients() (v []storage.Client, err error) {
	return
}

// StoredSubscriptions returns all subscriptions from a store (none by default).
func (h *HookBase) StoredSubscriptions() (v []storage.Subscription, err error) {
	return
}

// StoredInflightMessages returns all inflight messages from a store (none by default).
func (h *HookBase) StoredInflightMessages() (v []storage.Message, err error) {
	return
}

// StoredRetainedMessages returns all retained messages from a store (none by default).
func (h *HookBase) StoredRetainedMessages() (v []storage.Message, err error) {
	return
}

// StoredSysInfo returns a set of system info values (zero value by default).
func (h *HookBase) StoredSysInfo() (v storage.SystemInfo, err error) {
	return
}
diff --git a/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/allow_all.go b/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/allow_all.go
new file mode 100644
index 000000000..e05a0de3b
--- /dev/null
+++ b/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/allow_all.go
@@ -0,0 +1,41 @@
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co
// SPDX-FileContributor: mochi-co

package auth

import (
	"bytes"

	"github.com/mochi-mqtt/server/v2"
	"github.com/mochi-mqtt/server/v2/packets"
)

// AllowHook is an authentication hook which allows connection access
// for all users and read and write access to all topics.
type AllowHook struct {
	mqtt.HookBase
}

// ID returns the ID of the hook.
func (h *AllowHook) ID() string {
	return "allow-all-auth"
}

// Provides indicates which hook methods this hook provides.
func (h *AllowHook) Provides(b byte) bool {
	return bytes.Contains([]byte{
		mqtt.OnConnectAuthenticate,
		mqtt.OnACLCheck,
	}, []byte{b})
}

// OnConnectAuthenticate returns true/allowed for all requests.
func (h *AllowHook) OnConnectAuthenticate(cl *mqtt.Client, pk packets.Packet) bool {
	return true
}

// OnACLCheck returns true/allowed for all checks.
func (h *AllowHook) OnACLCheck(cl *mqtt.Client, topic string, write bool) bool {
	return true
}
diff --git a/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/auth.go b/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/auth.go
new file mode 100644
index 000000000..d50cc5cee
--- /dev/null
+++ b/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/auth.go
@@ -0,0 +1,103 @@
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co
// SPDX-FileContributor: mochi-co

package auth

import (
	"bytes"

	mqtt "github.com/mochi-mqtt/server/v2"
	"github.com/mochi-mqtt/server/v2/packets"
)

// Options contains the configuration/rules data for the auth ledger.
type Options struct {
	Data   []byte  // raw JSON/YAML rule data (decoded by Ledger.Unmarshal in Init)
	Ledger *Ledger // a pre-built ledger; takes precedence over Data in Init
}

// Hook is an authentication hook which implements an auth ledger.
type Hook struct {
	mqtt.HookBase
	config *Options
	ledger *Ledger
}

// ID returns the ID of the hook.
func (h *Hook) ID() string {
	return "auth-ledger"
}

// Provides indicates which hook methods this hook provides.
func (h *Hook) Provides(b byte) bool {
	return bytes.Contains([]byte{
		mqtt.OnConnectAuthenticate,
		mqtt.OnACLCheck,
	}, []byte{b})
}

// Init configures the hook with the auth ledger to be used for checking.
func (h *Hook) Init(config any) error {
	if _, ok := config.(*Options); !ok && config != nil {
		return mqtt.ErrInvalidConfigType
	}

	if config == nil {
		config = new(Options)
	}

	h.config = config.(*Options)

	var err error
	if h.config.Ledger != nil {
		// A pre-built ledger takes precedence over raw rule data.
		h.ledger = h.config.Ledger
	} else if len(h.config.Data) > 0 {
		h.ledger = new(Ledger)
		err = h.ledger.Unmarshal(h.config.Data)
	}
	if err != nil {
		return err
	}

	if h.ledger == nil {
		// No rules supplied: fall back to an empty ledger. With no Users map and
		// no Auth rules, AuthOk denies all connection attempts.
		h.ledger = &Ledger{
			Auth: AuthRules{},
			ACL:  ACLRules{},
		}
	}

	h.Log.Info("loaded auth rules",
		"authentication", len(h.ledger.Auth),
		"acl", len(h.ledger.ACL))

	return nil
}

// OnConnectAuthenticate returns true if the connecting client has rules which provide access
// in the auth ledger.
func (h *Hook) OnConnectAuthenticate(cl *mqtt.Client, pk packets.Packet) bool {
	if _, ok := h.ledger.AuthOk(cl, pk); ok {
		return true
	}

	h.Log.Info("client failed authentication check",
		"username", string(pk.Connect.Username),
		"remote", cl.Net.Remote)
	return false
}

// OnACLCheck returns true if the connecting client has matching read or write access to subscribe
// or publish to a given topic.
func (h *Hook) OnACLCheck(cl *mqtt.Client, topic string, write bool) bool {
	if _, ok := h.ledger.ACLOk(cl, topic, write); ok {
		return true
	}

	h.Log.Debug("client failed allowed ACL check",
		"client", cl.ID,
		"username", string(cl.Properties.Username),
		"topic", topic)

	return false
}
diff --git a/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/ledger.go b/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/ledger.go
new file mode 100644
index 000000000..694b19da1
--- /dev/null
+++ b/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/ledger.go
@@ -0,0 +1,246 @@
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co
// SPDX-FileContributor: mochi-co

package auth

import (
	"encoding/json"
	"strings"
	"sync"

	"gopkg.in/yaml.v3"

	"github.com/mochi-mqtt/server/v2"
	"github.com/mochi-mqtt/server/v2/packets"
)

const (
	Deny      Access = iota // user cannot access the topic
	ReadOnly                // user can only subscribe to the topic
	WriteOnly               // user can only publish to the topic
	ReadWrite               // user can both publish and subscribe to the topic
)

// Access determines the read/write privileges for an ACL rule.
type Access byte

// Users contains a map of access rules for specific users, keyed on username.
type Users map[string]UserRule

// UserRule defines a set of access rules for a specific user.
type UserRule struct {
	Username RString `json:"username,omitempty" yaml:"username,omitempty"` // the username of a user
	Password RString `json:"password,omitempty" yaml:"password,omitempty"` // the password of a user
	ACL      Filters `json:"acl,omitempty" yaml:"acl,omitempty"`           // filters to match, if desired
	Disallow bool    `json:"disallow,omitempty" yaml:"disallow,omitempty"` // allow or disallow the user
}

// AuthRules defines generic access rules applicable to all users.
type AuthRules []AuthRule

// AuthRule defines a generic authentication rule matched against a connecting client.
type AuthRule struct {
	Client   RString `json:"client,omitempty" yaml:"client,omitempty"`     // the id of a connecting client
	Username RString `json:"username,omitempty" yaml:"username,omitempty"` // the username of a user
	Remote   RString `json:"remote,omitempty" yaml:"remote,omitempty"`     // the remote address of the client to match
	Password RString `json:"password,omitempty" yaml:"password,omitempty"` // the password of a user
	Allow    bool    `json:"allow,omitempty" yaml:"allow,omitempty"`       // allow or disallow the users
}

// ACLRules defines generic topic or filter access rules applicable to all users.
type ACLRules []ACLRule

// ACLRule defines access rules for a specific topic or filter.
type ACLRule struct {
	Client   RString `json:"client,omitempty" yaml:"client,omitempty"`     // the id of a connecting client
	Username RString `json:"username,omitempty" yaml:"username,omitempty"` // the username of a user
	Remote   RString `json:"remote,omitempty" yaml:"remote,omitempty"`     // the remote address of the client to match
	Filters  Filters `json:"filters,omitempty" yaml:"filters,omitempty"`   // filters to match
}

// Filters is a map of Access rules keyed on filter.
type Filters map[RString]Access

// RString is a rule value string.
type RString string

// Matches returns true if the rule matches a given string. An empty rule or a
// bare "*" matches anything; a value of the form "prefix*" matches any string
// strictly longer than the prefix which begins with it; otherwise an exact
// comparison is made.
func (r RString) Matches(a string) bool {
	rr := string(r)
	if r == "" || r == "*" || a == rr {
		return true
	}

	// Prefix wildcard: only considered when "*" is not the first character.
	i := strings.Index(rr, "*")
	if i > 0 && len(a) > i && strings.Compare(rr[:i], a[:i]) == 0 {
		return true
	}

	return false
}

// FilterMatches returns true if a filter matches a topic rule.
func (r RString) FilterMatches(a string) bool {
	_, ok := MatchTopic(string(r), a)
	return ok
}

// MatchTopic checks if a given topic matches a filter, accounting for filter
// wildcards. Eg. filter /a/b/+/c == topic a/b/d/c.
func MatchTopic(filter string, topic string) (elements []string, matched bool) {
	filterParts := strings.Split(filter, "/")
	topicParts := strings.Split(topic, "/")

	elements = make([]string, 0)
	for i := 0; i < len(filterParts); i++ {
		// The topic has fewer levels than the filter; no match is possible.
		if i >= len(topicParts) {
			matched = false
			return
		}

		// "+" matches exactly one topic level; the matched value is captured.
		if filterParts[i] == "+" {
			elements = append(elements, topicParts[i])
			continue
		}

		// "#" matches the remainder of the topic, captured as a single element.
		if filterParts[i] == "#" {
			matched = true
			elements = append(elements, strings.Join(topicParts[i:], "/"))
			return
		}

		if filterParts[i] != topicParts[i] {
			matched = false
			return
		}
	}

	// NOTE(review): if the topic has MORE levels than the filter (e.g. filter
	// "a/b" vs topic "a/b/c") the loop completes and a match is reported —
	// confirm this is the intended semantics for ledger filter checks.
	return elements, true
}

// Ledger is an auth ledger containing access rules for users and topics.
type Ledger struct {
	sync.Mutex `json:"-" yaml:"-"`
	Users      Users     `json:"users" yaml:"users"`
	Auth       AuthRules `json:"auth" yaml:"auth"`
	ACL        ACLRules  `json:"acl" yaml:"acl"`
}

// Update updates the internal values of the ledger. Only the Auth and ACL
// rules are replaced; the Users map is left unchanged.
func (l *Ledger) Update(ln *Ledger) {
	l.Lock()
	defer l.Unlock()
	l.Auth = ln.Auth
	l.ACL = ln.ACL
}

// AuthOk returns true if the rules indicate the user is allowed to authenticate.
func (l *Ledger) AuthOk(cl *mqtt.Client, pk packets.Packet) (n int, ok bool) {
	// If the users map is set, always check for a predefined user first instead
	// of iterating through global rules.
	if l.Users != nil {
		if u, ok := l.Users[string(cl.Properties.Username)]; ok &&
			u.Password != "" &&
			u.Password == RString(pk.Connect.Password) {
			return 0, !u.Disallow
		}
	}

	// If there's no users map, or no user was found, attempt to find a matching
	// rule (which may also contain a user).
	for n, rule := range l.Auth {
		if rule.Client.Matches(cl.ID) &&
			rule.Username.Matches(string(cl.Properties.Username)) &&
			rule.Password.Matches(string(pk.Connect.Password)) &&
			rule.Remote.Matches(cl.Net.Remote) {
			return n, rule.Allow
		}
	}

	// No user entry or rule matched: authentication is denied by default.
	return 0, false
}

// ACLOk returns true if the rules indicate the user is allowed to read or write to
// a specific filter or topic respectively, based on the `write` bool.
func (l *Ledger) ACLOk(cl *mqtt.Client, topic string, write bool) (n int, ok bool) {
	// If the users map is set, always check for a predefined user first instead
	// of iterating through global rules.
	if l.Users != nil {
		if u, ok := l.Users[string(cl.Properties.Username)]; ok && len(u.ACL) > 0 {
			// The first user filter matching the topic decides the outcome.
			for filter, access := range u.ACL {
				if filter.FilterMatches(topic) {
					if !write && (access == ReadOnly || access == ReadWrite) {
						return n, true
					} else if write && (access == WriteOnly || access == ReadWrite) {
						return n, true
					} else {
						return n, false
					}
				}
			}
		}
	}

	for n, rule := range l.ACL {
		if rule.Client.Matches(cl.ID) &&
			rule.Username.Matches(string(cl.Properties.Username)) &&
			rule.Remote.Matches(cl.Net.Remote) {
			// A matching rule with no filters grants access outright.
			if len(rule.Filters) == 0 {
				return n, true
			}

			if write {
				for filter, access := range rule.Filters {
					if access == WriteOnly || access == ReadWrite {
						if filter.FilterMatches(topic) {
							return n, true
						}
					}
				}
			}

			if !write {
				for filter, access := range rule.Filters {
					if access == ReadOnly || access == ReadWrite {
						if filter.FilterMatches(topic) {
							return n, true
						}
					}
				}
			}

			// A filter matched but without the required access level: deny.
			for filter := range rule.Filters {
				if filter.FilterMatches(topic) {
					return n, false
				}
			}
		}
	}

	// No matching user entry or rule: access is allowed by default.
	return 0, true
}

// ToJSON encodes the values into a JSON string.
func (l *Ledger) ToJSON() (data []byte, err error) {
	return json.Marshal(l)
}

// ToYAML encodes the values into a YAML string.
+func (l *Ledger) ToYAML() (data []byte, err error) { + return yaml.Marshal(l) +} + +// Unmarshal decodes a JSON or YAML string (such as a rule config from a file) into a struct. +func (l *Ledger) Unmarshal(data []byte) error { + l.Lock() + defer l.Unlock() + if len(data) == 0 { + return nil + } + + if data[0] == '{' { + return json.Unmarshal(data, l) + } + + return yaml.Unmarshal(data, &l) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/hooks/storage/storage.go b/vendor/github.com/mochi-mqtt/server/v2/hooks/storage/storage.go new file mode 100644 index 000000000..eb57508fd --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/hooks/storage/storage.go @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package storage + +import ( + "encoding/json" + "errors" + + "github.com/mochi-mqtt/server/v2/packets" + "github.com/mochi-mqtt/server/v2/system" +) + +const ( + SubscriptionKey = "SUB" // unique key to denote Subscriptions in a store + SysInfoKey = "SYS" // unique key to denote server system information in a store + RetainedKey = "RET" // unique key to denote retained messages in a store + InflightKey = "IFM" // unique key to denote inflight messages in a store + ClientKey = "CL" // unique key to denote clients in a store +) + +var ( + // ErrDBFileNotOpen indicates that the file database (e.g. bolt/badger) wasn't open for reading. + ErrDBFileNotOpen = errors.New("db file not open") +) + +// Client is a storable representation of an MQTT client. 
+type Client struct { + Will ClientWill `json:"will"` // will topic and payload data if applicable + Properties ClientProperties `json:"properties"` // the connect properties for the client + Username []byte `json:"username"` // the username of the client + ID string `json:"id" storm:"id"` // the client id / storage key + T string `json:"t"` // the data type (client) + Remote string `json:"remote"` // the remote address of the client + Listener string `json:"listener"` // the listener the client connected on + ProtocolVersion byte `json:"protocolVersion"` // mqtt protocol version of the client + Clean bool `json:"clean"` // if the client requested a clean start/session +} + +// ClientProperties contains a limited set of the mqtt v5 properties specific to a client connection. +type ClientProperties struct { + AuthenticationData []byte `json:"authenticationData"` + User []packets.UserProperty `json:"user"` + AuthenticationMethod string `json:"authenticationMethod"` + SessionExpiryInterval uint32 `json:"sessionExpiryInterval"` + MaximumPacketSize uint32 `json:"maximumPacketSize"` + ReceiveMaximum uint16 `json:"receiveMaximum"` + TopicAliasMaximum uint16 `json:"topicAliasMaximum"` + SessionExpiryIntervalFlag bool `json:"sessionExpiryIntervalFlag"` + RequestProblemInfo byte `json:"requestProblemInfo"` + RequestProblemInfoFlag bool `json:"requestProblemInfoFlag"` + RequestResponseInfo byte `json:"requestResponseInfo"` +} + +// ClientWill contains a will message for a client, and limited mqtt v5 properties. +type ClientWill struct { + Payload []byte `json:"payload"` + User []packets.UserProperty `json:"user"` + TopicName string `json:"topicName"` + Flag uint32 `json:"flag"` + WillDelayInterval uint32 `json:"willDelayInterval"` + Qos byte `json:"qos"` + Retain bool `json:"retain"` +} + +// MarshalBinary encodes the values into a json string. 
+func (d Client) MarshalBinary() (data []byte, err error) { + return json.Marshal(d) +} + +// UnmarshalBinary decodes a json string into a struct. +func (d *Client) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return nil + } + return json.Unmarshal(data, d) +} + +// Message is a storable representation of an MQTT message (specifically publish). +type Message struct { + Properties MessageProperties `json:"properties"` // - + Payload []byte `json:"payload"` // the message payload (if retained) + T string `json:"t"` // the data type + ID string `json:"id" storm:"id"` // the storage key + Origin string `json:"origin"` // the id of the client who sent the message + TopicName string `json:"topic_name"` // the topic the message was sent to (if retained) + FixedHeader packets.FixedHeader `json:"fixedheader"` // the header properties of the message + Created int64 `json:"created"` // the time the message was created in unixtime + Sent int64 `json:"sent"` // the last time the message was sent (for retries) in unixtime (if inflight) + PacketID uint16 `json:"packet_id"` // the unique id of the packet (if inflight) +} + +// MessageProperties contains a limited subset of mqtt v5 properties specific to publish messages. +type MessageProperties struct { + CorrelationData []byte `json:"correlationData"` + SubscriptionIdentifier []int `json:"subscriptionIdentifier"` + User []packets.UserProperty `json:"user"` + ContentType string `json:"contentType"` + ResponseTopic string `json:"responseTopic"` + MessageExpiryInterval uint32 `json:"messageExpiry"` + TopicAlias uint16 `json:"topicAlias"` + PayloadFormat byte `json:"payloadFormat"` + PayloadFormatFlag bool `json:"payloadFormatFlag"` +} + +// MarshalBinary encodes the values into a json string. +func (d Message) MarshalBinary() (data []byte, err error) { + return json.Marshal(d) +} + +// UnmarshalBinary decodes a json string into a struct. 
+func (d *Message) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return nil + } + return json.Unmarshal(data, d) +} + +// ToPacket converts a storage.Message to a standard packet. +func (d *Message) ToPacket() packets.Packet { + pk := packets.Packet{ + FixedHeader: d.FixedHeader, + PacketID: d.PacketID, + TopicName: d.TopicName, + Payload: d.Payload, + Origin: d.Origin, + Created: d.Created, + Properties: packets.Properties{ + PayloadFormat: d.Properties.PayloadFormat, + PayloadFormatFlag: d.Properties.PayloadFormatFlag, + MessageExpiryInterval: d.Properties.MessageExpiryInterval, + ContentType: d.Properties.ContentType, + ResponseTopic: d.Properties.ResponseTopic, + CorrelationData: d.Properties.CorrelationData, + SubscriptionIdentifier: d.Properties.SubscriptionIdentifier, + TopicAlias: d.Properties.TopicAlias, + User: d.Properties.User, + }, + } + + // Return a deep copy of the packet data otherwise the slices will + // continue pointing at the values from the storage packet. + pk = pk.Copy(true) + pk.FixedHeader.Dup = d.FixedHeader.Dup + + return pk +} + +// Subscription is a storable representation of an MQTT subscription. +type Subscription struct { + T string `json:"t"` + ID string `json:"id" storm:"id"` + Client string `json:"client"` + Filter string `json:"filter"` + Identifier int `json:"identifier"` + RetainHandling byte `json:"retain_handling"` + Qos byte `json:"qos"` + RetainAsPublished bool `json:"retain_as_pub"` + NoLocal bool `json:"no_local"` +} + +// MarshalBinary encodes the values into a json string. +func (d Subscription) MarshalBinary() (data []byte, err error) { + return json.Marshal(d) +} + +// UnmarshalBinary decodes a json string into a struct. +func (d *Subscription) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return nil + } + return json.Unmarshal(data, d) +} + +// SystemInfo is a storable representation of the system information values. 
+type SystemInfo struct { + system.Info // embed the system info struct + T string `json:"t"` // the data type + ID string `json:"id" storm:"id"` // the storage key +} + +// MarshalBinary encodes the values into a json string. +func (d SystemInfo) MarshalBinary() (data []byte, err error) { + return json.Marshal(d) +} + +// UnmarshalBinary decodes a json string into a struct. +func (d *SystemInfo) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return nil + } + return json.Unmarshal(data, d) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/inflight.go b/vendor/github.com/mochi-mqtt/server/v2/inflight.go new file mode 100644 index 000000000..9d949584d --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/inflight.go @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2023 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package mqtt + +import ( + "sort" + "sync" + "sync/atomic" + + "github.com/mochi-mqtt/server/v2/packets" +) + +// Inflight is a map of InflightMessage keyed on packet id. +type Inflight struct { + sync.RWMutex + internal map[uint16]packets.Packet // internal contains the inflight packets + receiveQuota int32 // remaining inbound qos quota for flow control + sendQuota int32 // remaining outbound qos quota for flow control + maximumReceiveQuota int32 // maximum allowed receive quota + maximumSendQuota int32 // maximum allowed send quota +} + +// NewInflights returns a new instance of an Inflight packets map. +func NewInflights() *Inflight { + return &Inflight{ + internal: map[uint16]packets.Packet{}, + } +} + +// Set adds or updates an inflight packet by packet id. +func (i *Inflight) Set(m packets.Packet) bool { + i.Lock() + defer i.Unlock() + + _, ok := i.internal[m.PacketID] + i.internal[m.PacketID] = m + return !ok +} + +// Get returns an inflight packet by packet id. 
+func (i *Inflight) Get(id uint16) (packets.Packet, bool) {
+	i.RLock()
+	defer i.RUnlock()
+
+	if m, ok := i.internal[id]; ok {
+		return m, true
+	}
+
+	return packets.Packet{}, false
+}
+
+// Len returns the size of the inflight messages map.
+func (i *Inflight) Len() int {
+	i.RLock()
+	defer i.RUnlock()
+	return len(i.internal)
+}
+
+// Clone returns a new instance of Inflight with the same message data.
+// This is used when transferring inflights from a taken-over session.
+func (i *Inflight) Clone() *Inflight {
+	c := NewInflights()
+	i.RLock()
+	defer i.RUnlock()
+	for k, v := range i.internal {
+		c.internal[k] = v
+	}
+	return c
+}
+
+// GetAll returns all the inflight messages.
+func (i *Inflight) GetAll(immediate bool) []packets.Packet {
+	i.RLock()
+	defer i.RUnlock()
+
+	m := []packets.Packet{}
+	for _, v := range i.internal {
+		if !immediate || (immediate && v.Expiry < 0) {
+			m = append(m, v)
+		}
+	}
+
+	sort.Slice(m, func(i, j int) bool {
+		return uint16(m[i].Created) < uint16(m[j].Created)
+	})
+
+	return m
+}
+
+// NextImmediate returns the next inflight packet which is indicated to be sent immediately.
+// This typically occurs when the quota has been exhausted, and we need to wait until new quota
+// is free to continue sending.
+func (i *Inflight) NextImmediate() (packets.Packet, bool) {
+	i.RLock()
+	defer i.RUnlock()
+
+	m := i.GetAll(true)
+	if len(m) > 0 {
+		return m[0], true
+	}
+
+	return packets.Packet{}, false
+}
+
+// Delete removes an in-flight message from the map. Returns true if the message existed.
+func (i *Inflight) Delete(id uint16) bool {
+	i.Lock()
+	defer i.Unlock()
+
+	_, ok := i.internal[id]
+	delete(i.internal, id)
+
+	return ok
+}
+
+// DecreaseReceiveQuota reduces the receive quota by 1.
+func (i *Inflight) DecreaseReceiveQuota() {
+	if atomic.LoadInt32(&i.receiveQuota) > 0 {
+		atomic.AddInt32(&i.receiveQuota, -1)
+	}
+}
+
+// IncreaseReceiveQuota increases the receive quota by 1.
+func (i *Inflight) IncreaseReceiveQuota() { + if atomic.LoadInt32(&i.receiveQuota) < atomic.LoadInt32(&i.maximumReceiveQuota) { + atomic.AddInt32(&i.receiveQuota, 1) + } +} + +// ResetReceiveQuota resets the receive quota to the maximum allowed value. +func (i *Inflight) ResetReceiveQuota(n int32) { + atomic.StoreInt32(&i.receiveQuota, n) + atomic.StoreInt32(&i.maximumReceiveQuota, n) +} + +// DecreaseSendQuota reduces the send quota by 1. +func (i *Inflight) DecreaseSendQuota() { + if atomic.LoadInt32(&i.sendQuota) > 0 { + atomic.AddInt32(&i.sendQuota, -1) + } +} + +// IncreaseSendQuota increases the send quota by 1. +func (i *Inflight) IncreaseSendQuota() { + if atomic.LoadInt32(&i.sendQuota) < atomic.LoadInt32(&i.maximumSendQuota) { + atomic.AddInt32(&i.sendQuota, 1) + } +} + +// ResetSendQuota resets the send quota to the maximum allowed value. +func (i *Inflight) ResetSendQuota(n int32) { + atomic.StoreInt32(&i.sendQuota, n) + atomic.StoreInt32(&i.maximumSendQuota, n) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/http_healthcheck.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/http_healthcheck.go new file mode 100644 index 000000000..a82e2e36d --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/http_healthcheck.go @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2023 mochi-mqtt, mochi-co +// SPDX-FileContributor: Derek Duncan + +package listeners + +import ( + "context" + "log/slog" + "net/http" + "sync" + "sync/atomic" + "time" +) + +// HTTPHealthCheck is a listener for providing an HTTP healthcheck endpoint. 
+type HTTPHealthCheck struct { + sync.RWMutex + id string // the internal id of the listener + address string // the network address to bind to + config *Config // configuration values for the listener + listen *http.Server // the http server + end uint32 // ensure the close methods are only called once +} + +// NewHTTPHealthCheck initialises and returns a new HTTP listener, listening on an address. +func NewHTTPHealthCheck(id, address string, config *Config) *HTTPHealthCheck { + if config == nil { + config = new(Config) + } + return &HTTPHealthCheck{ + id: id, + address: address, + config: config, + } +} + +// ID returns the id of the listener. +func (l *HTTPHealthCheck) ID() string { + return l.id +} + +// Address returns the address of the listener. +func (l *HTTPHealthCheck) Address() string { + return l.address +} + +// Protocol returns the address of the listener. +func (l *HTTPHealthCheck) Protocol() string { + if l.listen != nil && l.listen.TLSConfig != nil { + return "https" + } + + return "http" +} + +// Init initializes the listener. +func (l *HTTPHealthCheck) Init(_ *slog.Logger) error { + mux := http.NewServeMux() + mux.HandleFunc("/healthcheck", func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + w.WriteHeader(http.StatusMethodNotAllowed) + } + }) + l.listen = &http.Server{ + ReadTimeout: 5 * time.Second, + WriteTimeout: 5 * time.Second, + Addr: l.address, + Handler: mux, + } + + if l.config.TLSConfig != nil { + l.listen.TLSConfig = l.config.TLSConfig + } + + return nil +} + +// Serve starts listening for new connections and serving responses. +func (l *HTTPHealthCheck) Serve(establish EstablishFn) { + if l.listen.TLSConfig != nil { + _ = l.listen.ListenAndServeTLS("", "") + } else { + _ = l.listen.ListenAndServe() + } +} + +// Close closes the listener and any client connections. 
+func (l *HTTPHealthCheck) Close(closeClients CloseFn) { + l.Lock() + defer l.Unlock() + + if atomic.CompareAndSwapUint32(&l.end, 0, 1) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + _ = l.listen.Shutdown(ctx) + } + + closeClients(l.id) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/http_sysinfo.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/http_sysinfo.go new file mode 100644 index 000000000..0e064d57f --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/http_sysinfo.go @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package listeners + +import ( + "context" + "encoding/json" + "io" + "log/slog" + "net/http" + "sync" + "sync/atomic" + "time" + + "github.com/mochi-mqtt/server/v2/system" +) + +// HTTPStats is a listener for presenting the server $SYS stats on a JSON http endpoint. +type HTTPStats struct { + sync.RWMutex + id string // the internal id of the listener + address string // the network address to bind to + config *Config // configuration values for the listener + listen *http.Server // the http server + sysInfo *system.Info // pointers to the server data + log *slog.Logger // server logger + end uint32 // ensure the close methods are only called once +} + +// NewHTTPStats initialises and returns a new HTTP listener, listening on an address. +func NewHTTPStats(id, address string, config *Config, sysInfo *system.Info) *HTTPStats { + if config == nil { + config = new(Config) + } + return &HTTPStats{ + id: id, + address: address, + sysInfo: sysInfo, + config: config, + } +} + +// ID returns the id of the listener. +func (l *HTTPStats) ID() string { + return l.id +} + +// Address returns the address of the listener. +func (l *HTTPStats) Address() string { + return l.address +} + +// Protocol returns the address of the listener. 
+func (l *HTTPStats) Protocol() string { + if l.listen != nil && l.listen.TLSConfig != nil { + return "https" + } + + return "http" +} + +// Init initializes the listener. +func (l *HTTPStats) Init(log *slog.Logger) error { + l.log = log + mux := http.NewServeMux() + mux.HandleFunc("/", l.jsonHandler) + l.listen = &http.Server{ + ReadTimeout: 5 * time.Second, + WriteTimeout: 5 * time.Second, + Addr: l.address, + Handler: mux, + } + + if l.config.TLSConfig != nil { + l.listen.TLSConfig = l.config.TLSConfig + } + + return nil +} + +// Serve starts listening for new connections and serving responses. +func (l *HTTPStats) Serve(establish EstablishFn) { + + var err error + if l.listen.TLSConfig != nil { + err = l.listen.ListenAndServeTLS("", "") + } else { + err = l.listen.ListenAndServe() + } + + // After the listener has been shutdown, no need to print the http.ErrServerClosed error. + if err != nil && atomic.LoadUint32(&l.end) == 0 { + l.log.Error("failed to serve.", "error", err, "listener", l.id) + } +} + +// Close closes the listener and any client connections. +func (l *HTTPStats) Close(closeClients CloseFn) { + l.Lock() + defer l.Unlock() + + if atomic.CompareAndSwapUint32(&l.end, 0, 1) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + _ = l.listen.Shutdown(ctx) + } + + closeClients(l.id) +} + +// jsonHandler is an HTTP handler which outputs the $SYS stats as JSON. 
+func (l *HTTPStats) jsonHandler(w http.ResponseWriter, req *http.Request) { + info := *l.sysInfo.Clone() + + out, err := json.MarshalIndent(info, "", "\t") + if err != nil { + _, _ = io.WriteString(w, err.Error()) + } + + _, _ = w.Write(out) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/listeners.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/listeners.go new file mode 100644 index 000000000..301dd56ed --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/listeners.go @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package listeners + +import ( + "crypto/tls" + "net" + "sync" + + "log/slog" +) + +// Config contains configuration values for a listener. +type Config struct { + // TLSConfig is a tls.Config configuration to be used with the listener. + // See examples folder for basic and mutual-tls use. + TLSConfig *tls.Config +} + +// EstablishFn is a callback function for establishing new clients. +type EstablishFn func(id string, c net.Conn) error + +// CloseFn is a callback function for closing all listener clients. +type CloseFn func(id string) + +// Listener is an interface for network listeners. A network listener listens +// for incoming client connections and adds them to the server. +type Listener interface { + Init(*slog.Logger) error // open the network address + Serve(EstablishFn) // starting actively listening for new connections + ID() string // return the id of the listener + Address() string // the address of the listener + Protocol() string // the protocol in use by the listener + Close(CloseFn) // stop and close the listener +} + +// Listeners contains the network listeners for the broker. +type Listeners struct { + ClientsWg sync.WaitGroup // a waitgroup that waits for all clients in all listeners to finish. + internal map[string]Listener // a map of active listeners. 
+ sync.RWMutex +} + +// New returns a new instance of Listeners. +func New() *Listeners { + return &Listeners{ + internal: map[string]Listener{}, + } +} + +// Add adds a new listener to the listeners map, keyed on id. +func (l *Listeners) Add(val Listener) { + l.Lock() + defer l.Unlock() + l.internal[val.ID()] = val +} + +// Get returns the value of a listener if it exists. +func (l *Listeners) Get(id string) (Listener, bool) { + l.RLock() + defer l.RUnlock() + val, ok := l.internal[id] + return val, ok +} + +// Len returns the length of the listeners map. +func (l *Listeners) Len() int { + l.RLock() + defer l.RUnlock() + return len(l.internal) +} + +// Delete removes a listener from the internal map. +func (l *Listeners) Delete(id string) { + l.Lock() + defer l.Unlock() + delete(l.internal, id) +} + +// Serve starts a listener serving from the internal map. +func (l *Listeners) Serve(id string, establisher EstablishFn) { + l.RLock() + defer l.RUnlock() + listener := l.internal[id] + + go func(e EstablishFn) { + listener.Serve(e) + }(establisher) +} + +// ServeAll starts all listeners serving from the internal map. +func (l *Listeners) ServeAll(establisher EstablishFn) { + l.RLock() + i := 0 + ids := make([]string, len(l.internal)) + for id := range l.internal { + ids[i] = id + i++ + } + l.RUnlock() + + for _, id := range ids { + l.Serve(id, establisher) + } +} + +// Close stops a listener from the internal map. +func (l *Listeners) Close(id string, closer CloseFn) { + l.RLock() + defer l.RUnlock() + if listener, ok := l.internal[id]; ok { + listener.Close(closer) + } +} + +// CloseAll iterates and closes all registered listeners. 
+func (l *Listeners) CloseAll(closer CloseFn) {
+	l.RLock()
+	i := 0
+	ids := make([]string, len(l.internal))
+	for id := range l.internal {
+		ids[i] = id
+		i++
+	}
+	l.RUnlock()
+
+	for _, id := range ids {
+		l.Close(id, closer)
+	}
+	l.ClientsWg.Wait()
+}
diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/mock.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/mock.go
new file mode 100644
index 000000000..826f80c32
--- /dev/null
+++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/mock.go
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co
+// SPDX-FileContributor: mochi-co
+
+package listeners
+
+import (
+	"fmt"
+	"net"
+	"sync"
+
+	"log/slog"
+)
+
+// MockEstablisher is a function signature which can be used in testing.
+func MockEstablisher(id string, c net.Conn) error {
+	return nil
+}
+
+// MockCloser is a function signature which can be used in testing.
+func MockCloser(id string) {}
+
+// MockListener is a mock listener for establishing client connections.
+type MockListener struct {
+	sync.RWMutex
+	id        string    // the id of the listener
+	address   string    // the network address the listener binds to
+	Config    *Config   // configuration for the listener
+	done      chan bool // indicate the listener is done
+	Serving   bool      // indicate the listener is serving
+	Listening bool      // indicate the listener is listening
+	ErrListen bool      // throw an error on listen
+}
+
+// NewMockListener returns a new instance of MockListener.
+func NewMockListener(id, address string) *MockListener {
+	return &MockListener{
+		id:      id,
+		address: address,
+		done:    make(chan bool),
+	}
+}
+
+// Serve serves the mock listener.
+func (l *MockListener) Serve(establisher EstablishFn) {
+	l.Lock()
+	l.Serving = true
+	l.Unlock()
+
+	for range l.done {
+		return
+	}
+}
+
+// Init initializes the listener.
+func (l *MockListener) Init(log *slog.Logger) error {
+	if l.ErrListen {
+		return fmt.Errorf("listen failure")
+	}
+
+	l.Lock()
+	defer l.Unlock()
+	l.Listening = true
+	return nil
+}
+
+// ID returns the id of the mock listener.
+func (l *MockListener) ID() string {
+	return l.id
+}
+
+// Address returns the address of the listener.
+func (l *MockListener) Address() string {
+	return l.address
+}
+
+// Protocol returns the protocol of the listener.
+func (l *MockListener) Protocol() string {
+	return "mock"
+}
+
+// Close closes the mock listener.
+func (l *MockListener) Close(closer CloseFn) {
+	l.Lock()
+	defer l.Unlock()
+	l.Serving = false
+	closer(l.id)
+	close(l.done)
+}
+
+// IsServing indicates whether the mock listener is serving.
+func (l *MockListener) IsServing() bool {
+	l.Lock()
+	defer l.Unlock()
+	return l.Serving
+}
+
+// IsListening indicates whether the mock listener is listening.
+func (l *MockListener) IsListening() bool {
+	l.Lock()
+	defer l.Unlock()
+	return l.Listening
+}
diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/net.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/net.go
new file mode 100644
index 000000000..fa4ef3dc8
--- /dev/null
+++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/net.go
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: 2023 mochi-mqtt, mochi-co
+// SPDX-FileContributor: Jeroen Rinzema
+
+package listeners
+
+import (
+	"net"
+	"sync"
+	"sync/atomic"
+
+	"log/slog"
+)
+
+// Net is a listener for establishing client connections on basic TCP protocol.
+type Net struct { // [MQTT-4.2.0-1] + mu sync.Mutex + listener net.Listener // a net.Listener which will listen for new clients + id string // the internal id of the listener + log *slog.Logger // server logger + end uint32 // ensure the close methods are only called once +} + +// NewNet initialises and returns a listener serving incoming connections on the given net.Listener +func NewNet(id string, listener net.Listener) *Net { + return &Net{ + id: id, + listener: listener, + } +} + +// ID returns the id of the listener. +func (l *Net) ID() string { + return l.id +} + +// Address returns the address of the listener. +func (l *Net) Address() string { + return l.listener.Addr().String() +} + +// Protocol returns the network of the listener. +func (l *Net) Protocol() string { + return l.listener.Addr().Network() +} + +// Init initializes the listener. +func (l *Net) Init(log *slog.Logger) error { + l.log = log + return nil +} + +// Serve starts waiting for new TCP connections, and calls the establish +// connection callback for any received. +func (l *Net) Serve(establish EstablishFn) { + for { + if atomic.LoadUint32(&l.end) == 1 { + return + } + + conn, err := l.listener.Accept() + if err != nil { + return + } + + if atomic.LoadUint32(&l.end) == 0 { + go func() { + err = establish(l.id, conn) + if err != nil { + l.log.Warn("", "error", err) + } + }() + } + } +} + +// Close closes the listener and any client connections. 
+func (l *Net) Close(closeClients CloseFn) { + l.mu.Lock() + defer l.mu.Unlock() + + if atomic.CompareAndSwapUint32(&l.end, 0, 1) { + closeClients(l.id) + } + + if l.listener != nil { + err := l.listener.Close() + if err != nil { + return + } + } +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/tcp.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/tcp.go new file mode 100644 index 000000000..60bd44a21 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/tcp.go @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package listeners + +import ( + "crypto/tls" + "net" + "sync" + "sync/atomic" + + "log/slog" +) + +// TCP is a listener for establishing client connections on basic TCP protocol. +type TCP struct { // [MQTT-4.2.0-1] + sync.RWMutex + id string // the internal id of the listener + address string // the network address to bind to + listen net.Listener // a net.Listener which will listen for new clients + config *Config // configuration values for the listener + log *slog.Logger // server logger + end uint32 // ensure the close methods are only called once +} + +// NewTCP initialises and returns a new TCP listener, listening on an address. +func NewTCP(id, address string, config *Config) *TCP { + if config == nil { + config = new(Config) + } + + return &TCP{ + id: id, + address: address, + config: config, + } +} + +// ID returns the id of the listener. +func (l *TCP) ID() string { + return l.id +} + +// Address returns the address of the listener. +func (l *TCP) Address() string { + if l.listen != nil { + return l.listen.Addr().String() + } + return l.address +} + +// Protocol returns the address of the listener. +func (l *TCP) Protocol() string { + return "tcp" +} + +// Init initializes the listener. 
+func (l *TCP) Init(log *slog.Logger) error { + l.log = log + + var err error + if l.config.TLSConfig != nil { + l.listen, err = tls.Listen("tcp", l.address, l.config.TLSConfig) + } else { + l.listen, err = net.Listen("tcp", l.address) + } + + return err +} + +// Serve starts waiting for new TCP connections, and calls the establish +// connection callback for any received. +func (l *TCP) Serve(establish EstablishFn) { + for { + if atomic.LoadUint32(&l.end) == 1 { + return + } + + conn, err := l.listen.Accept() + if err != nil { + return + } + + if atomic.LoadUint32(&l.end) == 0 { + go func() { + err = establish(l.id, conn) + if err != nil { + l.log.Warn("", "error", err) + } + }() + } + } +} + +// Close closes the listener and any client connections. +func (l *TCP) Close(closeClients CloseFn) { + l.Lock() + defer l.Unlock() + + if atomic.CompareAndSwapUint32(&l.end, 0, 1) { + closeClients(l.id) + } + + if l.listen != nil { + err := l.listen.Close() + if err != nil { + return + } + } +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/unixsock.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/unixsock.go new file mode 100644 index 000000000..5892fc944 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/unixsock.go @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: jason@zgwit.com + +package listeners + +import ( + "net" + "os" + "sync" + "sync/atomic" + + "log/slog" +) + +// UnixSock is a listener for establishing client connections on basic UnixSock protocol. +type UnixSock struct { + sync.RWMutex + id string // the internal id of the listener. + address string // the network address to bind to. + listen net.Listener // a net.Listener which will listen for new clients. + log *slog.Logger // server logger + end uint32 // ensure the close methods are only called once. 
+} + +// NewUnixSock initialises and returns a new UnixSock listener, listening on an address. +func NewUnixSock(id, address string) *UnixSock { + return &UnixSock{ + id: id, + address: address, + } +} + +// ID returns the id of the listener. +func (l *UnixSock) ID() string { + return l.id +} + +// Address returns the address of the listener. +func (l *UnixSock) Address() string { + return l.address +} + +// Protocol returns the address of the listener. +func (l *UnixSock) Protocol() string { + return "unix" +} + +// Init initializes the listener. +func (l *UnixSock) Init(log *slog.Logger) error { + l.log = log + + var err error + _ = os.Remove(l.address) + l.listen, err = net.Listen("unix", l.address) + return err +} + +// Serve starts waiting for new UnixSock connections, and calls the establish +// connection callback for any received. +func (l *UnixSock) Serve(establish EstablishFn) { + for { + if atomic.LoadUint32(&l.end) == 1 { + return + } + + conn, err := l.listen.Accept() + if err != nil { + return + } + + if atomic.LoadUint32(&l.end) == 0 { + go func() { + err = establish(l.id, conn) + if err != nil { + l.log.Warn("", "error", err) + } + }() + } + } +} + +// Close closes the listener and any client connections. 
+func (l *UnixSock) Close(closeClients CloseFn) { + l.Lock() + defer l.Unlock() + + if atomic.CompareAndSwapUint32(&l.end, 0, 1) { + closeClients(l.id) + } + + if l.listen != nil { + err := l.listen.Close() + if err != nil { + return + } + } +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/websocket.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/websocket.go new file mode 100644 index 000000000..58bf74c89 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/websocket.go @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package listeners + +import ( + "context" + "errors" + "io" + "net" + "net/http" + "sync" + "sync/atomic" + "time" + + "log/slog" + + "github.com/gorilla/websocket" +) + +var ( + // ErrInvalidMessage indicates that a message payload was not valid. + ErrInvalidMessage = errors.New("message type not binary") +) + +// Websocket is a listener for establishing websocket connections. +type Websocket struct { // [MQTT-4.2.0-1] + sync.RWMutex + id string // the internal id of the listener + address string // the network address to bind to + config *Config // configuration values for the listener + listen *http.Server // a http server for serving websocket connections + log *slog.Logger // server logger + establish EstablishFn // the server's establish connection handler + upgrader *websocket.Upgrader // upgrade the incoming http/tcp connection to a websocket compliant connection. + end uint32 // ensure the close methods are only called once +} + +// NewWebsocket initialises and returns a new Websocket listener, listening on an address. 
+func NewWebsocket(id, address string, config *Config) *Websocket {
+	if config == nil {
+		config = new(Config)
+	}
+
+	return &Websocket{
+		id:      id,
+		address: address,
+		config:  config,
+		upgrader: &websocket.Upgrader{
+			Subprotocols: []string{"mqtt"},
+			CheckOrigin: func(r *http.Request) bool {
+				return true
+			},
+		},
+	}
+}
+
+// ID returns the id of the listener.
+func (l *Websocket) ID() string {
+	return l.id
+}
+
+// Address returns the address of the listener.
+func (l *Websocket) Address() string {
+	return l.address
+}
+
+// Protocol returns the protocol of the listener: "wss" if TLS is configured, otherwise "ws".
+func (l *Websocket) Protocol() string {
+	if l.config.TLSConfig != nil {
+		return "wss"
+	}
+
+	return "ws"
+}
+
+// Init initializes the listener, preparing an http server whose handler upgrades incoming requests to websocket connections.
+func (l *Websocket) Init(log *slog.Logger) error {
+	l.log = log
+
+	mux := http.NewServeMux()
+	mux.HandleFunc("/", l.handler)
+	l.listen = &http.Server{
+		Addr:         l.address,
+		Handler:      mux,
+		TLSConfig:    l.config.TLSConfig,
+		ReadTimeout:  60 * time.Second,
+		WriteTimeout: 60 * time.Second,
+	}
+
+	return nil
+}
+
+// handler upgrades and handles an incoming websocket connection.
+func (l *Websocket) handler(w http.ResponseWriter, r *http.Request) {
+	c, err := l.upgrader.Upgrade(w, r, nil)
+	if err != nil {
+		return
+	}
+	defer c.Close()
+
+	err = l.establish(l.id, &wsConn{Conn: c.UnderlyingConn(), c: c})
+	if err != nil {
+		l.log.Warn("", "error", err)
+	}
+}
+
+// Serve starts waiting for new Websocket connections, and calls the connection
+// establishment callback for any received.
+func (l *Websocket) Serve(establish EstablishFn) {
+	var err error
+	l.establish = establish
+
+	if l.listen.TLSConfig != nil {
+		err = l.listen.ListenAndServeTLS("", "")
+	} else {
+		err = l.listen.ListenAndServe()
+	}
+
+	// After the listener has been shutdown, no need to print the http.ErrServerClosed error.
+	if err != nil && atomic.LoadUint32(&l.end) == 0 {
+		l.log.Error("failed to serve.", "error", err, "listener", l.id)
+	}
+}
+
+// Close closes the listener and any client connections.
+func (l *Websocket) Close(closeClients CloseFn) {
+	l.Lock()
+	defer l.Unlock()
+
+	if atomic.CompareAndSwapUint32(&l.end, 0, 1) {
+		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+		defer cancel()
+		_ = l.listen.Shutdown(ctx)
+	}
+
+	closeClients(l.id)
+}
+
+// wsConn is a websocket connection which satisfies the net.Conn interface.
+type wsConn struct {
+	net.Conn
+	c *websocket.Conn
+
+	// reader for the current message (can be nil)
+	r io.Reader
+}
+
+// Read reads the next span of bytes from the websocket connection and returns the number of bytes read.
+func (ws *wsConn) Read(p []byte) (int, error) {
+	if ws.r == nil {
+		op, r, err := ws.c.NextReader()
+		if err != nil {
+			return 0, err
+		}
+
+		if op != websocket.BinaryMessage {
+			err = ErrInvalidMessage
+			return 0, err
+		}
+
+		ws.r = r
+	}
+
+	var n int
+	for {
+		// buffer is full, return what we've read so far
+		if n == len(p) {
+			return n, nil
+		}
+
+		br, err := ws.r.Read(p[n:])
+		n += br
+		if err != nil {
+			// when ANY error occurs, we consider this the end of the current message (either because it really is, via
+			// io.EOF, or because something bad happened, in which case we want to drop the remainder)
+			ws.r = nil
+
+			if errors.Is(err, io.EOF) {
+				err = nil
+			}
+			return n, err
+		}
+	}
+}
+
+// Write writes bytes to the websocket connection.
+func (ws *wsConn) Write(p []byte) (int, error) {
+	err := ws.c.WriteMessage(websocket.BinaryMessage, p)
+	if err != nil {
+		return 0, err
+	}
+
+	return len(p), nil
+}
+
+// Close signals the underlying websocket conn to close.
+func (ws *wsConn) Close() error {
+	return ws.Conn.Close()
+}
diff --git a/vendor/github.com/mochi-mqtt/server/v2/mempool/bufpool.go b/vendor/github.com/mochi-mqtt/server/v2/mempool/bufpool.go
new file mode 100644
index 000000000..b2a2eabb3
--- /dev/null
+++ b/vendor/github.com/mochi-mqtt/server/v2/mempool/bufpool.go
@@ -0,0 +1,81 @@
+package mempool
+
+import (
+	"bytes"
+	"sync"
+)
+
+var bufPool = NewBuffer(0)
+
+// GetBuffer takes a Buffer from the default buffer pool.
+func GetBuffer() *bytes.Buffer { return bufPool.Get() }
+
+// PutBuffer returns a Buffer to the default buffer pool.
+func PutBuffer(x *bytes.Buffer) { bufPool.Put(x) }
+
+type BufferPool interface {
+	Get() *bytes.Buffer
+	Put(x *bytes.Buffer)
+}
+
+// NewBuffer returns a buffer pool. The max parameter specifies the max capacity of the Buffer the pool will
+// return. If the Buffer becomes larger than max, it will no longer be returned to the pool. If
+// max <= 0, no limit will be enforced.
+func NewBuffer(max int) BufferPool {
+	if max > 0 {
+		return newBufferWithCap(max)
+	}
+
+	return newBuffer()
+}
+
+// Buffer is a Buffer pool.
+type Buffer struct {
+	pool *sync.Pool
+}
+
+func newBuffer() *Buffer {
+	return &Buffer{
+		pool: &sync.Pool{
+			New: func() any { return new(bytes.Buffer) },
+		},
+	}
+}
+
+// Get a Buffer from the pool.
+func (b *Buffer) Get() *bytes.Buffer {
+	return b.pool.Get().(*bytes.Buffer)
+}
+
+// Put the Buffer back into pool. It resets the Buffer for reuse.
+func (b *Buffer) Put(x *bytes.Buffer) {
+	x.Reset()
+	b.pool.Put(x)
+}
+
+// BufferWithCap is a Buffer pool that discards Buffers whose capacity has grown beyond max.
+type BufferWithCap struct {
+	bp  *Buffer
+	max int
+}
+
+func newBufferWithCap(max int) *BufferWithCap {
+	return &BufferWithCap{
+		bp:  newBuffer(),
+		max: max,
+	}
+}
+
+// Get a Buffer from the pool.
+func (b *BufferWithCap) Get() *bytes.Buffer {
+	return b.bp.Get()
+}
+
+// Put the Buffer back into the pool if the capacity doesn't exceed the limit. It resets the Buffer
+// for reuse.
+func (b *BufferWithCap) Put(x *bytes.Buffer) { + if x.Cap() > b.max { + return + } + b.bp.Put(x) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/packets/codec.go b/vendor/github.com/mochi-mqtt/server/v2/packets/codec.go new file mode 100644 index 000000000..152d777ed --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/packets/codec.go @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package packets + +import ( + "bytes" + "encoding/binary" + "io" + "unicode/utf8" + "unsafe" +) + +// bytesToString provides a zero-alloc no-copy byte to string conversion. +// via https://github.com/golang/go/issues/25484#issuecomment-391415660 +func bytesToString(bs []byte) string { + return *(*string)(unsafe.Pointer(&bs)) +} + +// decodeUint16 extracts the value of two bytes from a byte array. +func decodeUint16(buf []byte, offset int) (uint16, int, error) { + if len(buf) < offset+2 { + return 0, 0, ErrMalformedOffsetUintOutOfRange + } + + return binary.BigEndian.Uint16(buf[offset : offset+2]), offset + 2, nil +} + +// decodeUint32 extracts the value of four bytes from a byte array. +func decodeUint32(buf []byte, offset int) (uint32, int, error) { + if len(buf) < offset+4 { + return 0, 0, ErrMalformedOffsetUintOutOfRange + } + + return binary.BigEndian.Uint32(buf[offset : offset+4]), offset + 4, nil +} + +// decodeString extracts a string from a byte array, beginning at an offset. +func decodeString(buf []byte, offset int) (string, int, error) { + b, n, err := decodeBytes(buf, offset) + if err != nil { + return "", 0, err + } + + if !validUTF8(b) { // [MQTT-1.5.4-1] [MQTT-3.1.3-5] + return "", 0, ErrMalformedInvalidUTF8 + } + + return bytesToString(b), n, nil +} + +// validUTF8 checks if the byte array contains valid UTF-8 characters. 
+func validUTF8(b []byte) bool {
+	return utf8.Valid(b) && bytes.IndexByte(b, 0x00) == -1 // [MQTT-1.5.4-1] [MQTT-1.5.4-2]
+}
+
+// decodeBytes extracts a byte array from a byte array, beginning at an offset. Used primarily for message payloads.
+func decodeBytes(buf []byte, offset int) ([]byte, int, error) {
+	length, next, err := decodeUint16(buf, offset)
+	if err != nil {
+		return make([]byte, 0), 0, err
+	}
+
+	if next+int(length) > len(buf) {
+		return make([]byte, 0), 0, ErrMalformedOffsetBytesOutOfRange
+	}
+
+	return buf[next : next+int(length)], next + int(length), nil
+}
+
+// decodeByte extracts the value of a byte from a byte array.
+func decodeByte(buf []byte, offset int) (byte, int, error) {
+	if len(buf) <= offset {
+		return 0, 0, ErrMalformedOffsetByteOutOfRange
+	}
+	return buf[offset], offset + 1, nil
+}
+
+// decodeByteBool extracts the value of a byte from a byte array and returns a bool.
+func decodeByteBool(buf []byte, offset int) (bool, int, error) {
+	if len(buf) <= offset {
+		return false, 0, ErrMalformedOffsetBoolOutOfRange
+	}
+	return 1&buf[offset] > 0, offset + 1, nil
+}
+
+// encodeBool converts a bool to a byte (1 for true, 0 for false).
+func encodeBool(b bool) byte {
+	if b {
+		return 1
+	}
+	return 0
+}
+
+// encodeBytes encodes a byte array to a byte array. Used primarily for message payloads.
+func encodeBytes(val []byte) []byte {
+	// In most circumstances the number of bytes being encoded is small.
+	// Setting the cap to a low amount allows us to account for those without
+	// triggering allocation growth on append unless we need to.
+	buf := make([]byte, 2, 32)
+	binary.BigEndian.PutUint16(buf, uint16(len(val)))
+	return append(buf, val...)
+}
+
+// encodeUint16 encodes a uint16 value to a byte array.
+func encodeUint16(val uint16) []byte {
+	buf := make([]byte, 2)
+	binary.BigEndian.PutUint16(buf, val)
+	return buf
+}
+
+// encodeUint32 encodes a uint32 value to a byte array.
+func encodeUint32(val uint32) []byte { + buf := make([]byte, 4) + binary.BigEndian.PutUint32(buf, val) + return buf +} + +// encodeString encodes a string to a byte array. +func encodeString(val string) []byte { + // Like encodeBytes, we set the cap to a small number to avoid + // triggering allocation growth on append unless we absolutely need to. + buf := make([]byte, 2, 32) + binary.BigEndian.PutUint16(buf, uint16(len(val))) + return append(buf, []byte(val)...) +} + +// encodeLength writes length bits for the header. +func encodeLength(b *bytes.Buffer, length int64) { + // 1.5.5 Variable Byte Integer encode non-normative + // https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901027 + for { + eb := byte(length % 128) + length /= 128 + if length > 0 { + eb |= 0x80 + } + b.WriteByte(eb) + if length == 0 { + break // [MQTT-1.5.5-1] + } + } +} + +func DecodeLength(b io.ByteReader) (n, bu int, err error) { + // see 1.5.5 Variable Byte Integer decode non-normative + // https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901027 + var multiplier uint32 + var value uint32 + bu = 1 + for { + eb, err := b.ReadByte() + if err != nil { + return 0, bu, err + } + + value |= uint32(eb&127) << multiplier + if value > 268435455 { + return 0, bu, ErrMalformedVariableByteInteger + } + + if (eb & 128) == 0 { + break + } + + multiplier += 7 + bu++ + } + + return int(value), bu, nil +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/packets/codes.go b/vendor/github.com/mochi-mqtt/server/v2/packets/codes.go new file mode 100644 index 000000000..5af1b7419 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/packets/codes.go @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package packets + +// Code contains a reason code and reason string for a response. 
+type Code struct { + Reason string + Code byte +} + +// String returns the readable reason for a code. +func (c Code) String() string { + return c.Reason +} + +// Error returns the readable reason for a code. +func (c Code) Error() string { + return c.Reason +} + +var ( + // QosCodes indicates the reason codes for each Qos byte. + QosCodes = map[byte]Code{ + 0: CodeGrantedQos0, + 1: CodeGrantedQos1, + 2: CodeGrantedQos2, + } + + CodeSuccessIgnore = Code{Code: 0x00, Reason: "ignore packet"} + CodeSuccess = Code{Code: 0x00, Reason: "success"} + CodeDisconnect = Code{Code: 0x00, Reason: "disconnected"} + CodeGrantedQos0 = Code{Code: 0x00, Reason: "granted qos 0"} + CodeGrantedQos1 = Code{Code: 0x01, Reason: "granted qos 1"} + CodeGrantedQos2 = Code{Code: 0x02, Reason: "granted qos 2"} + CodeDisconnectWillMessage = Code{Code: 0x04, Reason: "disconnect with will message"} + CodeNoMatchingSubscribers = Code{Code: 0x10, Reason: "no matching subscribers"} + CodeNoSubscriptionExisted = Code{Code: 0x11, Reason: "no subscription existed"} + CodeContinueAuthentication = Code{Code: 0x18, Reason: "continue authentication"} + CodeReAuthenticate = Code{Code: 0x19, Reason: "re-authenticate"} + ErrUnspecifiedError = Code{Code: 0x80, Reason: "unspecified error"} + ErrMalformedPacket = Code{Code: 0x81, Reason: "malformed packet"} + ErrMalformedProtocolName = Code{Code: 0x81, Reason: "malformed packet: protocol name"} + ErrMalformedProtocolVersion = Code{Code: 0x81, Reason: "malformed packet: protocol version"} + ErrMalformedFlags = Code{Code: 0x81, Reason: "malformed packet: flags"} + ErrMalformedKeepalive = Code{Code: 0x81, Reason: "malformed packet: keepalive"} + ErrMalformedPacketID = Code{Code: 0x81, Reason: "malformed packet: packet identifier"} + ErrMalformedTopic = Code{Code: 0x81, Reason: "malformed packet: topic"} + ErrMalformedWillTopic = Code{Code: 0x81, Reason: "malformed packet: will topic"} + ErrMalformedWillPayload = Code{Code: 0x81, Reason: "malformed packet: will 
message"} + ErrMalformedUsername = Code{Code: 0x81, Reason: "malformed packet: username"} + ErrMalformedPassword = Code{Code: 0x81, Reason: "malformed packet: password"} + ErrMalformedQos = Code{Code: 0x81, Reason: "malformed packet: qos"} + ErrMalformedOffsetUintOutOfRange = Code{Code: 0x81, Reason: "malformed packet: offset uint out of range"} + ErrMalformedOffsetBytesOutOfRange = Code{Code: 0x81, Reason: "malformed packet: offset bytes out of range"} + ErrMalformedOffsetByteOutOfRange = Code{Code: 0x81, Reason: "malformed packet: offset byte out of range"} + ErrMalformedOffsetBoolOutOfRange = Code{Code: 0x81, Reason: "malformed packet: offset boolean out of range"} + ErrMalformedInvalidUTF8 = Code{Code: 0x81, Reason: "malformed packet: invalid utf-8 string"} + ErrMalformedVariableByteInteger = Code{Code: 0x81, Reason: "malformed packet: variable byte integer out of range"} + ErrMalformedBadProperty = Code{Code: 0x81, Reason: "malformed packet: unknown property"} + ErrMalformedProperties = Code{Code: 0x81, Reason: "malformed packet: properties"} + ErrMalformedWillProperties = Code{Code: 0x81, Reason: "malformed packet: will properties"} + ErrMalformedSessionPresent = Code{Code: 0x81, Reason: "malformed packet: session present"} + ErrMalformedReasonCode = Code{Code: 0x81, Reason: "malformed packet: reason code"} + ErrProtocolViolation = Code{Code: 0x82, Reason: "protocol violation"} + ErrProtocolViolationProtocolName = Code{Code: 0x82, Reason: "protocol violation: protocol name"} + ErrProtocolViolationProtocolVersion = Code{Code: 0x82, Reason: "protocol violation: protocol version"} + ErrProtocolViolationReservedBit = Code{Code: 0x82, Reason: "protocol violation: reserved bit not 0"} + ErrProtocolViolationFlagNoUsername = Code{Code: 0x82, Reason: "protocol violation: username flag set but no value"} + ErrProtocolViolationFlagNoPassword = Code{Code: 0x82, Reason: "protocol violation: password flag set but no value"} + ErrProtocolViolationUsernameNoFlag = Code{Code: 
0x82, Reason: "protocol violation: username set but no flag"} + ErrProtocolViolationPasswordNoFlag = Code{Code: 0x82, Reason: "protocol violation: username set but no flag"} + ErrProtocolViolationPasswordTooLong = Code{Code: 0x82, Reason: "protocol violation: password too long"} + ErrProtocolViolationUsernameTooLong = Code{Code: 0x82, Reason: "protocol violation: username too long"} + ErrProtocolViolationNoPacketID = Code{Code: 0x82, Reason: "protocol violation: missing packet id"} + ErrProtocolViolationSurplusPacketID = Code{Code: 0x82, Reason: "protocol violation: surplus packet id"} + ErrProtocolViolationQosOutOfRange = Code{Code: 0x82, Reason: "protocol violation: qos out of range"} + ErrProtocolViolationSecondConnect = Code{Code: 0x82, Reason: "protocol violation: second connect packet"} + ErrProtocolViolationZeroNonZeroExpiry = Code{Code: 0x82, Reason: "protocol violation: non-zero expiry"} + ErrProtocolViolationRequireFirstConnect = Code{Code: 0x82, Reason: "protocol violation: first packet must be connect"} + ErrProtocolViolationWillFlagNoPayload = Code{Code: 0x82, Reason: "protocol violation: will flag no payload"} + ErrProtocolViolationWillFlagSurplusRetain = Code{Code: 0x82, Reason: "protocol violation: will flag surplus retain"} + ErrProtocolViolationSurplusWildcard = Code{Code: 0x82, Reason: "protocol violation: topic contains wildcards"} + ErrProtocolViolationSurplusSubID = Code{Code: 0x82, Reason: "protocol violation: contained subscription identifier"} + ErrProtocolViolationInvalidTopic = Code{Code: 0x82, Reason: "protocol violation: invalid topic"} + ErrProtocolViolationInvalidSharedNoLocal = Code{Code: 0x82, Reason: "protocol violation: invalid shared no local"} + ErrProtocolViolationNoFilters = Code{Code: 0x82, Reason: "protocol violation: must contain at least one filter"} + ErrProtocolViolationInvalidReason = Code{Code: 0x82, Reason: "protocol violation: invalid reason"} + ErrProtocolViolationOversizeSubID = Code{Code: 0x82, Reason: "protocol 
violation: oversize subscription id"} + ErrProtocolViolationDupNoQos = Code{Code: 0x82, Reason: "protocol violation: dup true with no qos"} + ErrProtocolViolationUnsupportedProperty = Code{Code: 0x82, Reason: "protocol violation: unsupported property"} + ErrProtocolViolationNoTopic = Code{Code: 0x82, Reason: "protocol violation: no topic or alias"} + ErrImplementationSpecificError = Code{Code: 0x83, Reason: "implementation specific error"} + ErrRejectPacket = Code{Code: 0x83, Reason: "packet rejected"} + ErrUnsupportedProtocolVersion = Code{Code: 0x84, Reason: "unsupported protocol version"} + ErrClientIdentifierNotValid = Code{Code: 0x85, Reason: "client identifier not valid"} + ErrClientIdentifierTooLong = Code{Code: 0x85, Reason: "client identifier too long"} + ErrBadUsernameOrPassword = Code{Code: 0x86, Reason: "bad username or password"} + ErrNotAuthorized = Code{Code: 0x87, Reason: "not authorized"} + ErrServerUnavailable = Code{Code: 0x88, Reason: "server unavailable"} + ErrServerBusy = Code{Code: 0x89, Reason: "server busy"} + ErrBanned = Code{Code: 0x8A, Reason: "banned"} + ErrServerShuttingDown = Code{Code: 0x8B, Reason: "server shutting down"} + ErrBadAuthenticationMethod = Code{Code: 0x8C, Reason: "bad authentication method"} + ErrKeepAliveTimeout = Code{Code: 0x8D, Reason: "keep alive timeout"} + ErrSessionTakenOver = Code{Code: 0x8E, Reason: "session takeover"} + ErrTopicFilterInvalid = Code{Code: 0x8F, Reason: "topic filter invalid"} + ErrTopicNameInvalid = Code{Code: 0x90, Reason: "topic name invalid"} + ErrPacketIdentifierInUse = Code{Code: 0x91, Reason: "packet identifier in use"} + ErrPacketIdentifierNotFound = Code{Code: 0x92, Reason: "packet identifier not found"} + ErrReceiveMaximum = Code{Code: 0x93, Reason: "receive maximum exceeded"} + ErrTopicAliasInvalid = Code{Code: 0x94, Reason: "topic alias invalid"} + ErrPacketTooLarge = Code{Code: 0x95, Reason: "packet too large"} + ErrMessageRateTooHigh = Code{Code: 0x96, Reason: "message rate too 
high"} + ErrQuotaExceeded = Code{Code: 0x97, Reason: "quota exceeded"} + ErrPendingClientWritesExceeded = Code{Code: 0x97, Reason: "too many pending writes"} + ErrAdministrativeAction = Code{Code: 0x98, Reason: "administrative action"} + ErrPayloadFormatInvalid = Code{Code: 0x99, Reason: "payload format invalid"} + ErrRetainNotSupported = Code{Code: 0x9A, Reason: "retain not supported"} + ErrQosNotSupported = Code{Code: 0x9B, Reason: "qos not supported"} + ErrUseAnotherServer = Code{Code: 0x9C, Reason: "use another server"} + ErrServerMoved = Code{Code: 0x9D, Reason: "server moved"} + ErrSharedSubscriptionsNotSupported = Code{Code: 0x9E, Reason: "shared subscriptions not supported"} + ErrConnectionRateExceeded = Code{Code: 0x9F, Reason: "connection rate exceeded"} + ErrMaxConnectTime = Code{Code: 0xA0, Reason: "maximum connect time"} + ErrSubscriptionIdentifiersNotSupported = Code{Code: 0xA1, Reason: "subscription identifiers not supported"} + ErrWildcardSubscriptionsNotSupported = Code{Code: 0xA2, Reason: "wildcard subscriptions not supported"} + ErrInlineSubscriptionHandlerInvalid = Code{Code: 0xA3, Reason: "inline subscription handler not valid."} + + // MQTTv3 specific bytes. + Err3UnsupportedProtocolVersion = Code{Code: 0x01} + Err3ClientIdentifierNotValid = Code{Code: 0x02} + Err3ServerUnavailable = Code{Code: 0x03} + ErrMalformedUsernameOrPassword = Code{Code: 0x04} + Err3NotAuthorized = Code{Code: 0x05} + + // V5CodesToV3 maps MQTTv5 Connack reason codes to MQTTv3 return codes. + // This is required because MQTTv3 has different return byte specification. 
+	// See http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc385349257
+	V5CodesToV3 = map[Code]Code{
+		ErrUnsupportedProtocolVersion: Err3UnsupportedProtocolVersion,
+		ErrClientIdentifierNotValid:   Err3ClientIdentifierNotValid,
+		ErrServerUnavailable:          Err3ServerUnavailable,
+		ErrMalformedUsername:          ErrMalformedUsernameOrPassword,
+		ErrMalformedPassword:          ErrMalformedUsernameOrPassword,
+		ErrBadUsernameOrPassword:      Err3NotAuthorized,
+	}
+)
diff --git a/vendor/github.com/mochi-mqtt/server/v2/packets/fixedheader.go b/vendor/github.com/mochi-mqtt/server/v2/packets/fixedheader.go
new file mode 100644
index 000000000..eb20451bf
--- /dev/null
+++ b/vendor/github.com/mochi-mqtt/server/v2/packets/fixedheader.go
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co
+// SPDX-FileContributor: mochi-co
+
+package packets
+
+import (
+	"bytes"
+)
+
+// FixedHeader contains the values of the fixed header portion of the MQTT packet.
+type FixedHeader struct {
+	Remaining int  `json:"remaining"` // the number of remaining bytes in the payload.
+	Type      byte `json:"type"`      // the type of the packet (PUBLISH, SUBSCRIBE, etc) from bits 7 - 4 (byte 1).
+	Qos       byte `json:"qos"`       // indicates the quality of service expected.
+	Dup       bool `json:"dup"`       // indicates if the packet was already sent at an earlier time.
+	Retain    bool `json:"retain"`    // whether the message should be retained.
+}
+
+// Encode encodes the FixedHeader and returns a bytes buffer.
+func (fh *FixedHeader) Encode(buf *bytes.Buffer) {
+	buf.WriteByte(fh.Type<<4 | encodeBool(fh.Dup)<<3 | fh.Qos<<1 | encodeBool(fh.Retain))
+	encodeLength(buf, int64(fh.Remaining))
+}
+
+// Decode extracts the specification bits from the header byte.
+func (fh *FixedHeader) Decode(hb byte) error {
+	fh.Type = hb >> 4 // Get the message type from the upper 4 bits.
+
+	switch fh.Type {
+	case Publish:
+		if (hb>>1)&0x01 > 0 && (hb>>1)&0x02 > 0 {
+			return ErrProtocolViolationQosOutOfRange // [MQTT-3.3.1-4]
+		}
+
+		fh.Dup = (hb>>3)&0x01 > 0 // is duplicate
+		fh.Qos = (hb >> 1) & 0x03 // qos flag
+		fh.Retain = hb&0x01 > 0   // is retain flag
+	case Pubrel:
+		fallthrough
+	case Subscribe:
+		fallthrough
+	case Unsubscribe:
+		if (hb>>0)&0x01 != 0 || (hb>>1)&0x01 != 1 || (hb>>2)&0x01 != 0 || (hb>>3)&0x01 != 0 { // [MQTT-3.8.1-1] [MQTT-3.10.1-1]
+			return ErrMalformedFlags
+		}
+
+		fh.Qos = (hb >> 1) & 0x03
+	default:
+		if (hb>>0)&0x01 != 0 ||
+			(hb>>1)&0x01 != 0 ||
+			(hb>>2)&0x01 != 0 ||
+			(hb>>3)&0x01 != 0 { // [MQTT-3.8.3-5] [MQTT-3.14.1-1] [MQTT-3.15.1-1]
+			return ErrMalformedFlags
+		}
+	}
+
+	if fh.Qos == 0 && fh.Dup {
+		return ErrProtocolViolationDupNoQos // [MQTT-3.3.1-2]
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/mochi-mqtt/server/v2/packets/packets.go b/vendor/github.com/mochi-mqtt/server/v2/packets/packets.go
new file mode 100644
index 000000000..d91975c6c
--- /dev/null
+++ b/vendor/github.com/mochi-mqtt/server/v2/packets/packets.go
@@ -0,0 +1,1172 @@
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co
+// SPDX-FileContributor: mochi-co
+
+package packets
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/mochi-mqtt/server/v2/mempool"
+)
+
+// All valid packet types and their packet identifiers.
+const (
+	Reserved    byte = iota // 0 - we use this in packet tests to indicate special-test or all packets.
+	Connect     // 1
+	Connack     // 2
+	Publish     // 3
+	Puback      // 4
+	Pubrec      // 5
+	Pubrel      // 6
+	Pubcomp     // 7
+	Subscribe   // 8
+	Suback      // 9
+	Unsubscribe // 10
+	Unsuback    // 11
+	Pingreq     // 12
+	Pingresp    // 13
+	Disconnect  // 14
+	Auth        // 15
+	WillProperties byte = 99 // Special byte for validating Will Properties.
+)
+
+var (
+	// ErrNoValidPacketAvailable indicates the packet type byte provided does not exist in the mqtt specification.
+ ErrNoValidPacketAvailable = errors.New("no valid packet available") + + // PacketNames is a map of packet bytes to human-readable names, for easier debugging. + PacketNames = map[byte]string{ + 0: "Reserved", + 1: "Connect", + 2: "Connack", + 3: "Publish", + 4: "Puback", + 5: "Pubrec", + 6: "Pubrel", + 7: "Pubcomp", + 8: "Subscribe", + 9: "Suback", + 10: "Unsubscribe", + 11: "Unsuback", + 12: "Pingreq", + 13: "Pingresp", + 14: "Disconnect", + 15: "Auth", + } +) + +// Packets is a concurrency safe map of packets. +type Packets struct { + internal map[string]Packet + sync.RWMutex +} + +// NewPackets returns a new instance of Packets. +func NewPackets() *Packets { + return &Packets{ + internal: map[string]Packet{}, + } +} + +// Add adds a new packet to the map. +func (p *Packets) Add(id string, val Packet) { + p.Lock() + defer p.Unlock() + p.internal[id] = val +} + +// GetAll returns all packets in the map. +func (p *Packets) GetAll() map[string]Packet { + p.RLock() + defer p.RUnlock() + m := map[string]Packet{} + for k, v := range p.internal { + m[k] = v + } + return m +} + +// Get returns a specific packet in the map by packet id. +func (p *Packets) Get(id string) (val Packet, ok bool) { + p.RLock() + defer p.RUnlock() + val, ok = p.internal[id] + return val, ok +} + +// Len returns the number of packets in the map. +func (p *Packets) Len() int { + p.RLock() + defer p.RUnlock() + val := len(p.internal) + return val +} + +// Delete removes a packet from the map by packet id. +func (p *Packets) Delete(id string) { + p.Lock() + defer p.Unlock() + delete(p.internal, id) +} + +// Packet represents an MQTT packet. Instead of providing a packet interface +// variant packet structs, this is a single concrete packet type to cover all packet +// types, which allows us to take advantage of various compiler optimizations. It +// contains a combination of mqtt spec values and internal broker control codes. 
+type Packet struct { + Connect ConnectParams // parameters for connect packets (just for organisation) + Properties Properties // all mqtt v5 packet properties + Payload []byte // a message/payload for publish packets + ReasonCodes []byte // one or more reason codes for multi-reason responses (suback, etc) + Filters Subscriptions // a list of subscription filters and their properties (subscribe, unsubscribe) + TopicName string // the topic a payload is being published to + Origin string // client id of the client who is issuing the packet (mostly internal use) + FixedHeader FixedHeader // - + Created int64 // unix timestamp indicating time packet was created/received on the server + Expiry int64 // unix timestamp indicating when the packet will expire and should be deleted + Mods Mods // internal broker control values for controlling certain mqtt v5 compliance + PacketID uint16 // packet id for the packet (publish, qos, etc) + ProtocolVersion byte // protocol version of the client the packet belongs to + SessionPresent bool // session existed for connack + ReasonCode byte // reason code for a packet response (acks, etc) + ReservedBit byte // reserved, do not use (except in testing) + Ignore bool // if true, do not perform any message forwarding operations +} + +// Mods specifies certain values required for certain mqtt v5 compliance within packet encoding/decoding. +type Mods struct { + MaxSize uint32 // the maximum packet size specified by the client / server + DisallowProblemInfo bool // if problem info is disallowed + AllowResponseInfo bool // if response info is disallowed +} + +// ConnectParams contains packet values which are specifically related to connect packets. 
+type ConnectParams struct { + WillProperties Properties `json:"willProperties"` // - + Password []byte `json:"password"` // - + Username []byte `json:"username"` // - + ProtocolName []byte `json:"protocolName"` // - + WillPayload []byte `json:"willPayload"` // - + ClientIdentifier string `json:"clientId"` // - + WillTopic string `json:"willTopic"` // - + Keepalive uint16 `json:"keepalive"` // - + PasswordFlag bool `json:"passwordFlag"` // - + UsernameFlag bool `json:"usernameFlag"` // - + WillQos byte `json:"willQos"` // - + WillFlag bool `json:"willFlag"` // - + WillRetain bool `json:"willRetain"` // - + Clean bool `json:"clean"` // CleanSession in v3.1.1, CleanStart in v5 +} + +// Subscriptions is a slice of Subscription. +type Subscriptions []Subscription // must be a slice to retain order. + +// Subscription contains details about a client subscription to a topic filter. +type Subscription struct { + ShareName []string + Filter string + Identifier int + Identifiers map[string]int + RetainHandling byte + Qos byte + RetainAsPublished bool + NoLocal bool + FwdRetainedFlag bool // true if the subscription forms part of a publish response to a client subscription and packet is retained. +} + +// Copy creates a new instance of a packet, but with an empty header for inheriting new QoS flags, etc. 
+func (pk *Packet) Copy(allowTransfer bool) Packet { + p := Packet{ + FixedHeader: FixedHeader{ + Remaining: pk.FixedHeader.Remaining, + Type: pk.FixedHeader.Type, + Retain: pk.FixedHeader.Retain, + Dup: false, // [MQTT-4.3.1-1] [MQTT-4.3.2-2] + Qos: pk.FixedHeader.Qos, + }, + Mods: Mods{ + MaxSize: pk.Mods.MaxSize, + }, + ReservedBit: pk.ReservedBit, + ProtocolVersion: pk.ProtocolVersion, + Connect: ConnectParams{ + ClientIdentifier: pk.Connect.ClientIdentifier, + Keepalive: pk.Connect.Keepalive, + WillQos: pk.Connect.WillQos, + WillTopic: pk.Connect.WillTopic, + WillFlag: pk.Connect.WillFlag, + WillRetain: pk.Connect.WillRetain, + WillProperties: pk.Connect.WillProperties.Copy(allowTransfer), + Clean: pk.Connect.Clean, + }, + TopicName: pk.TopicName, + Properties: pk.Properties.Copy(allowTransfer), + SessionPresent: pk.SessionPresent, + ReasonCode: pk.ReasonCode, + Filters: pk.Filters, + Created: pk.Created, + Expiry: pk.Expiry, + Origin: pk.Origin, + } + + if allowTransfer { + p.PacketID = pk.PacketID + } + + if len(pk.Connect.ProtocolName) > 0 { + p.Connect.ProtocolName = append([]byte{}, pk.Connect.ProtocolName...) + } + + if len(pk.Connect.Password) > 0 { + p.Connect.PasswordFlag = true + p.Connect.Password = append([]byte{}, pk.Connect.Password...) + } + + if len(pk.Connect.Username) > 0 { + p.Connect.UsernameFlag = true + p.Connect.Username = append([]byte{}, pk.Connect.Username...) + } + + if len(pk.Connect.WillPayload) > 0 { + p.Connect.WillPayload = append([]byte{}, pk.Connect.WillPayload...) + } + + if len(pk.Payload) > 0 { + p.Payload = append([]byte{}, pk.Payload...) + } + + if len(pk.ReasonCodes) > 0 { + p.ReasonCodes = append([]byte{}, pk.ReasonCodes...) + } + + return p +} + +// Merge merges a new subscription with a base subscription, preserving the highest +// qos value, matched identifiers and any special properties. 
func (s Subscription) Merge(n Subscription) Subscription {
	// Lazily initialise the identifier map with the base subscription's own
	// filter/identifier pair before recording the newly matched one.
	if s.Identifiers == nil {
		s.Identifiers = map[string]int{
			s.Filter: s.Identifier,
		}
	}

	if n.Identifier > 0 {
		s.Identifiers[n.Filter] = n.Identifier
	}

	if n.Qos > s.Qos {
		s.Qos = n.Qos // [MQTT-3.3.4-2]
	}

	if n.NoLocal {
		s.NoLocal = true // [MQTT-3.8.3-3]
	}

	return s
}

// encode encodes a subscription and properties into bytes.
// Bit layout: qos (bits 0-1), no-local (bit 2), retain-as-published (bit 3),
// retain-handling (bits 4-5). decode is the inverse.
func (s Subscription) encode() byte {
	var flag byte
	flag |= s.Qos

	if s.NoLocal {
		flag |= 1 << 2
	}

	if s.RetainAsPublished {
		flag |= 1 << 3
	}

	flag |= s.RetainHandling << 4
	return flag
}

// decode decodes subscription bytes into a subscription struct.
// See encode for the option-byte bit layout.
func (s *Subscription) decode(b byte) {
	s.Qos = b & 3                      // byte
	s.NoLocal = 1&(b>>2) > 0           // bool
	s.RetainAsPublished = 1&(b>>3) > 0 // bool
	s.RetainHandling = 3 & (b >> 4)    // byte
}

// ConnectEncode encodes a connect packet.
// The variable header and payload are staged in a pooled buffer so the
// remaining-length field can be computed before the fixed header is written.
func (pk *Packet) ConnectEncode(buf *bytes.Buffer) error {
	nb := mempool.GetBuffer()
	defer mempool.PutBuffer(nb)
	nb.Write(encodeBytes(pk.Connect.ProtocolName))
	nb.WriteByte(pk.ProtocolVersion)

	// Pack the connect flags byte; bit 0 is reserved and must be 0.
	nb.WriteByte(
		encodeBool(pk.Connect.Clean)<<1 |
			encodeBool(pk.Connect.WillFlag)<<2 |
			pk.Connect.WillQos<<3 |
			encodeBool(pk.Connect.WillRetain)<<5 |
			encodeBool(pk.Connect.PasswordFlag)<<6 |
			encodeBool(pk.Connect.UsernameFlag)<<7 |
			0, // [MQTT-2.1.3-1]
	)

	nb.Write(encodeUint16(pk.Connect.Keepalive))

	// Only mqtt v5 packets carry a properties block.
	if pk.ProtocolVersion == 5 {
		pb := mempool.GetBuffer()
		defer mempool.PutBuffer(pb)
		(&pk.Properties).Encode(pk.FixedHeader.Type, pk.Mods, pb, 0)
		nb.Write(pb.Bytes())
	}

	nb.Write(encodeString(pk.Connect.ClientIdentifier))

	if pk.Connect.WillFlag {
		if pk.ProtocolVersion == 5 {
			pb := mempool.GetBuffer()
			defer mempool.PutBuffer(pb)
			(&pk.Connect).WillProperties.Encode(WillProperties, pk.Mods, pb, 0)
			nb.Write(pb.Bytes())
		}

		nb.Write(encodeString(pk.Connect.WillTopic))
		nb.Write(encodeBytes(pk.Connect.WillPayload))
	}

	if pk.Connect.UsernameFlag {
		nb.Write(encodeBytes(pk.Connect.Username))
	}

	if pk.Connect.PasswordFlag {
		nb.Write(encodeBytes(pk.Connect.Password))
	}

	pk.FixedHeader.Remaining = nb.Len()
	pk.FixedHeader.Encode(buf)
	buf.Write(nb.Bytes())

	return nil
}

// ConnectDecode decodes a connect packet.
// Fields are consumed in wire order, threading offset through each decode
// helper; each failure is mapped to a packet-specific malformed error.
func (pk *Packet) ConnectDecode(buf []byte) error {
	var offset int
	var err error

	pk.Connect.ProtocolName, offset, err = decodeBytes(buf, 0)
	if err != nil {
		return ErrMalformedProtocolName
	}

	pk.ProtocolVersion, offset, err = decodeByte(buf, offset)
	if err != nil {
		return ErrMalformedProtocolVersion
	}

	flags, offset, err := decodeByte(buf, offset)
	if err != nil {
		return ErrMalformedFlags
	}

	// Unpack the connect flags byte (inverse of ConnectEncode).
	pk.ReservedBit = 1 & flags
	pk.Connect.Clean = 1&(flags>>1) > 0
	pk.Connect.WillFlag = 1&(flags>>2) > 0
	pk.Connect.WillQos = 3 & (flags >> 3) // this one is not a bool
	pk.Connect.WillRetain = 1&(flags>>5) > 0
	pk.Connect.PasswordFlag = 1&(flags>>6) > 0
	pk.Connect.UsernameFlag = 1&(flags>>7) > 0

	pk.Connect.Keepalive, offset, err = decodeUint16(buf, offset)
	if err != nil {
		return ErrMalformedKeepalive
	}

	if pk.ProtocolVersion == 5 {
		n, err := pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:]))
		if err != nil {
			return fmt.Errorf("%s: %w", err, ErrMalformedProperties)
		}
		offset += n
	}

	pk.Connect.ClientIdentifier, offset, err = decodeString(buf, offset) // [MQTT-3.1.3-1] [MQTT-3.1.3-2] [MQTT-3.1.3-3] [MQTT-3.1.3-4]
	if err != nil {
		return ErrClientIdentifierNotValid // [MQTT-3.1.3-8]
	}

	if pk.Connect.WillFlag { // [MQTT-3.1.2-7]
		if pk.ProtocolVersion == 5 {
			n, err := pk.Connect.WillProperties.Decode(WillProperties, bytes.NewBuffer(buf[offset:]))
			if err != nil {
				return ErrMalformedWillProperties
			}
			offset += n
		}

		pk.Connect.WillTopic, offset, err = decodeString(buf, offset)
		if err != nil {
			return ErrMalformedWillTopic
		}

		pk.Connect.WillPayload, offset, err = decodeBytes(buf, offset)
		if err != nil {
			return ErrMalformedWillPayload
		}
	}

	if pk.Connect.UsernameFlag { // [MQTT-3.1.3-12]
		if offset >= len(buf) { // we are at the end of the packet
			return ErrProtocolViolationFlagNoUsername // [MQTT-3.1.2-17]
		}

		pk.Connect.Username, offset, err = decodeBytes(buf, offset)
		if err != nil {
			return ErrMalformedUsername
		}
	}

	if pk.Connect.PasswordFlag {
		pk.Connect.Password, _, err = decodeBytes(buf, offset)
		if err != nil {
			return ErrMalformedPassword
		}
	}

	return nil
}

// ConnectValidate ensures the connect packet is compliant.
// Checks are ordered so the most specific protocol violation is reported;
// reordering them would change which code a multiply-invalid packet gets.
func (pk *Packet) ConnectValidate() Code {
	// The protocol name must be MQIsdp (v3.1) or MQTT (v3.1.1 / v5).
	if !bytes.Equal(pk.Connect.ProtocolName, []byte{'M', 'Q', 'I', 's', 'd', 'p'}) && !bytes.Equal(pk.Connect.ProtocolName, []byte{'M', 'Q', 'T', 'T'}) {
		return ErrProtocolViolationProtocolName // [MQTT-3.1.2-1]
	}

	// The protocol version must agree with the protocol name.
	if (bytes.Equal(pk.Connect.ProtocolName, []byte{'M', 'Q', 'I', 's', 'd', 'p'}) && pk.ProtocolVersion != 3) ||
		(bytes.Equal(pk.Connect.ProtocolName, []byte{'M', 'Q', 'T', 'T'}) && pk.ProtocolVersion != 4 && pk.ProtocolVersion != 5) {
		return ErrProtocolViolationProtocolVersion // [MQTT-3.1.2-2]
	}

	if pk.ReservedBit != 0 {
		return ErrProtocolViolationReservedBit // [MQTT-3.1.2-3]
	}

	// Username/password values are length-prefixed with a uint16 on the wire,
	// so anything longer cannot be encoded.
	if len(pk.Connect.Password) > math.MaxUint16 {
		return ErrProtocolViolationPasswordTooLong
	}

	if len(pk.Connect.Username) > math.MaxUint16 {
		return ErrProtocolViolationUsernameTooLong
	}

	if !pk.Connect.UsernameFlag && len(pk.Connect.Username) > 0 {
		return ErrProtocolViolationUsernameNoFlag // [MQTT-3.1.2-16]
	}

	if pk.Connect.PasswordFlag && len(pk.Connect.Password) == 0 {
		return ErrProtocolViolationFlagNoPassword // [MQTT-3.1.2-19]
	}

	if !pk.Connect.PasswordFlag && len(pk.Connect.Password) > 0 {
		return ErrProtocolViolationPasswordNoFlag // [MQTT-3.1.2-18]
	}

	if len(pk.Connect.ClientIdentifier) > math.MaxUint16 {
		return ErrClientIdentifierNotValid
	}

	if pk.Connect.WillFlag {
		if len(pk.Connect.WillPayload) == 0 || pk.Connect.WillTopic == "" {
			return ErrProtocolViolationWillFlagNoPayload // [MQTT-3.1.2-9]
		}

		if pk.Connect.WillQos > 2 {
			return ErrProtocolViolationQosOutOfRange // [MQTT-3.1.2-12]
		}
	}

	if !pk.Connect.WillFlag && pk.Connect.WillRetain {
		return ErrProtocolViolationWillFlagSurplusRetain // [MQTT-3.1.2-13]
	}

	return CodeSuccess
}

// ConnackEncode encodes a Connack packet.
func (pk *Packet) ConnackEncode(buf *bytes.Buffer) error {
	nb := mempool.GetBuffer()
	defer mempool.PutBuffer(nb)
	nb.WriteByte(encodeBool(pk.SessionPresent))
	nb.WriteByte(pk.ReasonCode)

	if pk.ProtocolVersion == 5 {
		pb := mempool.GetBuffer()
		defer mempool.PutBuffer(pb)
		pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()+2) // +SessionPresent +ReasonCode
		nb.Write(pb.Bytes())
	}

	pk.FixedHeader.Remaining = nb.Len()
	pk.FixedHeader.Encode(buf)
	buf.Write(nb.Bytes())

	return nil
}

// ConnackDecode decodes a Connack packet.
func (pk *Packet) ConnackDecode(buf []byte) error {
	var offset int
	var err error

	pk.SessionPresent, offset, err = decodeByteBool(buf, 0)
	if err != nil {
		return fmt.Errorf("%s: %w", err, ErrMalformedSessionPresent)
	}

	pk.ReasonCode, offset, err = decodeByte(buf, offset)
	if err != nil {
		return fmt.Errorf("%s: %w", err, ErrMalformedReasonCode)
	}

	if pk.ProtocolVersion == 5 {
		_, err := pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:]))
		if err != nil {
			return fmt.Errorf("%s: %w", err, ErrMalformedProperties)
		}
	}

	return nil
}

// DisconnectEncode encodes a Disconnect packet.
+func (pk *Packet) DisconnectEncode(buf *bytes.Buffer) error { + nb := mempool.GetBuffer() + defer mempool.PutBuffer(nb) + + if pk.ProtocolVersion == 5 { + nb.WriteByte(pk.ReasonCode) + + pb := mempool.GetBuffer() + defer mempool.PutBuffer(pb) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()) + nb.Write(pb.Bytes()) + } + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + buf.Write(nb.Bytes()) + + return nil +} + +// DisconnectDecode decodes a Disconnect packet. +func (pk *Packet) DisconnectDecode(buf []byte) error { + if pk.ProtocolVersion == 5 && pk.FixedHeader.Remaining > 1 { + var err error + var offset int + pk.ReasonCode, offset, err = decodeByte(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedReasonCode) + } + + if pk.FixedHeader.Remaining > 2 { + _, err = pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + } + } + + return nil +} + +// PingreqEncode encodes a Pingreq packet. +func (pk *Packet) PingreqEncode(buf *bytes.Buffer) error { + pk.FixedHeader.Encode(buf) + return nil +} + +// PingreqDecode decodes a Pingreq packet. +func (pk *Packet) PingreqDecode(buf []byte) error { + return nil +} + +// PingrespEncode encodes a Pingresp packet. +func (pk *Packet) PingrespEncode(buf *bytes.Buffer) error { + pk.FixedHeader.Encode(buf) + return nil +} + +// PingrespDecode decodes a Pingres packet. +func (pk *Packet) PingrespDecode(buf []byte) error { + return nil +} + +// PublishEncode encodes a Publish packet. 
+func (pk *Packet) PublishEncode(buf *bytes.Buffer) error { + nb := mempool.GetBuffer() + defer mempool.PutBuffer(nb) + + nb.Write(encodeString(pk.TopicName)) // [MQTT-3.3.2-1] + + if pk.FixedHeader.Qos > 0 { + if pk.PacketID == 0 { + return ErrProtocolViolationNoPacketID // [MQTT-2.2.1-2] + } + nb.Write(encodeUint16(pk.PacketID)) + } + + if pk.ProtocolVersion == 5 { + pb := mempool.GetBuffer() + defer mempool.PutBuffer(pb) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()+len(pk.Payload)) + nb.Write(pb.Bytes()) + } + + pk.FixedHeader.Remaining = nb.Len() + len(pk.Payload) + pk.FixedHeader.Encode(buf) + buf.Write(nb.Bytes()) + buf.Write(pk.Payload) + + return nil +} + +// PublishDecode extracts the data values from the packet. +func (pk *Packet) PublishDecode(buf []byte) error { + var offset int + var err error + + pk.TopicName, offset, err = decodeString(buf, 0) // [MQTT-3.3.2-1] + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedTopic) + } + + if pk.FixedHeader.Qos > 0 { + pk.PacketID, offset, err = decodeUint16(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedPacketID) + } + } + + if pk.ProtocolVersion == 5 { + n, err := pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + + offset += n + } + + pk.Payload = buf[offset:] + + return nil +} + +// PublishValidate validates a publish packet. 
+func (pk *Packet) PublishValidate(topicAliasMaximum uint16) Code { + if pk.FixedHeader.Qos > 0 && pk.PacketID == 0 { + return ErrProtocolViolationNoPacketID // [MQTT-2.2.1-3] [MQTT-2.2.1-4] + } + + if pk.FixedHeader.Qos == 0 && pk.PacketID > 0 { + return ErrProtocolViolationSurplusPacketID // [MQTT-2.2.1-2] + } + + if strings.ContainsAny(pk.TopicName, "+#") { + return ErrProtocolViolationSurplusWildcard // [MQTT-3.3.2-2] + } + + if pk.Properties.TopicAlias > topicAliasMaximum { + return ErrTopicAliasInvalid // [MQTT-3.2.2-17] [MQTT-3.3.2-9] ~[MQTT-3.3.2-10] [MQTT-3.3.2-12] + } + + if pk.TopicName == "" && pk.Properties.TopicAlias == 0 { + return ErrProtocolViolationNoTopic // ~[MQTT-3.3.2-8] + } + + if pk.Properties.TopicAliasFlag && pk.Properties.TopicAlias == 0 { + return ErrTopicAliasInvalid // [MQTT-3.3.2-8] + } + + if len(pk.Properties.SubscriptionIdentifier) > 0 { + return ErrProtocolViolationSurplusSubID // [MQTT-3.3.4-6] + } + + return CodeSuccess +} + +// encodePubAckRelRecComp encodes a Puback, Pubrel, Pubrec, or Pubcomp packet. +func (pk *Packet) encodePubAckRelRecComp(buf *bytes.Buffer) error { + nb := mempool.GetBuffer() + defer mempool.PutBuffer(nb) + nb.Write(encodeUint16(pk.PacketID)) + + if pk.ProtocolVersion == 5 { + pb := mempool.GetBuffer() + defer mempool.PutBuffer(pb) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()) + if pk.ReasonCode >= ErrUnspecifiedError.Code || pb.Len() > 1 { + nb.WriteByte(pk.ReasonCode) + } + + if pb.Len() > 1 { + nb.Write(pb.Bytes()) + } + } + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + buf.Write(nb.Bytes()) + return nil +} + +// decode extracts the data values from a Puback, Pubrel, Pubrec, or Pubcomp packet. 
+func (pk *Packet) decodePubAckRelRecComp(buf []byte) error { + var offset int + var err error + pk.PacketID, offset, err = decodeUint16(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedPacketID) + } + + if pk.ProtocolVersion == 5 && pk.FixedHeader.Remaining > 2 { + pk.ReasonCode, offset, err = decodeByte(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedReasonCode) + } + + if pk.FixedHeader.Remaining > 3 { + _, err = pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + } + } + + return nil +} + +// PubackEncode encodes a Puback packet. +func (pk *Packet) PubackEncode(buf *bytes.Buffer) error { + return pk.encodePubAckRelRecComp(buf) +} + +// PubackDecode decodes a Puback packet. +func (pk *Packet) PubackDecode(buf []byte) error { + return pk.decodePubAckRelRecComp(buf) +} + +// PubcompEncode encodes a Pubcomp packet. +func (pk *Packet) PubcompEncode(buf *bytes.Buffer) error { + return pk.encodePubAckRelRecComp(buf) +} + +// PubcompDecode decodes a Pubcomp packet. +func (pk *Packet) PubcompDecode(buf []byte) error { + return pk.decodePubAckRelRecComp(buf) +} + +// PubrecEncode encodes a Pubrec packet. +func (pk *Packet) PubrecEncode(buf *bytes.Buffer) error { + return pk.encodePubAckRelRecComp(buf) +} + +// PubrecDecode decodes a Pubrec packet. +func (pk *Packet) PubrecDecode(buf []byte) error { + return pk.decodePubAckRelRecComp(buf) +} + +// PubrelEncode encodes a Pubrel packet. +func (pk *Packet) PubrelEncode(buf *bytes.Buffer) error { + return pk.encodePubAckRelRecComp(buf) +} + +// PubrelDecode decodes a Pubrel packet. +func (pk *Packet) PubrelDecode(buf []byte) error { + return pk.decodePubAckRelRecComp(buf) +} + +// ReasonCodeValid returns true if the provided reason code is valid for the packet type. 
+func (pk *Packet) ReasonCodeValid() bool { + switch pk.FixedHeader.Type { + case Pubrec: + return bytes.Contains([]byte{ + CodeSuccess.Code, + CodeNoMatchingSubscribers.Code, + ErrUnspecifiedError.Code, + ErrImplementationSpecificError.Code, + ErrNotAuthorized.Code, + ErrTopicNameInvalid.Code, + ErrPacketIdentifierInUse.Code, + ErrQuotaExceeded.Code, + ErrPayloadFormatInvalid.Code, + }, []byte{pk.ReasonCode}) + case Pubrel: + fallthrough + case Pubcomp: + return bytes.Contains([]byte{ + CodeSuccess.Code, + ErrPacketIdentifierNotFound.Code, + }, []byte{pk.ReasonCode}) + case Suback: + return bytes.Contains([]byte{ + CodeGrantedQos0.Code, + CodeGrantedQos1.Code, + CodeGrantedQos2.Code, + ErrUnspecifiedError.Code, + ErrImplementationSpecificError.Code, + ErrNotAuthorized.Code, + ErrTopicFilterInvalid.Code, + ErrPacketIdentifierInUse.Code, + ErrQuotaExceeded.Code, + ErrSharedSubscriptionsNotSupported.Code, + ErrSubscriptionIdentifiersNotSupported.Code, + ErrWildcardSubscriptionsNotSupported.Code, + }, []byte{pk.ReasonCode}) + case Unsuback: + return bytes.Contains([]byte{ + CodeSuccess.Code, + CodeNoSubscriptionExisted.Code, + ErrUnspecifiedError.Code, + ErrImplementationSpecificError.Code, + ErrNotAuthorized.Code, + ErrTopicFilterInvalid.Code, + ErrPacketIdentifierInUse.Code, + }, []byte{pk.ReasonCode}) + } + + return true +} + +// SubackEncode encodes a Suback packet. +func (pk *Packet) SubackEncode(buf *bytes.Buffer) error { + nb := mempool.GetBuffer() + defer mempool.PutBuffer(nb) + nb.Write(encodeUint16(pk.PacketID)) + + if pk.ProtocolVersion == 5 { + pb := mempool.GetBuffer() + defer mempool.PutBuffer(pb) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()+len(pk.ReasonCodes)) + nb.Write(pb.Bytes()) + } + + nb.Write(pk.ReasonCodes) + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + buf.Write(nb.Bytes()) + + return nil +} + +// SubackDecode decodes a Suback packet. 
+func (pk *Packet) SubackDecode(buf []byte) error { + var offset int + var err error + + pk.PacketID, offset, err = decodeUint16(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedPacketID) + } + + if pk.ProtocolVersion == 5 { + n, err := pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + offset += n + } + + pk.ReasonCodes = buf[offset:] + + return nil +} + +// SubscribeEncode encodes a Subscribe packet. +func (pk *Packet) SubscribeEncode(buf *bytes.Buffer) error { + if pk.PacketID == 0 { + return ErrProtocolViolationNoPacketID + } + + nb := mempool.GetBuffer() + defer mempool.PutBuffer(nb) + nb.Write(encodeUint16(pk.PacketID)) + + xb := mempool.GetBuffer() // capture and write filters after length checks + defer mempool.PutBuffer(xb) + for _, opts := range pk.Filters { + xb.Write(encodeString(opts.Filter)) // [MQTT-3.8.3-1] + if pk.ProtocolVersion == 5 { + xb.WriteByte(opts.encode()) + } else { + xb.WriteByte(opts.Qos) + } + } + + if pk.ProtocolVersion == 5 { + pb := mempool.GetBuffer() + defer mempool.PutBuffer(pb) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()+xb.Len()) + nb.Write(pb.Bytes()) + } + + nb.Write(xb.Bytes()) + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + buf.Write(nb.Bytes()) + + return nil +} + +// SubscribeDecode decodes a Subscribe packet. 
func (pk *Packet) SubscribeDecode(buf []byte) error {
	var offset int
	var err error

	pk.PacketID, offset, err = decodeUint16(buf, offset)
	if err != nil {
		return ErrMalformedPacketID
	}

	// v5 packets carry a properties block between the packet id and the
	// topic filter list.
	if pk.ProtocolVersion == 5 {
		n, err := pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:]))
		if err != nil {
			return fmt.Errorf("%s: %w", err, ErrMalformedProperties)
		}
		offset += n
	}

	// The remainder of the packet is a sequence of length-prefixed filters,
	// each followed by one options byte (v5) or a bare qos byte (v3/v4).
	var filter string
	pk.Filters = Subscriptions{}
	for offset < len(buf) {
		filter, offset, err = decodeString(buf, offset) // [MQTT-3.8.3-1]
		if err != nil {
			return ErrMalformedTopic
		}

		var option byte
		sub := &Subscription{
			Filter: filter,
		}

		if pk.ProtocolVersion == 5 {
			sub.decode(buf[offset])
			offset += 1
		} else {
			option, offset, err = decodeByte(buf, offset)
			if err != nil {
				return ErrMalformedQos
			}
			sub.Qos = option
		}

		// Every filter in the packet shares the (single) subscription
		// identifier from the properties block, when present.
		if len(pk.Properties.SubscriptionIdentifier) > 0 {
			sub.Identifier = pk.Properties.SubscriptionIdentifier[0]
		}

		if sub.Qos > 2 {
			return ErrProtocolViolationQosOutOfRange
		}

		pk.Filters = append(pk.Filters, *sub)
	}

	return nil
}

// SubscribeValidate ensures the packet is compliant.
func (pk *Packet) SubscribeValidate() Code {
	if pk.FixedHeader.Qos > 0 && pk.PacketID == 0 {
		return ErrProtocolViolationNoPacketID // [MQTT-2.2.1-3] [MQTT-2.2.1-4]
	}

	if len(pk.Filters) == 0 {
		return ErrProtocolViolationNoFilters // [MQTT-3.10.3-2]
	}

	for _, v := range pk.Filters {
		if v.Identifier > 268435455 { // 3.3.2.3.8 The Subscription Identifier can have the value of 1 to 268,435,455.
			return ErrProtocolViolationOversizeSubID //
		}
	}

	return CodeSuccess
}

// UnsubackEncode encodes an Unsuback packet.
+func (pk *Packet) UnsubackEncode(buf *bytes.Buffer) error { + nb := mempool.GetBuffer() + defer mempool.PutBuffer(nb) + nb.Write(encodeUint16(pk.PacketID)) + + if pk.ProtocolVersion == 5 { + pb := mempool.GetBuffer() + defer mempool.PutBuffer(pb) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()) + nb.Write(pb.Bytes()) + nb.Write(pk.ReasonCodes) + } + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + buf.Write(nb.Bytes()) + + return nil +} + +// UnsubackDecode decodes an Unsuback packet. +func (pk *Packet) UnsubackDecode(buf []byte) error { + var offset int + var err error + + pk.PacketID, offset, err = decodeUint16(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedPacketID) + } + + if pk.ProtocolVersion == 5 { + n, err := pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + + offset += n + + pk.ReasonCodes = buf[offset:] + } + + return nil +} + +// UnsubscribeEncode encodes an Unsubscribe packet. +func (pk *Packet) UnsubscribeEncode(buf *bytes.Buffer) error { + if pk.PacketID == 0 { + return ErrProtocolViolationNoPacketID + } + + nb := mempool.GetBuffer() + defer mempool.PutBuffer(nb) + nb.Write(encodeUint16(pk.PacketID)) + + xb := mempool.GetBuffer() // capture filters and write after length checks + defer mempool.PutBuffer(xb) + for _, sub := range pk.Filters { + xb.Write(encodeString(sub.Filter)) // [MQTT-3.10.3-1] + } + + if pk.ProtocolVersion == 5 { + pb := mempool.GetBuffer() + defer mempool.PutBuffer(pb) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()+xb.Len()) + nb.Write(pb.Bytes()) + } + + nb.Write(xb.Bytes()) + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + buf.Write(nb.Bytes()) + + return nil +} + +// UnsubscribeDecode decodes an Unsubscribe packet. 
+func (pk *Packet) UnsubscribeDecode(buf []byte) error { + var offset int + var err error + + pk.PacketID, offset, err = decodeUint16(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedPacketID) + } + + if pk.ProtocolVersion == 5 { + n, err := pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + offset += n + } + + var filter string + pk.Filters = Subscriptions{} + for offset < len(buf) { + filter, offset, err = decodeString(buf, offset) // [MQTT-3.10.3-1] + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedTopic) + } + pk.Filters = append(pk.Filters, Subscription{Filter: filter}) + } + + return nil +} + +// UnsubscribeValidate validates an Unsubscribe packet. +func (pk *Packet) UnsubscribeValidate() Code { + if pk.FixedHeader.Qos > 0 && pk.PacketID == 0 { + return ErrProtocolViolationNoPacketID // [MQTT-2.2.1-3] [MQTT-2.2.1-4] + } + + if len(pk.Filters) == 0 { + return ErrProtocolViolationNoFilters // [MQTT-3.10.3-2] + } + + return CodeSuccess +} + +// AuthEncode encodes an Auth packet. +func (pk *Packet) AuthEncode(buf *bytes.Buffer) error { + nb := mempool.GetBuffer() + defer mempool.PutBuffer(nb) + nb.WriteByte(pk.ReasonCode) + + pb := mempool.GetBuffer() + defer mempool.PutBuffer(pb) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()) + nb.Write(pb.Bytes()) + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + buf.Write(nb.Bytes()) + return nil +} + +// AuthDecode decodes an Auth packet. 
+func (pk *Packet) AuthDecode(buf []byte) error { + var offset int + var err error + + pk.ReasonCode, offset, err = decodeByte(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedReasonCode) + } + + _, err = pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + + return nil +} + +// AuthValidate returns success if the auth packet is valid. +func (pk *Packet) AuthValidate() Code { + if pk.ReasonCode != CodeSuccess.Code && + pk.ReasonCode != CodeContinueAuthentication.Code && + pk.ReasonCode != CodeReAuthenticate.Code { + return ErrProtocolViolationInvalidReason // [MQTT-3.15.2-1] + } + + return CodeSuccess +} + +// FormatID returns the PacketID field as a decimal integer. +func (pk *Packet) FormatID() string { + return strconv.FormatUint(uint64(pk.PacketID), 10) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/packets/properties.go b/vendor/github.com/mochi-mqtt/server/v2/packets/properties.go new file mode 100644 index 000000000..baadd883d --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/packets/properties.go @@ -0,0 +1,481 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package packets + +import ( + "bytes" + "fmt" + "strings" + + "github.com/mochi-mqtt/server/v2/mempool" +) + +const ( + PropPayloadFormat byte = 1 + PropMessageExpiryInterval byte = 2 + PropContentType byte = 3 + PropResponseTopic byte = 8 + PropCorrelationData byte = 9 + PropSubscriptionIdentifier byte = 11 + PropSessionExpiryInterval byte = 17 + PropAssignedClientID byte = 18 + PropServerKeepAlive byte = 19 + PropAuthenticationMethod byte = 21 + PropAuthenticationData byte = 22 + PropRequestProblemInfo byte = 23 + PropWillDelayInterval byte = 24 + PropRequestResponseInfo byte = 25 + PropResponseInfo byte = 26 + PropServerReference byte = 28 + PropReasonString byte = 31 + 
PropReceiveMaximum byte = 33 + PropTopicAliasMaximum byte = 34 + PropTopicAlias byte = 35 + PropMaximumQos byte = 36 + PropRetainAvailable byte = 37 + PropUser byte = 38 + PropMaximumPacketSize byte = 39 + PropWildcardSubAvailable byte = 40 + PropSubIDAvailable byte = 41 + PropSharedSubAvailable byte = 42 +) + +// validPacketProperties indicates which properties are valid for which packet types. +var validPacketProperties = map[byte]map[byte]byte{ + PropPayloadFormat: {Publish: 1, WillProperties: 1}, + PropMessageExpiryInterval: {Publish: 1, WillProperties: 1}, + PropContentType: {Publish: 1, WillProperties: 1}, + PropResponseTopic: {Publish: 1, WillProperties: 1}, + PropCorrelationData: {Publish: 1, WillProperties: 1}, + PropSubscriptionIdentifier: {Publish: 1, Subscribe: 1}, + PropSessionExpiryInterval: {Connect: 1, Connack: 1, Disconnect: 1}, + PropAssignedClientID: {Connack: 1}, + PropServerKeepAlive: {Connack: 1}, + PropAuthenticationMethod: {Connect: 1, Connack: 1, Auth: 1}, + PropAuthenticationData: {Connect: 1, Connack: 1, Auth: 1}, + PropRequestProblemInfo: {Connect: 1}, + PropWillDelayInterval: {WillProperties: 1}, + PropRequestResponseInfo: {Connect: 1}, + PropResponseInfo: {Connack: 1}, + PropServerReference: {Connack: 1, Disconnect: 1}, + PropReasonString: {Connack: 1, Puback: 1, Pubrec: 1, Pubrel: 1, Pubcomp: 1, Suback: 1, Unsuback: 1, Disconnect: 1, Auth: 1}, + PropReceiveMaximum: {Connect: 1, Connack: 1}, + PropTopicAliasMaximum: {Connect: 1, Connack: 1}, + PropTopicAlias: {Publish: 1}, + PropMaximumQos: {Connack: 1}, + PropRetainAvailable: {Connack: 1}, + PropUser: {Connect: 1, Connack: 1, Publish: 1, Puback: 1, Pubrec: 1, Pubrel: 1, Pubcomp: 1, Subscribe: 1, Suback: 1, Unsubscribe: 1, Unsuback: 1, Disconnect: 1, Auth: 1, WillProperties: 1}, + PropMaximumPacketSize: {Connect: 1, Connack: 1}, + PropWildcardSubAvailable: {Connack: 1}, + PropSubIDAvailable: {Connack: 1}, + PropSharedSubAvailable: {Connack: 1}, +} + +// UserProperty is an arbitrary 
key-value pair for a packet user properties array.
type UserProperty struct { // [MQTT-1.5.7-1]
	Key string `json:"k"`
	Val string `json:"v"`
}

// Properties contains all mqtt v5 properties available for a packet.
// Some properties have valid values of 0 or not-present. In this case, we opt for
// property flags to indicate the usage of property.
// Refer to mqtt v5 2.2.2.2 Property spec for more information.
type Properties struct {
	// Variable-length properties.
	CorrelationData        []byte         `json:"cd"`
	SubscriptionIdentifier []int          `json:"si"`
	AuthenticationData     []byte         `json:"ad"`
	User                   []UserProperty `json:"user"`
	// String-valued properties.
	ContentType          string `json:"ct"`
	ResponseTopic        string `json:"rt"`
	AssignedClientID     string `json:"aci"`
	AuthenticationMethod string `json:"am"`
	ResponseInfo         string `json:"ri"`
	ServerReference      string `json:"sr"`
	ReasonString         string `json:"rs"`
	// Numeric properties.
	MessageExpiryInterval uint32 `json:"me"`
	SessionExpiryInterval uint32 `json:"sei"`
	WillDelayInterval     uint32 `json:"wdi"`
	MaximumPacketSize     uint32 `json:"mps"`
	ServerKeepAlive       uint16 `json:"ska"`
	ReceiveMaximum        uint16 `json:"rm"`
	TopicAliasMaximum     uint16 `json:"tam"`
	TopicAlias            uint16 `json:"ta"`
	// Byte-valued properties; each *Flag bool records that the property was
	// set, since 0 is a valid value for many of them.
	PayloadFormat             byte `json:"pf"`
	PayloadFormatFlag         bool `json:"fpf"`
	SessionExpiryIntervalFlag bool `json:"fsei"`
	ServerKeepAliveFlag       bool `json:"fska"`
	RequestProblemInfo        byte `json:"rpi"`
	RequestProblemInfoFlag    bool `json:"frpi"`
	RequestResponseInfo       byte `json:"rri"`
	TopicAliasFlag            bool `json:"fta"`
	MaximumQos                byte `json:"mqos"`
	MaximumQosFlag            bool `json:"fmqos"`
	RetainAvailable           byte `json:"ra"`
	RetainAvailableFlag       bool `json:"fra"`
	WildcardSubAvailable      byte `json:"wsa"`
	WildcardSubAvailableFlag  bool `json:"fwsa"`
	SubIDAvailable            byte `json:"sida"`
	SubIDAvailableFlag        bool `json:"fsida"`
	SharedSubAvailable        byte `json:"ssa"`
	SharedSubAvailableFlag    bool `json:"fssa"`
}

// Copy creates a new Properties struct with copies of the values.
+func (p *Properties) Copy(allowTransfer bool) Properties { + pr := Properties{ + PayloadFormat: p.PayloadFormat, // [MQTT-3.3.2-4] + PayloadFormatFlag: p.PayloadFormatFlag, + MessageExpiryInterval: p.MessageExpiryInterval, + ContentType: p.ContentType, // [MQTT-3.3.2-20] + ResponseTopic: p.ResponseTopic, // [MQTT-3.3.2-15] + SessionExpiryInterval: p.SessionExpiryInterval, + SessionExpiryIntervalFlag: p.SessionExpiryIntervalFlag, + AssignedClientID: p.AssignedClientID, + ServerKeepAlive: p.ServerKeepAlive, + ServerKeepAliveFlag: p.ServerKeepAliveFlag, + AuthenticationMethod: p.AuthenticationMethod, + RequestProblemInfo: p.RequestProblemInfo, + RequestProblemInfoFlag: p.RequestProblemInfoFlag, + WillDelayInterval: p.WillDelayInterval, + RequestResponseInfo: p.RequestResponseInfo, + ResponseInfo: p.ResponseInfo, + ServerReference: p.ServerReference, + ReasonString: p.ReasonString, + ReceiveMaximum: p.ReceiveMaximum, + TopicAliasMaximum: p.TopicAliasMaximum, + TopicAlias: 0, // NB; do not copy topic alias [MQTT-3.3.2-7] + we do not send to clients (currently) [MQTT-3.1.2-26] [MQTT-3.1.2-27] + MaximumQos: p.MaximumQos, + MaximumQosFlag: p.MaximumQosFlag, + RetainAvailable: p.RetainAvailable, + RetainAvailableFlag: p.RetainAvailableFlag, + MaximumPacketSize: p.MaximumPacketSize, + WildcardSubAvailable: p.WildcardSubAvailable, + WildcardSubAvailableFlag: p.WildcardSubAvailableFlag, + SubIDAvailable: p.SubIDAvailable, + SubIDAvailableFlag: p.SubIDAvailableFlag, + SharedSubAvailable: p.SharedSubAvailable, + SharedSubAvailableFlag: p.SharedSubAvailableFlag, + } + + if allowTransfer { + pr.TopicAlias = p.TopicAlias + pr.TopicAliasFlag = p.TopicAliasFlag + } + + if len(p.CorrelationData) > 0 { + pr.CorrelationData = append([]byte{}, p.CorrelationData...) // [MQTT-3.3.2-16] + } + + if len(p.SubscriptionIdentifier) > 0 { + pr.SubscriptionIdentifier = append([]int{}, p.SubscriptionIdentifier...) 
+ } + + if len(p.AuthenticationData) > 0 { + pr.AuthenticationData = append([]byte{}, p.AuthenticationData...) + } + + if len(p.User) > 0 { + pr.User = []UserProperty{} + for _, v := range p.User { + pr.User = append(pr.User, UserProperty{ // [MQTT-3.3.2-17] + Key: v.Key, + Val: v.Val, + }) + } + } + + return pr +} + +// canEncode returns true if the property type is valid for the packet type. +func (p *Properties) canEncode(pkt byte, k byte) bool { + return validPacketProperties[k][pkt] == 1 +} + +// Encode encodes properties into a bytes buffer. +func (p *Properties) Encode(pkt byte, mods Mods, b *bytes.Buffer, n int) { + if p == nil { + return + } + + buf := mempool.GetBuffer() + defer mempool.PutBuffer(buf) + if p.canEncode(pkt, PropPayloadFormat) && p.PayloadFormatFlag { + buf.WriteByte(PropPayloadFormat) + buf.WriteByte(p.PayloadFormat) + } + + if p.canEncode(pkt, PropMessageExpiryInterval) && p.MessageExpiryInterval > 0 { + buf.WriteByte(PropMessageExpiryInterval) + buf.Write(encodeUint32(p.MessageExpiryInterval)) + } + + if p.canEncode(pkt, PropContentType) && p.ContentType != "" { + buf.WriteByte(PropContentType) + buf.Write(encodeString(p.ContentType)) // [MQTT-3.3.2-19] + } + + if mods.AllowResponseInfo && p.canEncode(pkt, PropResponseTopic) && // [MQTT-3.3.2-14] + p.ResponseTopic != "" && !strings.ContainsAny(p.ResponseTopic, "+#") { // [MQTT-3.1.2-28] + buf.WriteByte(PropResponseTopic) + buf.Write(encodeString(p.ResponseTopic)) // [MQTT-3.3.2-13] + } + + if mods.AllowResponseInfo && p.canEncode(pkt, PropCorrelationData) && len(p.CorrelationData) > 0 { // [MQTT-3.1.2-28] + buf.WriteByte(PropCorrelationData) + buf.Write(encodeBytes(p.CorrelationData)) + } + + if p.canEncode(pkt, PropSubscriptionIdentifier) && len(p.SubscriptionIdentifier) > 0 { + for _, v := range p.SubscriptionIdentifier { + if v > 0 { + buf.WriteByte(PropSubscriptionIdentifier) + encodeLength(buf, int64(v)) + } + } + } + + if p.canEncode(pkt, PropSessionExpiryInterval) && 
p.SessionExpiryIntervalFlag { // [MQTT-3.14.2-2] + buf.WriteByte(PropSessionExpiryInterval) + buf.Write(encodeUint32(p.SessionExpiryInterval)) + } + + if p.canEncode(pkt, PropAssignedClientID) && p.AssignedClientID != "" { + buf.WriteByte(PropAssignedClientID) + buf.Write(encodeString(p.AssignedClientID)) + } + + if p.canEncode(pkt, PropServerKeepAlive) && p.ServerKeepAliveFlag { + buf.WriteByte(PropServerKeepAlive) + buf.Write(encodeUint16(p.ServerKeepAlive)) + } + + if p.canEncode(pkt, PropAuthenticationMethod) && p.AuthenticationMethod != "" { + buf.WriteByte(PropAuthenticationMethod) + buf.Write(encodeString(p.AuthenticationMethod)) + } + + if p.canEncode(pkt, PropAuthenticationData) && len(p.AuthenticationData) > 0 { + buf.WriteByte(PropAuthenticationData) + buf.Write(encodeBytes(p.AuthenticationData)) + } + + if p.canEncode(pkt, PropRequestProblemInfo) && p.RequestProblemInfoFlag { + buf.WriteByte(PropRequestProblemInfo) + buf.WriteByte(p.RequestProblemInfo) + } + + if p.canEncode(pkt, PropWillDelayInterval) && p.WillDelayInterval > 0 { + buf.WriteByte(PropWillDelayInterval) + buf.Write(encodeUint32(p.WillDelayInterval)) + } + + if p.canEncode(pkt, PropRequestResponseInfo) && p.RequestResponseInfo > 0 { + buf.WriteByte(PropRequestResponseInfo) + buf.WriteByte(p.RequestResponseInfo) + } + + if mods.AllowResponseInfo && p.canEncode(pkt, PropResponseInfo) && len(p.ResponseInfo) > 0 { // [MQTT-3.1.2-28] + buf.WriteByte(PropResponseInfo) + buf.Write(encodeString(p.ResponseInfo)) + } + + if p.canEncode(pkt, PropServerReference) && len(p.ServerReference) > 0 { + buf.WriteByte(PropServerReference) + buf.Write(encodeString(p.ServerReference)) + } + + // [MQTT-3.2.2-19] [MQTT-3.14.2-3] [MQTT-3.4.2-2] [MQTT-3.5.2-2] + // [MQTT-3.6.2-2] [MQTT-3.9.2-1] [MQTT-3.11.2-1] [MQTT-3.15.2-2] + if !mods.DisallowProblemInfo && p.canEncode(pkt, PropReasonString) && p.ReasonString != "" { + b := encodeString(p.ReasonString) + if mods.MaxSize == 0 || uint32(n+len(b)+1) < mods.MaxSize 
{ + buf.WriteByte(PropReasonString) + buf.Write(b) + } + } + + if p.canEncode(pkt, PropReceiveMaximum) && p.ReceiveMaximum > 0 { + buf.WriteByte(PropReceiveMaximum) + buf.Write(encodeUint16(p.ReceiveMaximum)) + } + + if p.canEncode(pkt, PropTopicAliasMaximum) && p.TopicAliasMaximum > 0 { + buf.WriteByte(PropTopicAliasMaximum) + buf.Write(encodeUint16(p.TopicAliasMaximum)) + } + + if p.canEncode(pkt, PropTopicAlias) && p.TopicAliasFlag && p.TopicAlias > 0 { // [MQTT-3.3.2-8] + buf.WriteByte(PropTopicAlias) + buf.Write(encodeUint16(p.TopicAlias)) + } + + if p.canEncode(pkt, PropMaximumQos) && p.MaximumQosFlag && p.MaximumQos < 2 { + buf.WriteByte(PropMaximumQos) + buf.WriteByte(p.MaximumQos) + } + + if p.canEncode(pkt, PropRetainAvailable) && p.RetainAvailableFlag { + buf.WriteByte(PropRetainAvailable) + buf.WriteByte(p.RetainAvailable) + } + + if !mods.DisallowProblemInfo && p.canEncode(pkt, PropUser) { + pb := mempool.GetBuffer() + defer mempool.PutBuffer(pb) + for _, v := range p.User { + pb.WriteByte(PropUser) + pb.Write(encodeString(v.Key)) + pb.Write(encodeString(v.Val)) + } + // [MQTT-3.2.2-20] [MQTT-3.14.2-4] [MQTT-3.4.2-3] [MQTT-3.5.2-3] + // [MQTT-3.6.2-3] [MQTT-3.9.2-2] [MQTT-3.11.2-2] [MQTT-3.15.2-3] + if mods.MaxSize == 0 || uint32(n+pb.Len()+1) < mods.MaxSize { + buf.Write(pb.Bytes()) + } + } + + if p.canEncode(pkt, PropMaximumPacketSize) && p.MaximumPacketSize > 0 { + buf.WriteByte(PropMaximumPacketSize) + buf.Write(encodeUint32(p.MaximumPacketSize)) + } + + if p.canEncode(pkt, PropWildcardSubAvailable) && p.WildcardSubAvailableFlag { + buf.WriteByte(PropWildcardSubAvailable) + buf.WriteByte(p.WildcardSubAvailable) + } + + if p.canEncode(pkt, PropSubIDAvailable) && p.SubIDAvailableFlag { + buf.WriteByte(PropSubIDAvailable) + buf.WriteByte(p.SubIDAvailable) + } + + if p.canEncode(pkt, PropSharedSubAvailable) && p.SharedSubAvailableFlag { + buf.WriteByte(PropSharedSubAvailable) + buf.WriteByte(p.SharedSubAvailable) + } + + encodeLength(b, 
int64(buf.Len())) + b.Write(buf.Bytes()) // [MQTT-3.1.3-10] +} + +// Decode decodes property bytes into a properties struct. +func (p *Properties) Decode(pkt byte, b *bytes.Buffer) (n int, err error) { + if p == nil { + return 0, nil + } + + var bu int + n, bu, err = DecodeLength(b) + if err != nil { + return n + bu, err + } + + if n == 0 { + return n + bu, nil + } + + bt := b.Bytes() + var k byte + for offset := 0; offset < n; { + k, offset, err = decodeByte(bt, offset) + if err != nil { + return n + bu, err + } + + if _, ok := validPacketProperties[k][pkt]; !ok { + return n + bu, fmt.Errorf("property type %v not valid for packet type %v: %w", k, pkt, ErrProtocolViolationUnsupportedProperty) + } + + switch k { + case PropPayloadFormat: + p.PayloadFormat, offset, err = decodeByte(bt, offset) + p.PayloadFormatFlag = true + case PropMessageExpiryInterval: + p.MessageExpiryInterval, offset, err = decodeUint32(bt, offset) + case PropContentType: + p.ContentType, offset, err = decodeString(bt, offset) + case PropResponseTopic: + p.ResponseTopic, offset, err = decodeString(bt, offset) + case PropCorrelationData: + p.CorrelationData, offset, err = decodeBytes(bt, offset) + case PropSubscriptionIdentifier: + if p.SubscriptionIdentifier == nil { + p.SubscriptionIdentifier = []int{} + } + + n, bu, err := DecodeLength(bytes.NewBuffer(bt[offset:])) + if err != nil { + return n + bu, err + } + p.SubscriptionIdentifier = append(p.SubscriptionIdentifier, n) + offset += bu + case PropSessionExpiryInterval: + p.SessionExpiryInterval, offset, err = decodeUint32(bt, offset) + p.SessionExpiryIntervalFlag = true + case PropAssignedClientID: + p.AssignedClientID, offset, err = decodeString(bt, offset) + case PropServerKeepAlive: + p.ServerKeepAlive, offset, err = decodeUint16(bt, offset) + p.ServerKeepAliveFlag = true + case PropAuthenticationMethod: + p.AuthenticationMethod, offset, err = decodeString(bt, offset) + case PropAuthenticationData: + p.AuthenticationData, offset, err = 
decodeBytes(bt, offset) + case PropRequestProblemInfo: + p.RequestProblemInfo, offset, err = decodeByte(bt, offset) + p.RequestProblemInfoFlag = true + case PropWillDelayInterval: + p.WillDelayInterval, offset, err = decodeUint32(bt, offset) + case PropRequestResponseInfo: + p.RequestResponseInfo, offset, err = decodeByte(bt, offset) + case PropResponseInfo: + p.ResponseInfo, offset, err = decodeString(bt, offset) + case PropServerReference: + p.ServerReference, offset, err = decodeString(bt, offset) + case PropReasonString: + p.ReasonString, offset, err = decodeString(bt, offset) + case PropReceiveMaximum: + p.ReceiveMaximum, offset, err = decodeUint16(bt, offset) + case PropTopicAliasMaximum: + p.TopicAliasMaximum, offset, err = decodeUint16(bt, offset) + case PropTopicAlias: + p.TopicAlias, offset, err = decodeUint16(bt, offset) + p.TopicAliasFlag = true + case PropMaximumQos: + p.MaximumQos, offset, err = decodeByte(bt, offset) + p.MaximumQosFlag = true + case PropRetainAvailable: + p.RetainAvailable, offset, err = decodeByte(bt, offset) + p.RetainAvailableFlag = true + case PropUser: + var k, v string + k, offset, err = decodeString(bt, offset) + if err != nil { + return n + bu, err + } + v, offset, err = decodeString(bt, offset) + p.User = append(p.User, UserProperty{Key: k, Val: v}) + case PropMaximumPacketSize: + p.MaximumPacketSize, offset, err = decodeUint32(bt, offset) + case PropWildcardSubAvailable: + p.WildcardSubAvailable, offset, err = decodeByte(bt, offset) + p.WildcardSubAvailableFlag = true + case PropSubIDAvailable: + p.SubIDAvailable, offset, err = decodeByte(bt, offset) + p.SubIDAvailableFlag = true + case PropSharedSubAvailable: + p.SharedSubAvailable, offset, err = decodeByte(bt, offset) + p.SharedSubAvailableFlag = true + } + + if err != nil { + return n + bu, err + } + } + + return n + bu, nil +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/packets/tpackets.go b/vendor/github.com/mochi-mqtt/server/v2/packets/tpackets.go new file 
mode 100644 index 000000000..267721eec --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/packets/tpackets.go @@ -0,0 +1,4005 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package packets + +// TPacketCase contains data for cross-checking the encoding and decoding +// of packets and expected scenarios. +type TPacketCase struct { + RawBytes []byte // the bytes that make the packet + ActualBytes []byte // the actual byte array that is created in the event of a byte mutation + Group string // a group that should run the test, blank for all + Desc string // a description of the test + FailFirst error // expected fail result to be run immediately after the method is called + Packet *Packet // the packet that is Expected + ActualPacket *Packet // the actual packet after mutations + Expect error // generic Expected fail result to be checked + Isolate bool // isolate can be used to isolate a test + Primary bool // primary is a test that should be run using readPackets + Case byte // the identifying byte of the case +} + +// TPacketCases is a slice of TPacketCase. +type TPacketCases []TPacketCase + +// Get returns a case matching a given T byte. 
+func (f TPacketCases) Get(b byte) TPacketCase { + for _, v := range f { + if v.Case == b { + return v + } + } + + return TPacketCase{} +} + +const ( + TConnectMqtt31 byte = iota + TConnectMqtt311 + TConnectMqtt5 + TConnectMqtt5LWT + TConnectClean + TConnectUserPass + TConnectUserPassLWT + TConnectMalProtocolName + TConnectMalProtocolVersion + TConnectMalFlags + TConnectMalKeepalive + TConnectMalClientID + TConnectMalWillTopic + TConnectMalWillFlag + TConnectMalUsername + TConnectMalPassword + TConnectMalFixedHeader + TConnectMalReservedBit + TConnectMalProperties + TConnectMalWillProperties + TConnectInvalidProtocolName + TConnectInvalidProtocolVersion + TConnectInvalidProtocolVersion2 + TConnectInvalidReservedBit + TConnectInvalidClientIDTooLong + TConnectInvalidFlagNoUsername + TConnectInvalidFlagNoPassword + TConnectInvalidUsernameNoFlag + TConnectInvalidPasswordNoFlag + TConnectInvalidUsernameTooLong + TConnectInvalidPasswordTooLong + TConnectInvalidWillFlagNoPayload + TConnectInvalidWillFlagQosOutOfRange + TConnectInvalidWillSurplusRetain + TConnectZeroByteUsername + TConnectSpecInvalidUTF8D800 + TConnectSpecInvalidUTF8DFFF + TConnectSpecInvalidUTF80000 + TConnectSpecInvalidUTF8NoSkip + TConnackAcceptedNoSession + TConnackAcceptedSessionExists + TConnackAcceptedMqtt5 + TConnackAcceptedAdjustedExpiryInterval + TConnackMinMqtt5 + TConnackMinCleanMqtt5 + TConnackServerKeepalive + TConnackInvalidMinMqtt5 + TConnackBadProtocolVersion + TConnackProtocolViolationNoSession + TConnackBadClientID + TConnackServerUnavailable + TConnackBadUsernamePassword + TConnackBadUsernamePasswordNoSession + TConnackMqtt5BadUsernamePasswordNoSession + TConnackNotAuthorised + TConnackMalSessionPresent + TConnackMalReturnCode + TConnackMalProperties + TConnackDropProperties + TConnackDropPropertiesPartial + TPublishNoPayload + TPublishBasic + TPublishBasicTopicAliasOnly + TPublishBasicMqtt5 + TPublishMqtt5 + TPublishQos1 + TPublishQos1Mqtt5 + TPublishQos1NoPayload + TPublishQos1Dup + 
TPublishQos2 + TPublishQos2Mqtt5 + TPublishQos2Upgraded + TPublishSubscriberIdentifier + TPublishRetain + TPublishRetainMqtt5 + TPublishDup + TPublishMalTopicName + TPublishMalPacketID + TPublishMalProperties + TPublishCopyBasic + TPublishSpecQos0NoPacketID + TPublishSpecQosMustPacketID + TPublishDropOversize + TPublishInvalidQos0NoPacketID + TPublishInvalidQosMustPacketID + TPublishInvalidSurplusSubID + TPublishInvalidSurplusWildcard + TPublishInvalidSurplusWildcard2 + TPublishInvalidNoTopic + TPublishInvalidTopicAlias + TPublishInvalidExcessTopicAlias + TPublishSpecDenySysTopic + TPuback + TPubackMqtt5 + TPubackMqtt5NotAuthorized + TPubackMalPacketID + TPubackMalProperties + TPubackUnexpectedError + TPubrec + TPubrecMqtt5 + TPubrecMqtt5IDInUse + TPubrecMqtt5NotAuthorized + TPubrecMalPacketID + TPubrecMalProperties + TPubrecMalReasonCode + TPubrecInvalidReason + TPubrel + TPubrelMqtt5 + TPubrelMqtt5AckNoPacket + TPubrelMalPacketID + TPubrelMalProperties + TPubrelInvalidReason + TPubcomp + TPubcompMqtt5 + TPubcompMqtt5AckNoPacket + TPubcompMalPacketID + TPubcompMalProperties + TPubcompInvalidReason + TSubscribe + TSubscribeMany + TSubscribeMqtt5 + TSubscribeRetainHandling1 + TSubscribeRetainHandling2 + TSubscribeRetainAsPublished + TSubscribeMalPacketID + TSubscribeMalTopic + TSubscribeMalQos + TSubscribeMalQosRange + TSubscribeMalProperties + TSubscribeInvalidQosMustPacketID + TSubscribeSpecQosMustPacketID + TSubscribeInvalidNoFilters + TSubscribeInvalidSharedNoLocal + TSubscribeInvalidFilter + TSubscribeInvalidIdentifierOversize + TSuback + TSubackMany + TSubackDeny + TSubackUnspecifiedError + TSubackUnspecifiedErrorMqtt5 + TSubackMqtt5 + TSubackPacketIDInUse + TSubackInvalidFilter + TSubackInvalidSharedNoLocal + TSubackMalPacketID + TSubackMalProperties + TUnsubscribe + TUnsubscribeMany + TUnsubscribeMqtt5 + TUnsubscribeMalPacketID + TUnsubscribeMalTopicName + TUnsubscribeMalProperties + TUnsubscribeInvalidQosMustPacketID + TUnsubscribeSpecQosMustPacketID + 
TUnsubscribeInvalidNoFilters + TUnsuback + TUnsubackMany + TUnsubackMqtt5 + TUnsubackPacketIDInUse + TUnsubackMalPacketID + TUnsubackMalProperties + TPingreq + TPingresp + TDisconnect + TDisconnectTakeover + TDisconnectMqtt5 + TDisconnectSecondConnect + TDisconnectReceiveMaximum + TDisconnectDropProperties + TDisconnectShuttingDown + TDisconnectMalProperties + TDisconnectMalReasonCode + TDisconnectZeroNonZeroExpiry + TAuth + TAuthMalReasonCode + TAuthMalProperties + TAuthInvalidReason + TAuthInvalidReason2 +) + +// TPacketData contains individual encoding and decoding scenarios for each packet type. +var TPacketData = map[byte]TPacketCases{ + Connect: { + { + Case: TConnectMqtt31, + Desc: "mqtt v3.1", + Primary: true, + RawBytes: []byte{ + Connect << 4, 17, // Fixed header + 0, 6, // Protocol Name - MSB+LSB + 'M', 'Q', 'I', 's', 'd', 'p', // Protocol Name + 3, // Protocol Version + 0, // Packet Flags + 0, 30, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 17, + }, + ProtocolVersion: 3, + Connect: ConnectParams{ + ProtocolName: []byte("MQIsdp"), + Clean: false, + Keepalive: 30, + ClientIdentifier: "zen", + }, + }, + }, + { + Case: TConnectMqtt311, + Desc: "mqtt v3.1.1", + Primary: true, + RawBytes: []byte{ + Connect << 4, 15, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0, // Packet Flags + 0, 60, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 15, + }, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Clean: false, + Keepalive: 60, + ClientIdentifier: "zen", + }, + }, + }, + { + Case: TConnectMqtt5, + Desc: "mqtt v5", + Primary: true, + RawBytes: []byte{ + Connect << 4, 87, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 
'T', 'T', // Protocol Name + 5, // Protocol Version + 0, // Packet Flags + 0, 30, // Keepalive + + // Properties + 71, // length + 17, 0, 0, 0, 120, // Session Expiry Interval (17) + 21, 0, 5, 'S', 'H', 'A', '-', '1', // Authentication Method (21) + 22, 0, 9, 'a', 'u', 't', 'h', '-', 'd', 'a', 't', 'a', // Authentication Data (22) + 23, 1, // Request Problem Info (23) + 25, 1, // Request Response Info (25) + 33, 1, 244, // Receive Maximum (33) + 34, 3, 231, // Topic Alias Maximum (34) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + 38, // User Properties (38) + 0, 4, 'k', 'e', 'y', '2', + 0, 6, 'v', 'a', 'l', 'u', 'e', '2', + 39, 0, 0, 125, 0, // Maximum Packet Size (39) + + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 87, + }, + ProtocolVersion: 5, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Clean: false, + Keepalive: 30, + ClientIdentifier: "zen", + }, + Properties: Properties{ + SessionExpiryInterval: uint32(120), + SessionExpiryIntervalFlag: true, + AuthenticationMethod: "SHA-1", + AuthenticationData: []byte("auth-data"), + RequestProblemInfo: byte(1), + RequestProblemInfoFlag: true, + RequestResponseInfo: byte(1), + ReceiveMaximum: uint16(500), + TopicAliasMaximum: uint16(999), + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + { + Key: "key2", + Val: "value2", + }, + }, + MaximumPacketSize: uint32(32000), + }, + }, + }, + { + Case: TConnectClean, + Desc: "mqtt 3.1.1, clean session", + RawBytes: []byte{ + Connect << 4, 15, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 2, // Packet Flags + 0, 45, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 15, + }, + ProtocolVersion: 4, + Connect: 
ConnectParams{ + ProtocolName: []byte("MQTT"), + Clean: true, + Keepalive: 45, + ClientIdentifier: "zen", + }, + }, + }, + { + Case: TConnectMqtt5LWT, + Desc: "mqtt 5 clean session, lwt", + RawBytes: []byte{ + Connect << 4, 47, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 5, // Protocol Version + 14, // Packet Flags + 0, 30, // Keepalive + + // Properties + 10, // length + 17, 0, 0, 0, 120, // Session Expiry Interval (17) + 39, 0, 0, 125, 0, // Maximum Packet Size (39) + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 5, // will properties length + 24, 0, 0, 2, 88, // will delay interval (24) + + 0, 3, // Will Topic - MSB+LSB + 'l', 'w', 't', + 0, 8, // Will Message MSB+LSB + 'n', 'o', 't', 'a', 'g', 'a', 'i', 'n', + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 42, + }, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Clean: true, + Keepalive: 30, + ClientIdentifier: "zen", + WillFlag: true, + WillTopic: "lwt", + WillPayload: []byte("notagain"), + WillQos: 1, + WillProperties: Properties{ + WillDelayInterval: uint32(600), + }, + }, + Properties: Properties{ + SessionExpiryInterval: uint32(120), + SessionExpiryIntervalFlag: true, + MaximumPacketSize: uint32(32000), + }, + }, + }, + { + Case: TConnectUserPass, + Desc: "mqtt 3.1.1, username, password", + RawBytes: []byte{ + Connect << 4, 28, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0 | 1<<6 | 1<<7, // Packet Flags + 0, 20, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 0, 5, // Username MSB+LSB + 'm', 'o', 'c', 'h', 'i', + 0, 4, // Password MSB+LSB + ',', '.', '/', ';', + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 28, + }, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Clean: false, + Keepalive: 20, + 
ClientIdentifier: "zen", + UsernameFlag: true, + PasswordFlag: true, + Username: []byte("mochi"), + Password: []byte(",./;"), + }, + }, + }, + { + Case: TConnectUserPassLWT, + Desc: "mqtt 3.1.1, username, password, lwt", + Primary: true, + RawBytes: []byte{ + Connect << 4, 44, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 206, // Packet Flags + 0, 120, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 0, 3, // Will Topic - MSB+LSB + 'l', 'w', 't', + 0, 9, // Will Message MSB+LSB + 'n', 'o', 't', ' ', 'a', 'g', 'a', 'i', 'n', + 0, 5, // Username MSB+LSB + 'm', 'o', 'c', 'h', 'i', + 0, 4, // Password MSB+LSB + ',', '.', '/', ';', + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 44, + }, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Clean: true, + Keepalive: 120, + ClientIdentifier: "zen", + UsernameFlag: true, + PasswordFlag: true, + Username: []byte("mochi"), + Password: []byte(",./;"), + WillFlag: true, + WillTopic: "lwt", + WillPayload: []byte("not again"), + WillQos: 1, + }, + }, + }, + { + Case: TConnectZeroByteUsername, + Desc: "username flag but 0 byte username", + Group: "decode", + RawBytes: []byte{ + Connect << 4, 23, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 5, // Protocol Version + 130, // Packet Flags + 0, 30, // Keepalive + 5, // length + 17, 0, 0, 0, 120, // Session Expiry Interval (17) + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 0, 0, // Username MSB+LSB + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 23, + }, + ProtocolVersion: 5, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Clean: true, + Keepalive: 30, + ClientIdentifier: "zen", + Username: []byte{}, + UsernameFlag: true, + }, + Properties: Properties{ + SessionExpiryInterval: uint32(120), + 
SessionExpiryIntervalFlag: true, + }, + }, + }, + + // Fail States + { + Case: TConnectMalProtocolName, + Desc: "malformed protocol name", + Group: "decode", + FailFirst: ErrMalformedProtocolName, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 7, // Protocol Name - MSB+LSB + 'M', 'Q', 'I', 's', 'd', // Protocol Name + }, + }, + { + Case: TConnectMalProtocolVersion, + Desc: "malformed protocol version", + Group: "decode", + FailFirst: ErrMalformedProtocolVersion, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + }, + }, + { + Case: TConnectMalFlags, + Desc: "malformed flags", + Group: "decode", + FailFirst: ErrMalformedFlags, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + + }, + }, + { + Case: TConnectMalKeepalive, + Desc: "malformed keepalive", + Group: "decode", + FailFirst: ErrMalformedKeepalive, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0, // Flags + }, + }, + { + Case: TConnectMalClientID, + Desc: "malformed client id", + Group: "decode", + FailFirst: ErrClientIdentifierNotValid, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0, // Flags + 0, 20, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', // Client ID "zen" + }, + }, + { + Case: TConnectMalWillTopic, + Desc: "malformed will topic", + Group: "decode", + FailFirst: ErrMalformedWillTopic, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 14, // Flags + 0, 20, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 0, 6, // Will Topic - MSB+LSB + 'l', + 
}, + }, + { + Case: TConnectMalWillFlag, + Desc: "malformed will flag", + Group: "decode", + FailFirst: ErrMalformedWillPayload, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 14, // Flags + 0, 20, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 0, 3, // Will Topic - MSB+LSB + 'l', 'w', 't', + 0, 9, // Will Message MSB+LSB + 'n', 'o', 't', ' ', 'a', + }, + }, + { + Case: TConnectMalUsername, + Desc: "malformed username", + Group: "decode", + FailFirst: ErrMalformedUsername, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 206, // Flags + 0, 20, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 0, 3, // Will Topic - MSB+LSB + 'l', 'w', 't', + 0, 9, // Will Message MSB+LSB + 'n', 'o', 't', ' ', 'a', 'g', 'a', 'i', 'n', + 0, 5, // Username MSB+LSB + 'm', 'o', 'c', + }, + }, + + { + Case: TConnectInvalidFlagNoUsername, + Desc: "username flag with no username bytes", + Group: "decode", + FailFirst: ErrProtocolViolationFlagNoUsername, + RawBytes: []byte{ + Connect << 4, 17, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 5, // Protocol Version + 130, // Flags + 0, 20, // Keepalive + 0, + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + }, + }, + { + Case: TConnectMalPassword, + Desc: "malformed password", + Group: "decode", + FailFirst: ErrMalformedPassword, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 206, // Flags + 0, 20, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 0, 3, // Will Topic - MSB+LSB + 'l', 'w', 't', + 0, 9, // Will Message MSB+LSB + 'n', 'o', 't', ' ', 'a', 'g', 'a', 'i', 
'n', + 0, 5, // Username MSB+LSB + 'm', 'o', 'c', 'h', 'i', + 0, 4, // Password MSB+LSB + ',', '.', + }, + }, + { + Case: TConnectMalFixedHeader, + Desc: "malformed fixedheader oversize", + Group: "decode", + FailFirst: ErrMalformedProtocolName, // packet test doesn't test fixedheader oversize + RawBytes: []byte{ + Connect << 4, 255, 255, 255, 255, 255, // Fixed header + }, + }, + { + Case: TConnectMalReservedBit, + Desc: "reserved bit not 0", + Group: "nodecode", + FailFirst: ErrProtocolViolation, + RawBytes: []byte{ + Connect << 4, 15, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 1, // Packet Flags + 0, 45, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', + }, + }, + { + Case: TConnectMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Connect << 4, 47, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 5, // Protocol Version + 14, // Packet Flags + 0, 30, // Keepalive + 10, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + { + Case: TConnectMalWillProperties, + Desc: "malformed will properties", + Group: "decode", + FailFirst: ErrMalformedWillProperties, + RawBytes: []byte{ + Connect << 4, 47, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 5, // Protocol Version + 14, // Packet Flags + 0, 30, // Keepalive + 10, // length + 17, 0, 0, 0, 120, // Session Expiry Interval (17) + 39, 0, 0, 125, 0, // Maximum Packet Size (39) + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 5, // will properties length + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + + // Validation Tests + { + Case: TConnectInvalidProtocolName, + Desc: "invalid protocol name", + Group: "validate", + Expect: ErrProtocolViolationProtocolName, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + Connect: ConnectParams{ 
+ ProtocolName: []byte("stuff"), + }, + }, + }, + { + Case: TConnectInvalidProtocolVersion, + Desc: "invalid protocol version", + Group: "validate", + Expect: ErrProtocolViolationProtocolVersion, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 2, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + }, + }, + }, + { + Case: TConnectInvalidProtocolVersion2, + Desc: "invalid protocol version", + Group: "validate", + Expect: ErrProtocolViolationProtocolVersion, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 2, + Connect: ConnectParams{ + ProtocolName: []byte("MQIsdp"), + }, + }, + }, + { + Case: TConnectInvalidReservedBit, + Desc: "reserved bit not 0", + Group: "validate", + Expect: ErrProtocolViolationReservedBit, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + }, + ReservedBit: 1, + }, + }, + { + Case: TConnectInvalidClientIDTooLong, + Desc: "client id too long", + Group: "validate", + Expect: ErrClientIdentifierNotValid, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + ClientIdentifier: func() string { + return string(make([]byte, 65536)) + }(), + }, + }, + }, + { + Case: TConnectInvalidUsernameNoFlag, + Desc: "has username but no flag", + Group: "validate", + Expect: ErrProtocolViolationUsernameNoFlag, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Username: []byte("username"), + }, + }, + }, + { + Case: TConnectInvalidFlagNoPassword, + Desc: "has password flag but no password", + Group: "validate", + Expect: ErrProtocolViolationFlagNoPassword, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + PasswordFlag: true, + }, 
+ }, + }, + { + Case: TConnectInvalidPasswordNoFlag, + Desc: "has password flag but no password", + Group: "validate", + Expect: ErrProtocolViolationPasswordNoFlag, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Password: []byte("password"), + }, + }, + }, + { + Case: TConnectInvalidUsernameTooLong, + Desc: "username too long", + Group: "validate", + Expect: ErrProtocolViolationUsernameTooLong, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + UsernameFlag: true, + Username: func() []byte { + return make([]byte, 65536) + }(), + }, + }, + }, + { + Case: TConnectInvalidPasswordTooLong, + Desc: "password too long", + Group: "validate", + Expect: ErrProtocolViolationPasswordTooLong, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + UsernameFlag: true, + Username: []byte{}, + PasswordFlag: true, + Password: func() []byte { + return make([]byte, 65536) + }(), + }, + }, + }, + { + Case: TConnectInvalidWillFlagNoPayload, + Desc: "will flag no payload", + Group: "validate", + Expect: ErrProtocolViolationWillFlagNoPayload, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + WillFlag: true, + }, + }, + }, + { + Case: TConnectInvalidWillFlagQosOutOfRange, + Desc: "will flag no payload", + Group: "validate", + Expect: ErrProtocolViolationQosOutOfRange, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + WillFlag: true, + WillTopic: "a/b/c", + WillPayload: []byte{'b'}, + WillQos: 4, + }, + }, + }, + { + Case: TConnectInvalidWillSurplusRetain, + Desc: "no will flag surplus retain", + Group: "validate", + Expect: 
ErrProtocolViolationWillFlagSurplusRetain, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + WillRetain: true, + }, + }, + }, + + // Spec Tests + { + Case: TConnectSpecInvalidUTF8D800, + Desc: "invalid utf8 string (a) - code point U+D800", + Group: "decode", + FailFirst: ErrClientIdentifierNotValid, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0, // Flags + 0, 20, // Keepalive + 0, 4, // Client ID - MSB+LSB + 'e', 0xed, 0xa0, 0x80, // Client id bearing U+D800 + }, + }, + { + Case: TConnectSpecInvalidUTF8DFFF, + Desc: "invalid utf8 string (b) - code point U+DFFF", + Group: "decode", + FailFirst: ErrClientIdentifierNotValid, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0, // Flags + 0, 20, // Keepalive + 0, 4, // Client ID - MSB+LSB + 'e', 0xed, 0xa3, 0xbf, // Client id bearing U+D8FF + }, + }, + + { + Case: TConnectSpecInvalidUTF80000, + Desc: "invalid utf8 string (c) - code point U+0000", + Group: "decode", + FailFirst: ErrClientIdentifierNotValid, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0, // Flags + 0, 20, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'e', 0xc0, 0x80, // Client id bearing U+0000 + }, + }, + + { + Case: TConnectSpecInvalidUTF8NoSkip, + Desc: "utf8 string must not skip or strip code point U+FEFF", + //Group: "decode", + //FailFirst: ErrMalformedClientID, + RawBytes: []byte{ + Connect << 4, 18, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0, // Flags + 0, 20, // Keepalive + 0, 6, // Client ID - MSB+LSB + 'e', 'b', 0xEF, 0xBB, 0xBF, 'd', // Client id 
bearing U+FEFF + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 16, + }, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Keepalive: 20, + ClientIdentifier: string([]byte{'e', 'b', 0xEF, 0xBB, 0xBF, 'd'}), + }, + }, + }, + }, + Connack: { + { + Case: TConnackAcceptedNoSession, + Desc: "accepted, no session", + Primary: true, + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 0, // No existing session + CodeSuccess.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + SessionPresent: false, + ReasonCode: CodeSuccess.Code, + }, + }, + { + Case: TConnackAcceptedSessionExists, + Desc: "accepted, session exists", + Primary: true, + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 1, // Session present + CodeSuccess.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + SessionPresent: true, + ReasonCode: CodeSuccess.Code, + }, + }, + { + Case: TConnackAcceptedAdjustedExpiryInterval, + Desc: "accepted, no session, adjusted expiry interval mqtt5", + Primary: true, + RawBytes: []byte{ + Connack << 4, 8, // fixed header + 0, // Session present + CodeSuccess.Code, + 5, // length + 17, 0, 0, 0, 120, // Session Expiry Interval (17) + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 8, + }, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + SessionExpiryInterval: uint32(120), + SessionExpiryIntervalFlag: true, + }, + }, + }, + { + Case: TConnackAcceptedMqtt5, + Desc: "accepted no session mqtt5", + Primary: true, + RawBytes: []byte{ + Connack << 4, 124, // fixed header + 0, // No existing session + CodeSuccess.Code, + // Properties + 121, // length + 17, 0, 0, 0, 120, // Session Expiry Interval (17) + 18, 0, 8, 'm', 'o', 'c', 'h', 'i', '-', 'v', '5', // Assigned Client ID (18) + 19, 0, 20, // Server Keep Alive (19) + 21, 0, 5, 'S', 'H', 'A', '-', '1', // 
Authentication Method (21) + 22, 0, 9, 'a', 'u', 't', 'h', '-', 'd', 'a', 't', 'a', // Authentication Data (22) + 26, 0, 8, 'r', 'e', 's', 'p', 'o', 'n', 's', 'e', // Response Info (26) + 28, 0, 7, 'm', 'o', 'c', 'h', 'i', '-', '2', // Server Reference (28) + 31, 0, 6, 'r', 'e', 'a', 's', 'o', 'n', // Reason String (31) + 33, 1, 244, // Receive Maximum (33) + 34, 3, 231, // Topic Alias Maximum (34) + 36, 1, // Maximum Qos (36) + 37, 1, // Retain Available (37) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + 38, // User Properties (38) + 0, 4, 'k', 'e', 'y', '2', + 0, 6, 'v', 'a', 'l', 'u', 'e', '2', + 39, 0, 0, 125, 0, // Maximum Packet Size (39) + 40, 1, // Wildcard Subscriptions Available (40) + 41, 1, // Subscription ID Available (41) + 42, 1, // Shared Subscriptions Available (42) + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 124, + }, + SessionPresent: false, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + SessionExpiryInterval: uint32(120), + SessionExpiryIntervalFlag: true, + AssignedClientID: "mochi-v5", + ServerKeepAlive: uint16(20), + ServerKeepAliveFlag: true, + AuthenticationMethod: "SHA-1", + AuthenticationData: []byte("auth-data"), + ResponseInfo: "response", + ServerReference: "mochi-2", + ReasonString: "reason", + ReceiveMaximum: uint16(500), + TopicAliasMaximum: uint16(999), + MaximumQos: byte(1), + MaximumQosFlag: true, + RetainAvailable: byte(1), + RetainAvailableFlag: true, + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + { + Key: "key2", + Val: "value2", + }, + }, + MaximumPacketSize: uint32(32000), + WildcardSubAvailable: byte(1), + WildcardSubAvailableFlag: true, + SubIDAvailable: byte(1), + SubIDAvailableFlag: true, + SharedSubAvailable: byte(1), + SharedSubAvailableFlag: true, + }, + }, + }, + { + Case: TConnackMinMqtt5, + Desc: "accepted min properties mqtt5", + Primary: true, + RawBytes: []byte{ + 
Connack << 4, 13, // fixed header + 1, // existing session + CodeSuccess.Code, + 10, // Properties length + 18, 0, 5, 'm', 'o', 'c', 'h', 'i', // Assigned Client ID (18) + 36, 1, // Maximum Qos (36) + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 13, + }, + SessionPresent: true, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + AssignedClientID: "mochi", + MaximumQos: byte(1), + MaximumQosFlag: true, + }, + }, + }, + { + Case: TConnackMinCleanMqtt5, + Desc: "accepted min properties mqtt5b", + Primary: true, + RawBytes: []byte{ + Connack << 4, 3, // fixed header + 0, // existing session + CodeSuccess.Code, + 0, // Properties length + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 16, + }, + SessionPresent: false, + ReasonCode: CodeSuccess.Code, + }, + }, + { + Case: TConnackServerKeepalive, + Desc: "server set keepalive", + Primary: true, + RawBytes: []byte{ + Connack << 4, 6, // fixed header + 1, // existing session + CodeSuccess.Code, + 3, // Properties length + 19, 0, 10, // server keepalive + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 6, + }, + SessionPresent: true, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + ServerKeepAlive: uint16(10), + ServerKeepAliveFlag: true, + }, + }, + }, + { + Case: TConnackInvalidMinMqtt5, + Desc: "failure min properties mqtt5", + Primary: true, + RawBytes: append([]byte{ + Connack << 4, 23, // fixed header + 0, // No existing session + ErrUnspecifiedError.Code, + // Properties + 20, // length + 31, 0, 17, // Reason String (31) + }, []byte(ErrUnspecifiedError.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 23, + }, + SessionPresent: false, + ReasonCode: ErrUnspecifiedError.Code, + Properties: Properties{ + ReasonString: ErrUnspecifiedError.Reason, + }, + }, + }, + + { + Case: 
TConnackProtocolViolationNoSession, + Desc: "miscellaneous protocol violation", + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 0, // Session present + ErrProtocolViolation.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + ReasonCode: ErrProtocolViolation.Code, + }, + }, + { + Case: TConnackBadProtocolVersion, + Desc: "bad protocol version", + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 1, // Session present + ErrProtocolViolationProtocolVersion.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + SessionPresent: true, + ReasonCode: ErrProtocolViolationProtocolVersion.Code, + }, + }, + { + Case: TConnackBadClientID, + Desc: "bad client id", + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 1, // Session present + ErrClientIdentifierNotValid.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + SessionPresent: true, + ReasonCode: ErrClientIdentifierNotValid.Code, + }, + }, + { + Case: TConnackServerUnavailable, + Desc: "server unavailable", + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 1, // Session present + ErrServerUnavailable.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + SessionPresent: true, + ReasonCode: ErrServerUnavailable.Code, + }, + }, + { + Case: TConnackBadUsernamePassword, + Desc: "bad username or password", + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 1, // Session present + ErrBadUsernameOrPassword.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + SessionPresent: true, + ReasonCode: ErrBadUsernameOrPassword.Code, + }, + }, + { + Case: TConnackBadUsernamePasswordNoSession, + Desc: "bad username or password no session", + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 0, // No session present + Err3NotAuthorized.Code, // use v3 remapping + }, + Packet: &Packet{ + 
FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + ReasonCode: Err3NotAuthorized.Code, + }, + }, + { + Case: TConnackMqtt5BadUsernamePasswordNoSession, + Desc: "mqtt5 bad username or password no session", + RawBytes: []byte{ + Connack << 4, 3, // fixed header + 0, // No session present + ErrBadUsernameOrPassword.Code, + 0, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + ReasonCode: ErrBadUsernameOrPassword.Code, + }, + }, + + { + Case: TConnackNotAuthorised, + Desc: "not authorised", + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 1, // Session present + ErrNotAuthorized.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + SessionPresent: true, + ReasonCode: ErrNotAuthorized.Code, + }, + }, + { + Case: TConnackDropProperties, + Desc: "drop oversize properties", + Group: "encode", + RawBytes: []byte{ + Connack << 4, 40, // fixed header + 0, // No existing session + CodeSuccess.Code, + 19, // length + 28, 0, 7, 'm', 'o', 'c', 'h', 'i', '-', '2', // Server Reference (28) + 31, 0, 6, 'r', 'e', 'a', 's', 'o', 'n', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + ActualBytes: []byte{ + Connack << 4, 13, // fixed header + 0, // No existing session + CodeSuccess.Code, + 10, // length + 28, 0, 7, 'm', 'o', 'c', 'h', 'i', '-', '2', // Server Reference (28) + }, + Packet: &Packet{ + Mods: Mods{ + MaxSize: 5, + }, + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 40, + }, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + ReasonString: "reason", + ServerReference: "mochi-2", + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TConnackDropPropertiesPartial, + Desc: "drop oversize properties partial", + Group: "encode", + RawBytes: []byte{ + Connack << 4, 40, // fixed header + 0, // No 
existing session + CodeSuccess.Code, + 19, // length + 28, 0, 7, 'm', 'o', 'c', 'h', 'i', '-', '2', // Server Reference (28) + 31, 0, 6, 'r', 'e', 'a', 's', 'o', 'n', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + ActualBytes: []byte{ + Connack << 4, 22, // fixed header + 0, // No existing session + CodeSuccess.Code, + 19, // length + 28, 0, 7, 'm', 'o', 'c', 'h', 'i', '-', '2', // Server Reference (28) + 31, 0, 6, 'r', 'e', 'a', 's', 'o', 'n', // Reason String (31) + }, + Packet: &Packet{ + Mods: Mods{ + MaxSize: 18, + }, + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 40, + }, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + ReasonString: "reason", + ServerReference: "mochi-2", + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + // Fail States + { + Case: TConnackMalSessionPresent, + Desc: "malformed session present", + Group: "decode", + FailFirst: ErrMalformedSessionPresent, + RawBytes: []byte{ + Connect << 4, 2, // Fixed header + }, + }, + { + Case: TConnackMalReturnCode, + Desc: "malformed bad return Code", + Group: "decode", + //Primary: true, + FailFirst: ErrMalformedReasonCode, + RawBytes: []byte{ + Connect << 4, 2, // Fixed header + 0, + }, + }, + { + Case: TConnackMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Connack << 4, 40, // fixed header + 0, // No existing session + CodeSuccess.Code, + 19, // length + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + + Publish: { + { + Case: TPublishNoPayload, + Desc: "no payload", + Primary: true, + RawBytes: []byte{ + Publish << 4, 7, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 7, + }, + TopicName: "a/b/c", + Payload: []byte{}, + }, + }, + { + 
Case: TPublishBasic, + Desc: "basic", + Primary: true, + RawBytes: []byte{ + Publish << 4, 18, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 18, + }, + TopicName: "a/b/c", + Payload: []byte("hello mochi"), + }, + }, + + { + Case: TPublishMqtt5, + Desc: "mqtt v5", + Primary: true, + RawBytes: []byte{ + Publish << 4, 77, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 58, // length + 1, 1, // Payload Format (1) + 2, 0, 0, 0, 2, // Message Expiry (2) + 3, 0, 10, 't', 'e', 'x', 't', '/', 'p', 'l', 'a', 'i', 'n', // Content Type (3) + 8, 0, 5, 'a', '/', 'b', '/', 'c', // Response Topic (8) + 9, 0, 4, 'd', 'a', 't', 'a', // Correlations Data (9) + 11, 202, 212, 19, // Subscription Identifier (11) + 35, 0, 3, // Topic Alias (35) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 77, + }, + TopicName: "a/b/c", + Properties: Properties{ + PayloadFormat: byte(1), // UTF-8 Format + PayloadFormatFlag: true, + MessageExpiryInterval: uint32(2), + ContentType: "text/plain", + ResponseTopic: "a/b/c", + CorrelationData: []byte("data"), + SubscriptionIdentifier: []int{322122}, + TopicAlias: uint16(3), + TopicAliasFlag: true, + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + Payload: []byte("hello mochi"), + }, + }, + { + Case: TPublishBasicTopicAliasOnly, + Desc: "mqtt v5 topic alias only", + Primary: true, + RawBytes: []byte{ + Publish << 4, 17, // Fixed header + 0, 0, // Topic Name - LSB+MSB + 3, // length + 35, 0, 1, // Topic Alias (35) + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + 
}, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 17, + }, + Properties: Properties{ + TopicAlias: 1, + TopicAliasFlag: true, + }, + Payload: []byte("hello mochi"), + }, + }, + { + Case: TPublishBasicMqtt5, + Desc: "mqtt basic v5", + Primary: true, + RawBytes: []byte{ + Publish << 4, 22, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 3, // length + 35, 0, 1, // Topic Alias (35) + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 22, + }, + TopicName: "a/b/c", + Properties: Properties{ + TopicAlias: uint16(1), + TopicAliasFlag: true, + }, + Payload: []byte("hello mochi"), + }, + }, + + { + Case: TPublishQos1, + Desc: "qos:1, packet id", + Primary: true, + RawBytes: []byte{ + Publish<<4 | 1<<1, 20, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 7, // Packet ID - LSB+MSB + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Qos: 1, + Remaining: 20, + }, + TopicName: "a/b/c", + Payload: []byte("hello mochi"), + PacketID: 7, + }, + }, + { + Case: TPublishQos1Mqtt5, + Desc: "mqtt v5", + Primary: true, + RawBytes: []byte{ + Publish<<4 | 1<<1, 37, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 7, // Packet ID - LSB+MSB + // Properties + 16, // length + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 37, + Qos: 1, + }, + PacketID: 7, + TopicName: "a/b/c", + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + Payload: 
[]byte("hello mochi"), + }, + }, + + { + Case: TPublishQos1Dup, + Desc: "qos:1, dup:true, packet id", + Primary: true, + RawBytes: []byte{ + Publish<<4 | 2 | 8, 20, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 7, // Packet ID - LSB+MSB + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Qos: 1, + Remaining: 20, + Dup: true, + }, + TopicName: "a/b/c", + Payload: []byte("hello mochi"), + PacketID: 7, + }, + }, + { + Case: TPublishQos1NoPayload, + Desc: "qos:1, packet id, no payload", + Primary: true, + RawBytes: []byte{ + Publish<<4 | 2, 9, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'y', '/', 'u', '/', 'i', // Topic Name + 0, 7, // Packet ID - LSB+MSB + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Qos: 1, + Remaining: 9, + }, + TopicName: "y/u/i", + PacketID: 7, + Payload: []byte{}, + }, + }, + { + Case: TPublishQos2, + Desc: "qos:2, packet id", + Primary: true, + RawBytes: []byte{ + Publish<<4 | 2<<1, 14, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 7, // Packet ID - LSB+MSB + 'h', 'e', 'l', 'l', 'o', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Qos: 2, + Remaining: 14, + }, + TopicName: "a/b/c", + Payload: []byte("hello"), + PacketID: 7, + }, + }, + { + Case: TPublishQos2Mqtt5, + Desc: "mqtt v5", + Primary: true, + RawBytes: []byte{ + Publish<<4 | 2<<1, 37, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 7, // Packet ID - LSB+MSB + // Properties + 16, // length + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 37, + Qos: 2, + }, + PacketID: 7, + 
TopicName: "a/b/c", + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + Payload: []byte("hello mochi"), + }, + }, + { + Case: TPublishSubscriberIdentifier, + Desc: "subscription identifiers", + Primary: true, + RawBytes: []byte{ + Publish << 4, 23, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 4, // properties length + 11, 2, // Subscription Identifier (11) + 11, 3, // Subscription Identifier (11) + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 23, + }, + TopicName: "a/b/c", + Properties: Properties{ + SubscriptionIdentifier: []int{2, 3}, + }, + Payload: []byte("hello mochi"), + }, + }, + + { + Case: TPublishQos2Upgraded, + Desc: "qos:2, upgraded from publish to qos2 sub", + Primary: true, + RawBytes: []byte{ + Publish<<4 | 2<<1, 20, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 1, // Packet ID - LSB+MSB + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Qos: 2, + Remaining: 18, + }, + TopicName: "a/b/c", + Payload: []byte("hello mochi"), + PacketID: 1, + }, + }, + { + Case: TPublishRetain, + Desc: "retain", + RawBytes: []byte{ + Publish<<4 | 1<<0, 18, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Retain: true, + }, + TopicName: "a/b/c", + Payload: []byte("hello mochi"), + }, + }, + { + Case: TPublishRetainMqtt5, + Desc: "retain mqtt5", + RawBytes: []byte{ + Publish<<4 | 1<<0, 19, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, // properties length + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 
'h', 'i', // Payload + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Publish, + Retain: true, + Remaining: 19, + }, + TopicName: "a/b/c", + Properties: Properties{}, + Payload: []byte("hello mochi"), + }, + }, + { + Case: TPublishDup, + Desc: "dup", + RawBytes: []byte{ + Publish<<4 | 8, 10, // Fixed header + 0, 3, // Topic Name - LSB+MSB + 'a', '/', 'b', // Topic Name + 'h', 'e', 'l', 'l', 'o', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Dup: true, + }, + TopicName: "a/b", + Payload: []byte("hello"), + }, + }, + + // Fail States + { + Case: TPublishMalTopicName, + Desc: "malformed topic name", + Group: "decode", + FailFirst: ErrMalformedTopic, + RawBytes: []byte{ + Publish << 4, 7, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', + 0, 11, // Packet ID - LSB+MSB + }, + }, + { + Case: TPublishMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Publish<<4 | 2, 7, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'x', '/', 'y', '/', 'z', // Topic Name + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TPublishMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Publish << 4, 35, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 16, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + + // Copy tests + { + Case: TPublishCopyBasic, + Desc: "basic copyable", + Group: "copy", + RawBytes: []byte{ + Publish << 4, 18, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'z', '/', 'e', '/', 'n', // Topic Name + 'm', 'o', 'c', 'h', 'i', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Dup: true, + Retain: true, + Qos: 1, + }, + TopicName: "z/e/n", + Payload: []byte("mochi mochi"), + }, + }, + + // Spec tests + { + Case: TPublishSpecQos0NoPacketID, + Desc: 
"packet id must be 0 if qos is 0 (a)", + Group: "encode", + // this version tests for correct byte array mutuation. + // this does not check if -incoming- Packets are parsed as correct, + // it is impossible for the parser to determine if the payload start is incorrect. + RawBytes: []byte{ + Publish << 4, 12, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 3, // Packet ID - LSB+MSB + 'h', 'e', 'l', 'l', 'o', // Payload + }, + ActualBytes: []byte{ + Publish << 4, 12, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + // Packet ID is removed. + 'h', 'e', 'l', 'l', 'o', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 12, + }, + TopicName: "a/b/c", + Payload: []byte("hello"), + }, + }, + { + Case: TPublishSpecQosMustPacketID, + Desc: "no packet id with qos > 0", + Group: "encode", + Expect: ErrProtocolViolationNoPacketID, + RawBytes: []byte{ + Publish<<4 | 2, 14, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 0, // Packet ID - LSB+MSB + 'h', 'e', 'l', 'l', 'o', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Qos: 1, + }, + TopicName: "a/b/c", + Payload: []byte("hello"), + PacketID: 0, + }, + }, + { + Case: TPublishDropOversize, + Desc: "drop oversized publish packet", + Group: "encode", + FailFirst: ErrPacketTooLarge, + RawBytes: []byte{ + Publish << 4, 10, // Fixed header + 0, 3, // Topic Name - LSB+MSB + 'a', '/', 'b', // Topic Name + 'h', 'e', 'l', 'l', 'o', // Payload + }, + Packet: &Packet{ + Mods: Mods{ + MaxSize: 2, + }, + FixedHeader: FixedHeader{ + Type: Publish, + }, + TopicName: "a/b", + Payload: []byte("hello"), + }, + }, + + // Validation Tests + { + Case: TPublishInvalidQos0NoPacketID, + Desc: "packet id must be 0 if qos is 0 (b)", + Group: "validate", + Expect: ErrProtocolViolationSurplusPacketID, + Packet: &Packet{ + FixedHeader: FixedHeader{ + 
Type: Publish, + Remaining: 12, + Qos: 0, + }, + TopicName: "a/b/c", + Payload: []byte("hello"), + PacketID: 3, + }, + }, + { + Case: TPublishInvalidQosMustPacketID, + Desc: "no packet id with qos > 0", + Group: "validate", + Expect: ErrProtocolViolationNoPacketID, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Qos: 1, + }, + PacketID: 0, + }, + }, + { + Case: TPublishInvalidSurplusSubID, + Desc: "surplus subscription identifier", + Group: "validate", + Expect: ErrProtocolViolationSurplusSubID, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + }, + Properties: Properties{ + SubscriptionIdentifier: []int{1}, + }, + TopicName: "a/b", + }, + }, + { + Case: TPublishInvalidSurplusWildcard, + Desc: "topic contains wildcards", + Group: "validate", + Expect: ErrProtocolViolationSurplusWildcard, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + }, + TopicName: "a/+", + }, + }, + { + Case: TPublishInvalidSurplusWildcard2, + Desc: "topic contains wildcards 2", + Group: "validate", + Expect: ErrProtocolViolationSurplusWildcard, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + }, + TopicName: "a/#", + }, + }, + { + Case: TPublishInvalidNoTopic, + Desc: "no topic or alias specified", + Group: "validate", + Expect: ErrProtocolViolationNoTopic, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + }, + }, + }, + { + Case: TPublishInvalidExcessTopicAlias, + Desc: "topic alias over maximum", + Group: "validate", + Expect: ErrTopicAliasInvalid, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + }, + Properties: Properties{ + TopicAlias: 1025, + }, + TopicName: "a/b", + }, + }, + { + Case: TPublishInvalidTopicAlias, + Desc: "topic alias flag and no alias", + Group: "validate", + Expect: ErrTopicAliasInvalid, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + }, + Properties: Properties{ + TopicAliasFlag: true, + TopicAlias: 0, + }, + TopicName: "a/b/", + }, + }, + { + Case: 
TPublishSpecDenySysTopic, + Desc: "deny publishing to $SYS topics", + Group: "validate", + Expect: CodeSuccess, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + }, + TopicName: "$SYS/any", + Payload: []byte("y"), + }, + RawBytes: []byte{ + Publish << 4, 11, // Fixed header + 0, 5, // Topic Name - LSB+MSB + '$', 'S', 'Y', 'S', '/', 'a', 'n', 'y', // Topic Name + 'y', // Payload + }, + }, + }, + + Puback: { + { + Case: TPuback, + Desc: "puback", + Primary: true, + RawBytes: []byte{ + Puback << 4, 2, // Fixed header + 0, 7, // Packet ID - LSB+MSB + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Puback, + Remaining: 2, + }, + PacketID: 7, + }, + }, + { + Case: TPubackMqtt5, + Desc: "mqtt5", + Primary: true, + RawBytes: []byte{ + Puback << 4, 20, // Fixed header + 0, 7, // Packet ID - LSB+MSB + CodeGrantedQos0.Code, // Reason Code + 16, // Properties Length + // 31, 0, 6, 'r', 'e', 'a', 's', 'o', 'n', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Puback, + Remaining: 20, + }, + PacketID: 7, + ReasonCode: CodeGrantedQos0.Code, + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TPubackMqtt5NotAuthorized, + Desc: "QOS 1 publish not authorized mqtt5", + Primary: true, + RawBytes: []byte{ + Puback << 4, 37, // Fixed header + 0, 7, // Packet ID - LSB+MSB + ErrNotAuthorized.Code, // Reason Code + 33, // Properties Length + 31, 0, 14, 'n', 'o', 't', ' ', 'a', 'u', + 't', 'h', 'o', 'r', 'i', 'z', 'e', 'd', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Puback, + Remaining: 31, + }, + PacketID: 7, + ReasonCode: ErrNotAuthorized.Code, + Properties: Properties{ + 
ReasonString: ErrNotAuthorized.Reason, + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TPubackUnexpectedError, + Desc: "unexpected error", + Group: "decode", + RawBytes: []byte{ + Puback << 4, 29, // Fixed header + 0, 7, // Packet ID - LSB+MSB + ErrPayloadFormatInvalid.Code, // Reason Code + 25, // Properties Length + 31, 0, 22, 'p', 'a', 'y', 'l', 'o', 'a', 'd', + ' ', 'f', 'o', 'r', 'm', 'a', 't', + ' ', 'i', 'n', 'v', 'a', 'l', 'i', 'd', // Reason String (31) + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Puback, + Remaining: 28, + }, + PacketID: 7, + ReasonCode: ErrPayloadFormatInvalid.Code, + Properties: Properties{ + ReasonString: ErrPayloadFormatInvalid.Reason, + }, + }, + }, + + // Fail states + { + Case: TPubackMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Puback << 4, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TPubackMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Puback << 4, 20, // Fixed header + 0, 7, // Packet ID - LSB+MSB + CodeGrantedQos0.Code, // Reason Code + 16, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + Pubrec: { + { + Case: TPubrec, + Desc: "pubrec", + Primary: true, + RawBytes: []byte{ + Pubrec << 4, 2, // Fixed header + 0, 7, // Packet ID - LSB+MSB + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pubrec, + Remaining: 2, + }, + PacketID: 7, + }, + }, + { + Case: TPubrecMqtt5, + Desc: "pubrec mqtt5", + Primary: true, + RawBytes: []byte{ + Pubrec << 4, 20, // Fixed header + 0, 7, // Packet ID - LSB+MSB + CodeSuccess.Code, // Reason Code + 16, // Properties Length + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Pubrec, + 
Remaining: 20, + }, + PacketID: 7, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TPubrecMqtt5IDInUse, + Desc: "packet id in use mqtt5", + Primary: true, + RawBytes: []byte{ + Pubrec << 4, 47, // Fixed header + 0, 7, // Packet ID - LSB+MSB + ErrPacketIdentifierInUse.Code, // Reason Code + 43, // Properties Length + 31, 0, 24, 'p', 'a', 'c', 'k', 'e', 't', + ' ', 'i', 'd', 'e', 'n', 't', 'i', 'f', 'i', 'e', 'r', + ' ', 'i', 'n', + ' ', 'u', 's', 'e', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Pubrec, + Remaining: 31, + }, + PacketID: 7, + ReasonCode: ErrPacketIdentifierInUse.Code, + Properties: Properties{ + ReasonString: ErrPacketIdentifierInUse.Reason, + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TPubrecMqtt5NotAuthorized, + Desc: "QOS 2 publish not authorized mqtt5", + Primary: true, + RawBytes: []byte{ + Pubrec << 4, 37, // Fixed header + 0, 7, // Packet ID - LSB+MSB + ErrNotAuthorized.Code, // Reason Code + 33, // Properties Length + 31, 0, 14, 'n', 'o', 't', ' ', 'a', 'u', + 't', 'h', 'o', 'r', 'i', 'z', 'e', 'd', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Pubrec, + Remaining: 31, + }, + PacketID: 7, + ReasonCode: ErrNotAuthorized.Code, + Properties: Properties{ + ReasonString: ErrNotAuthorized.Reason, + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TPubrecMalReasonCode, + Desc: "malformed reason code", + Group: "decode", + FailFirst: ErrMalformedReasonCode, + RawBytes: []byte{ + Pubrec << 4, 31, // Fixed header + 0, 7, // Packet ID - LSB+MSB + 
}, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + // Validation + { + Case: TPubrecInvalidReason, + Desc: "invalid reason code", + Group: "validate", + FailFirst: ErrProtocolViolationInvalidReason, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pubrec, + }, + PacketID: 7, + ReasonCode: ErrConnectionRateExceeded.Code, + }, + }, + // Fail states + { + Case: TPubrecMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Pubrec << 4, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TPubrecMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Pubrec << 4, 31, // Fixed header + 0, 7, // Packet ID - LSB+MSB + ErrPacketIdentifierInUse.Code, // Reason Code + 27, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + Pubrel: { + { + Case: TPubrel, + Desc: "pubrel", + Primary: true, + RawBytes: []byte{ + Pubrel<<4 | 1<<1, 2, // Fixed header + 0, 7, // Packet ID - LSB+MSB + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pubrel, + Remaining: 2, + Qos: 1, + }, + PacketID: 7, + }, + }, + { + Case: TPubrelMqtt5, + Desc: "pubrel mqtt5", + Primary: true, + RawBytes: []byte{ + Pubrel<<4 | 1<<1, 20, // Fixed header + 0, 7, // Packet ID - LSB+MSB + CodeSuccess.Code, // Reason Code + 16, // Properties Length + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Pubrel, + Remaining: 20, + Qos: 1, + }, + PacketID: 7, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TPubrelMqtt5AckNoPacket, + Desc: "mqtt5 no packet id ack", + Primary: true, + RawBytes: append([]byte{ + Pubrel<<4 | 1<<1, 34, // Fixed header + 0, 7, // Packet ID - LSB+MSB + ErrPacketIdentifierNotFound.Code, // 
Reason Code + 30, // Properties Length + 31, 0, byte(len(ErrPacketIdentifierNotFound.Reason)), + }, []byte(ErrPacketIdentifierNotFound.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Pubrel, + Remaining: 34, + Qos: 1, + }, + PacketID: 7, + ReasonCode: ErrPacketIdentifierNotFound.Code, + Properties: Properties{ + ReasonString: ErrPacketIdentifierNotFound.Reason, + }, + }, + }, + // Validation + { + Case: TPubrelInvalidReason, + Desc: "invalid reason code", + Group: "validate", + FailFirst: ErrProtocolViolationInvalidReason, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pubrel, + }, + PacketID: 7, + ReasonCode: ErrConnectionRateExceeded.Code, + }, + }, + // Fail states + { + Case: TPubrelMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Pubrel << 4, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TPubrelMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Pubrel<<4 | 1<<1, 20, // Fixed header + 0, 7, // Packet ID - LSB+MSB + CodeSuccess.Code, // Reason Code + 16, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + Pubcomp: { + { + Case: TPubcomp, + Desc: "pubcomp", + Primary: true, + RawBytes: []byte{ + Pubcomp << 4, 2, // Fixed header + 0, 7, // Packet ID - LSB+MSB + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pubcomp, + Remaining: 2, + }, + PacketID: 7, + }, + }, + { + Case: TPubcompMqtt5, + Desc: "mqtt5", + Primary: true, + RawBytes: []byte{ + Pubcomp << 4, 20, // Fixed header + 0, 7, // Packet ID - LSB+MSB + CodeSuccess.Code, // Reason Code + 16, // Properties Length + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Pubcomp, + Remaining: 20, + }, + PacketID: 7, + ReasonCode: CodeSuccess.Code, + 
Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TPubcompMqtt5AckNoPacket, + Desc: "mqtt5 no packet id ack", + Primary: true, + RawBytes: append([]byte{ + Pubcomp << 4, 34, // Fixed header + 0, 7, // Packet ID - LSB+MSB + ErrPacketIdentifierNotFound.Code, // Reason Code + 30, // Properties Length + 31, 0, byte(len(ErrPacketIdentifierNotFound.Reason)), + }, []byte(ErrPacketIdentifierNotFound.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Pubcomp, + Remaining: 34, + }, + PacketID: 7, + ReasonCode: ErrPacketIdentifierNotFound.Code, + Properties: Properties{ + ReasonString: ErrPacketIdentifierNotFound.Reason, + }, + }, + }, + // Validation + { + Case: TPubcompInvalidReason, + Desc: "invalid reason code", + Group: "validate", + FailFirst: ErrProtocolViolationInvalidReason, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pubcomp, + }, + ReasonCode: ErrConnectionRateExceeded.Code, + }, + }, + // Fail states + { + Case: TPubcompMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Pubcomp << 4, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TPubcompMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Pubcomp << 4, 34, // Fixed header + 0, 7, // Packet ID - LSB+MSB + ErrPacketIdentifierNotFound.Code, // Reason Code + 22, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + Subscribe: { + { + Case: TSubscribe, + Desc: "subscribe", + Primary: true, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 10, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, // QoS + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Remaining: 10, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + {Filter: "a/b/c"}, + }, 
+ }, + }, + { + Case: TSubscribeMany, + Desc: "many", + Primary: true, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 30, // Fixed header + 0, 15, // Packet ID - LSB+MSB + + 0, 3, // Topic Name - LSB+MSB + 'a', '/', 'b', // Topic Name + 0, // QoS + + 0, 11, // Topic Name - LSB+MSB + 'd', '/', 'e', '/', 'f', '/', 'g', '/', 'h', '/', 'i', // Topic Name + 1, // QoS + + 0, 5, // Topic Name - LSB+MSB + 'x', '/', 'y', '/', 'z', // Topic Name + 2, // QoS + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Remaining: 30, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + {Filter: "a/b", Qos: 0}, + {Filter: "d/e/f/g/h/i", Qos: 1}, + {Filter: "x/y/z", Qos: 2}, + }, + }, + }, + { + Case: TSubscribeMqtt5, + Desc: "mqtt5", + Primary: true, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 31, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 20, + 11, 202, 212, 19, // Subscription Identifier (11) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + + 0, 5, 'a', '/', 'b', '/', 'c', // Topic Name + 46, // subscription options + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Subscribe, + Remaining: 31, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + { + Filter: "a/b/c", + Qos: 2, + NoLocal: true, + RetainAsPublished: true, + RetainHandling: 2, + Identifier: 322122, + }, + }, + Properties: Properties{ + SubscriptionIdentifier: []int{322122}, + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TSubscribeRetainHandling1, + Desc: "retain handling 1", + Primary: true, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 11, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, // no properties + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0 | 1<<4, // subscription options + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Subscribe, + Remaining: 11, + Qos: 1, + }, + PacketID: 15, + 
Filters: Subscriptions{ + { + Filter: "a/b/c", + RetainHandling: 1, + }, + }, + }, + }, + { + Case: TSubscribeRetainHandling2, + Desc: "retain handling 2", + Primary: true, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 11, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, // no properties + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0 | 2<<4, // subscription options + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Subscribe, + Remaining: 11, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + { + Filter: "a/b/c", + RetainHandling: 2, + }, + }, + }, + }, + { + Case: TSubscribeRetainAsPublished, + Desc: "retain as published", + Primary: true, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 11, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, // no properties + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0 | 1<<3, // subscription options + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Subscribe, + Remaining: 11, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + { + Filter: "a/b/c", + RetainAsPublished: true, + }, + }, + }, + }, + { + Case: TSubscribeInvalidFilter, + Desc: "invalid filter", + Group: "reference", + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + {Filter: "$SHARE/#", Identifier: 5}, + }, + }, + }, + { + Case: TSubscribeInvalidSharedNoLocal, + Desc: "shared and no local", + Group: "reference", + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + {Filter: "$SHARE/tmp/a/b/c", Identifier: 5, NoLocal: true}, + }, + }, + }, + + // Fail states + { + Case: TSubscribeMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: 
TSubscribeMalTopic, + Desc: "malformed topic", + Group: "decode", + FailFirst: ErrMalformedTopic, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 2, // Fixed header + 0, 21, // Packet ID - LSB+MSB + + 0, 3, // Topic Name - LSB+MSB + 'a', '/', + }, + }, + { + Case: TSubscribeMalQos, + Desc: "malformed subscribe - qos", + Group: "decode", + FailFirst: ErrMalformedQos, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 2, // Fixed header + 0, 22, // Packet ID - LSB+MSB + + 0, 3, // Topic Name - LSB+MSB + 'j', '/', 'b', // Topic Name + + }, + }, + { + Case: TSubscribeMalQosRange, + Desc: "malformed qos out of range", + Group: "decode", + FailFirst: ErrProtocolViolationQosOutOfRange, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 2, // Fixed header + 0, 22, // Packet ID - LSB+MSB + + 0, 3, // Topic Name - LSB+MSB + 'c', '/', 'd', // Topic Name + 5, // QoS + + }, + }, + { + Case: TSubscribeMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Subscribe << 4, 11, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 4, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + + // Validation + { + Case: TSubscribeInvalidQosMustPacketID, + Desc: "no packet id with qos > 0", + Group: "validate", + Expect: ErrProtocolViolationNoPacketID, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Qos: 1, + }, + PacketID: 0, + Filters: Subscriptions{ + {Filter: "a/b"}, + }, + }, + }, + { + Case: TSubscribeInvalidNoFilters, + Desc: "no filters", + Group: "validate", + Expect: ErrProtocolViolationNoFilters, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Qos: 1, + }, + PacketID: 2, + }, + }, + + { + Case: TSubscribeInvalidIdentifierOversize, + Desc: "oversize identifier", + Group: "validate", + Expect: ErrProtocolViolationOversizeSubID, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Qos: 1, + }, + PacketID: 2, + Filters: Subscriptions{ + {Filter: "a/b", Identifier: 5}, + {Filter: 
"d/f", Identifier: 268435456}, + }, + }, + }, + + // Spec tests + { + Case: TSubscribeSpecQosMustPacketID, + Desc: "no packet id with qos > 0", + Group: "encode", + Expect: ErrProtocolViolationNoPacketID, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 10, // Fixed header + 0, 0, // Packet ID - LSB+MSB + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 1, // QoS + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Qos: 1, + Remaining: 10, + }, + Filters: Subscriptions{ + {Filter: "a/b/c", Qos: 1}, + }, + PacketID: 0, + }, + }, + }, + Suback: { + { + Case: TSuback, + Desc: "suback", + Primary: true, + RawBytes: []byte{ + Suback << 4, 3, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, // Return Code QoS 0 + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Suback, + Remaining: 3, + }, + PacketID: 15, + ReasonCodes: []byte{0}, + }, + }, + { + Case: TSubackMany, + Desc: "many", + Primary: true, + RawBytes: []byte{ + Suback << 4, 6, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, // Return Code QoS 0 + 1, // Return Code QoS 1 + 2, // Return Code QoS 2 + 0x80, // Return Code fail + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Suback, + Remaining: 6, + }, + PacketID: 15, + ReasonCodes: []byte{0, 1, 2, 0x80}, + }, + }, + { + Case: TSubackDeny, + Desc: "deny mqtt5", + Primary: true, + RawBytes: []byte{ + Suback << 4, 4, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, // no properties + ErrNotAuthorized.Code, // Return Code QoS 0 + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Suback, + Remaining: 4, + }, + PacketID: 15, + ReasonCodes: []byte{ErrNotAuthorized.Code}, + }, + }, + { + Case: TSubackUnspecifiedError, + Desc: "unspecified error", + Primary: true, + RawBytes: []byte{ + Suback << 4, 3, // Fixed header + 0, 15, // Packet ID - LSB+MSB + ErrUnspecifiedError.Code, // Return Code QoS 0 + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Suback, + 
Remaining: 3, + }, + PacketID: 15, + ReasonCodes: []byte{ErrUnspecifiedError.Code}, + }, + }, + { + Case: TSubackUnspecifiedErrorMqtt5, + Desc: "unspecified error mqtt5", + Primary: true, + RawBytes: []byte{ + Suback << 4, 4, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, // no properties + ErrUnspecifiedError.Code, // Return Code QoS 0 + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Suback, + Remaining: 4, + }, + PacketID: 15, + ReasonCodes: []byte{ErrUnspecifiedError.Code}, + }, + }, + { + Case: TSubackMqtt5, + Desc: "mqtt5", + Primary: true, + RawBytes: []byte{ + Suback << 4, 20, // Fixed header + 0, 15, // Packet ID + 16, // Properties Length + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + CodeGrantedQos2.Code, // Return Code QoS 0 + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Suback, + Remaining: 20, + }, + PacketID: 15, + ReasonCodes: []byte{CodeGrantedQos2.Code}, + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TSubackPacketIDInUse, + Desc: "packet id in use", + Primary: true, + RawBytes: []byte{ + Suback << 4, 47, // Fixed header + 0, 15, // Packet ID + 43, // Properties Length + 31, 0, 24, 'p', 'a', 'c', 'k', 'e', 't', + ' ', 'i', 'd', 'e', 'n', 't', 'i', 'f', 'i', 'e', 'r', + ' ', 'i', 'n', + ' ', 'u', 's', 'e', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + ErrPacketIdentifierInUse.Code, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Suback, + Remaining: 47, + }, + PacketID: 15, + ReasonCodes: []byte{ErrPacketIdentifierInUse.Code}, + Properties: Properties{ + ReasonString: ErrPacketIdentifierInUse.Reason, + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + + // Fail states + { + Case: TSubackMalPacketID, + Desc: 
"malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Suback << 4, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TSubackMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Suback << 4, 47, + 0, 15, + 43, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + + { + Case: TSubackInvalidFilter, + Desc: "malformed packet id", + Group: "reference", + RawBytes: []byte{ + Suback << 4, 4, + 0, 15, + 0, // no properties + ErrTopicFilterInvalid.Code, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + { + Case: TSubackInvalidSharedNoLocal, + Desc: "invalid shared no local", + Group: "reference", + RawBytes: []byte{ + Suback << 4, 4, + 0, 15, + 0, // no properties + ErrProtocolViolationInvalidSharedNoLocal.Code, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + + Unsubscribe: { + { + Case: TUnsubscribe, + Desc: "unsubscribe", + Primary: true, + RawBytes: []byte{ + Unsubscribe<<4 | 1<<1, 9, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Unsubscribe, + Remaining: 9, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + {Filter: "a/b/c"}, + }, + }, + }, + { + Case: TUnsubscribeMany, + Desc: "unsubscribe many", + Primary: true, + RawBytes: []byte{ + Unsubscribe<<4 | 1<<1, 27, // Fixed header + 0, 35, // Packet ID - LSB+MSB + + 0, 3, // Topic Name - LSB+MSB + 'a', '/', 'b', // Topic Name + + 0, 11, // Topic Name - LSB+MSB + 'd', '/', 'e', '/', 'f', '/', 'g', '/', 'h', '/', 'i', // Topic Name + + 0, 5, // Topic Name - LSB+MSB + 'x', '/', 'y', '/', 'z', // Topic Name + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Unsubscribe, + Remaining: 27, + Qos: 1, + }, + PacketID: 35, + Filters: Subscriptions{ + {Filter: "a/b"}, + {Filter: "d/e/f/g/h/i"}, + {Filter: "x/y/z"}, + }, + 
}, + }, + { + Case: TUnsubscribeMqtt5, + Desc: "mqtt5", + Primary: true, + RawBytes: []byte{ + Unsubscribe<<4 | 1<<1, 31, // Fixed header + 0, 15, // Packet ID - LSB+MSB + + 16, + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + + 0, 3, // Topic Name - LSB+MSB + 'a', '/', 'b', + + 0, 5, // Topic Name - LSB+MSB + 'x', '/', 'y', '/', 'w', + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Unsubscribe, + Remaining: 31, + Qos: 1, + }, + PacketID: 15, + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + Filters: Subscriptions{ + {Filter: "a/b"}, + {Filter: "x/y/w"}, + }, + }, + }, + + // Fail states + { + Case: TUnsubscribeMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Unsubscribe << 4, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TUnsubscribeMalTopicName, + Desc: "malformed topic", + Group: "decode", + FailFirst: ErrMalformedTopic, + RawBytes: []byte{ + Unsubscribe << 4, 2, // Fixed header + 0, 21, // Packet ID - LSB+MSB + 0, 3, // Topic Name - LSB+MSB + 'a', '/', + }, + }, + { + Case: TUnsubscribeMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Unsubscribe<<4 | 1<<1, 31, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 16, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + + { + Case: TUnsubscribeInvalidQosMustPacketID, + Desc: "no packet id with qos > 0", + Group: "validate", + Expect: ErrProtocolViolationNoPacketID, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Unsubscribe, + Qos: 1, + }, + PacketID: 0, + Filters: Subscriptions{ + Subscription{Filter: "a/b"}, + }, + }, + }, + { + Case: TUnsubscribeInvalidNoFilters, + Desc: "no filters", + Group: "validate", + Expect: ErrProtocolViolationNoFilters, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: 
Unsubscribe, + Qos: 1, + }, + PacketID: 2, + }, + }, + + { + Case: TUnsubscribeSpecQosMustPacketID, + Desc: "no packet id with qos > 0", + Group: "encode", + Expect: ErrProtocolViolationNoPacketID, + RawBytes: []byte{ + Unsubscribe<<4 | 1<<1, 9, // Fixed header + 0, 0, // Packet ID - LSB+MSB + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Unsubscribe, + Qos: 1, + Remaining: 9, + }, + PacketID: 0, + Filters: Subscriptions{ + {Filter: "a/b/c"}, + }, + }, + }, + }, + Unsuback: { + { + Case: TUnsuback, + Desc: "unsuback", + Primary: true, + RawBytes: []byte{ + Unsuback << 4, 2, // Fixed header + 0, 15, // Packet ID - LSB+MSB + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Unsuback, + Remaining: 2, + }, + PacketID: 15, + }, + }, + { + Case: TUnsubackMany, + Desc: "unsuback many", + Primary: true, + RawBytes: []byte{ + Unsuback << 4, 5, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, + CodeSuccess.Code, CodeSuccess.Code, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Unsuback, + Remaining: 5, + }, + PacketID: 15, + ReasonCodes: []byte{CodeSuccess.Code, CodeSuccess.Code}, + }, + }, + { + Case: TUnsubackMqtt5, + Desc: "mqtt5", + Primary: true, + RawBytes: []byte{ + Unsuback << 4, 21, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 16, // Properties Length + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + CodeSuccess.Code, CodeNoSubscriptionExisted.Code, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Unsuback, + Remaining: 21, + }, + PacketID: 15, + ReasonCodes: []byte{CodeSuccess.Code, CodeNoSubscriptionExisted.Code}, + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TUnsubackPacketIDInUse, + Desc: "packet id in use", + Primary: true, + RawBytes: []byte{ + Unsuback << 4, 48, // 
Fixed header + 0, 15, // Packet ID - LSB+MSB + 43, // Properties Length + 31, 0, 24, 'p', 'a', 'c', 'k', 'e', 't', + ' ', 'i', 'd', 'e', 'n', 't', 'i', 'f', 'i', 'e', 'r', + ' ', 'i', 'n', + ' ', 'u', 's', 'e', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + ErrPacketIdentifierInUse.Code, ErrPacketIdentifierInUse.Code, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Unsuback, + Remaining: 48, + }, + PacketID: 15, + ReasonCodes: []byte{ErrPacketIdentifierInUse.Code, ErrPacketIdentifierInUse.Code}, + Properties: Properties{ + ReasonString: ErrPacketIdentifierInUse.Reason, + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + + // Fail states + { + Case: TUnsubackMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Unsuback << 4, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TUnsubackMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Unsuback << 4, 48, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 43, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + + Pingreq: { + { + Case: TPingreq, + Desc: "ping request", + Primary: true, + RawBytes: []byte{ + Pingreq << 4, 0, // fixed header + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pingreq, + Remaining: 0, + }, + }, + }, + }, + Pingresp: { + { + Case: TPingresp, + Desc: "ping response", + Primary: true, + RawBytes: []byte{ + Pingresp << 4, 0, // fixed header + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pingresp, + Remaining: 0, + }, + }, + }, + }, + + Disconnect: { + { + Case: TDisconnect, + Desc: "disconnect", + Primary: true, + RawBytes: []byte{ + Disconnect << 4, 0, // fixed header + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 0, + }, + }, 
+ }, + { + Case: TDisconnectTakeover, + Desc: "takeover", + Primary: true, + RawBytes: append([]byte{ + Disconnect << 4, 21, // fixed header + ErrSessionTakenOver.Code, // Reason Code + 19, // Properties Length + 31, 0, 16, // Reason String (31) + }, []byte(ErrSessionTakenOver.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 0, + }, + ReasonCode: ErrSessionTakenOver.Code, + Properties: Properties{ + ReasonString: ErrSessionTakenOver.Reason, + }, + }, + }, + { + Case: TDisconnectShuttingDown, + Desc: "shutting down", + Primary: true, + RawBytes: append([]byte{ + Disconnect << 4, 25, // fixed header + ErrServerShuttingDown.Code, // Reason Code + 23, // Properties Length + 31, 0, 20, // Reason String (31) + }, []byte(ErrServerShuttingDown.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 0, + }, + ReasonCode: ErrServerShuttingDown.Code, + Properties: Properties{ + ReasonString: ErrServerShuttingDown.Reason, + }, + }, + }, + { + Case: TDisconnectMqtt5, + Desc: "mqtt5", + Primary: true, + RawBytes: append([]byte{ + Disconnect << 4, 22, // fixed header + CodeDisconnect.Code, // Reason Code + 20, // Properties Length + 17, 0, 0, 0, 120, // Session Expiry Interval (17) + 31, 0, 12, // Reason String (31) + }, []byte(CodeDisconnect.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 22, + }, + ReasonCode: CodeDisconnect.Code, + Properties: Properties{ + ReasonString: CodeDisconnect.Reason, + SessionExpiryInterval: 120, + SessionExpiryIntervalFlag: true, + }, + }, + }, + { + Case: TDisconnectSecondConnect, + Desc: "second connect packet mqtt5", + RawBytes: append([]byte{ + Disconnect << 4, 46, // fixed header + ErrProtocolViolationSecondConnect.Code, + 44, + 31, 0, 41, // Reason String (31) + }, []byte(ErrProtocolViolationSecondConnect.Reason)...), + Packet: &Packet{ + ProtocolVersion: 
5, + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 45, + }, + ReasonCode: ErrProtocolViolationSecondConnect.Code, + Properties: Properties{ + ReasonString: ErrProtocolViolationSecondConnect.Reason, + }, + }, + }, + { + Case: TDisconnectZeroNonZeroExpiry, + Desc: "zero non zero expiry", + RawBytes: []byte{ + Disconnect << 4, 2, // fixed header + ErrProtocolViolationZeroNonZeroExpiry.Code, + 0, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 2, + }, + ReasonCode: ErrProtocolViolationZeroNonZeroExpiry.Code, + }, + }, + { + Case: TDisconnectReceiveMaximum, + Desc: "receive maximum mqtt5", + RawBytes: append([]byte{ + Disconnect << 4, 29, // fixed header + ErrReceiveMaximum.Code, + 27, // Properties Length + 31, 0, 24, // Reason String (31) + }, []byte(ErrReceiveMaximum.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 29, + }, + ReasonCode: ErrReceiveMaximum.Code, + Properties: Properties{ + ReasonString: ErrReceiveMaximum.Reason, + }, + }, + }, + { + Case: TDisconnectDropProperties, + Desc: "drop oversize properties partial", + Group: "encode", + RawBytes: []byte{ + Disconnect << 4, 39, // fixed header + CodeDisconnect.Code, + 19, // length + 28, 0, 7, 'm', 'o', 'c', 'h', 'i', '-', '2', // Server Reference (28) + 31, 0, 6, 'r', 'e', 'a', 's', 'o', 'n', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + ActualBytes: []byte{ + Disconnect << 4, 12, // fixed header + CodeDisconnect.Code, + 10, // length + 28, 0, 7, 'm', 'o', 'c', 'h', 'i', '-', '2', // Server Reference (28) + }, + Packet: &Packet{ + Mods: Mods{ + MaxSize: 3, + }, + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 40, + }, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + ReasonString: "reason", + ServerReference: "mochi-2", + User: []UserProperty{ + { 
+ Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + // fail states + { + Case: TDisconnectMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Disconnect << 4, 48, // fixed header + CodeDisconnect.Code, // Reason Code + 46, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + { + Case: TDisconnectMalReasonCode, + Desc: "malformed reason code", + Group: "decode", + FailFirst: ErrMalformedReasonCode, + RawBytes: []byte{ + Disconnect << 4, 48, // fixed header + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + Auth: { + { + Case: TAuth, + Desc: "auth", + Primary: true, + RawBytes: []byte{ + Auth << 4, 47, + CodeSuccess.Code, // reason code + 45, + 21, 0, 5, 'S', 'H', 'A', '-', '1', // Authentication Method (21) + 22, 0, 9, 'a', 'u', 't', 'h', '-', 'd', 'a', 't', 'a', // Authentication Data (22) + 31, 0, 6, 'r', 'e', 'a', 's', 'o', 'n', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Auth, + Remaining: 47, + }, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + AuthenticationMethod: "SHA-1", + AuthenticationData: []byte("auth-data"), + ReasonString: "reason", + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TAuthMalReasonCode, + Desc: "malformed reason code", + Group: "decode", + FailFirst: ErrMalformedReasonCode, + RawBytes: []byte{ + Auth << 4, 47, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Auth, + }, + ReasonCode: CodeNoMatchingSubscribers.Code, + }, + }, + // fail states + { + Case: TAuthMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Auth << 4, 3, + CodeSuccess.Code, + 12, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + // Validation + { + Case: TAuthInvalidReason, 
+ Desc: "invalid reason code", + Group: "validate", + Expect: ErrProtocolViolationInvalidReason, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Auth, + }, + ReasonCode: CodeNoMatchingSubscribers.Code, + }, + }, + { + Case: TAuthInvalidReason2, + Desc: "invalid reason code", + Group: "validate", + Expect: ErrProtocolViolationInvalidReason, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Auth, + }, + ReasonCode: CodeNoMatchingSubscribers.Code, + }, + }, + }, +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/server.go b/vendor/github.com/mochi-mqtt/server/v2/server.go new file mode 100644 index 000000000..e525b1618 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/server.go @@ -0,0 +1,1679 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +// Package mqtt provides a high performance, fully compliant MQTT v5 broker server with v3.1.1 backward compatibility. +package mqtt + +import ( + "errors" + "fmt" + "math" + "net" + "os" + "runtime" + "sort" + "strconv" + "sync/atomic" + "time" + + "github.com/mochi-mqtt/server/v2/hooks/storage" + "github.com/mochi-mqtt/server/v2/listeners" + "github.com/mochi-mqtt/server/v2/packets" + "github.com/mochi-mqtt/server/v2/system" + + "log/slog" +) + +const ( + Version = "2.4.6" // the current server version. + defaultSysTopicInterval int64 = 1 // the interval between $SYS topic publishes + LocalListener = "local" + InlineClientId = "inline" +) + +var ( + // Deprecated: Use NewDefaultServerCapabilities to avoid data race issue. 
+ DefaultServerCapabilities = NewDefaultServerCapabilities() + + ErrListenerIDExists = errors.New("listener id already exists") // a listener with the same id already exists + ErrConnectionClosed = errors.New("connection not open") // connection is closed + ErrInlineClientNotEnabled = errors.New("please set Options.InlineClient=true to use this feature") // inline client is not enabled by default +) + +// Capabilities indicates the capabilities and features provided by the server. +type Capabilities struct { + MaximumMessageExpiryInterval int64 // maximum message expiry if message expiry is 0 or over + MaximumClientWritesPending int32 // maximum number of pending message writes for a client + MaximumSessionExpiryInterval uint32 // maximum number of seconds to keep disconnected sessions + MaximumPacketSize uint32 // maximum packet size, no limit if 0 + maximumPacketID uint32 // unexported, used for testing only + ReceiveMaximum uint16 // maximum number of concurrent qos messages per client + MaximumInflight uint16 // maximum number of qos > 0 messages can be stored, 0(=8192)-65535 + TopicAliasMaximum uint16 // maximum topic alias value + SharedSubAvailable byte // support of shared subscriptions + MinimumProtocolVersion byte // minimum supported mqtt version + Compatibilities Compatibilities + MaximumQos byte // maximum qos value available to clients + RetainAvailable byte // support of retain messages + WildcardSubAvailable byte // support of wildcard subscriptions + SubIDAvailable byte // support of subscription identifiers +} + +// NewDefaultServerCapabilities defines the default features and capabilities provided by the server. 
+func NewDefaultServerCapabilities() *Capabilities { + return &Capabilities{ + MaximumMessageExpiryInterval: 60 * 60 * 24, // maximum message expiry if message expiry is 0 or over + MaximumClientWritesPending: 1024 * 8, // maximum number of pending message writes for a client + MaximumSessionExpiryInterval: math.MaxUint32, // maximum number of seconds to keep disconnected sessions + MaximumPacketSize: 0, // no maximum packet size + maximumPacketID: math.MaxUint16, + ReceiveMaximum: 1024, // maximum number of concurrent qos messages per client + MaximumInflight: 1024 * 8, // maximum number of qos > 0 messages can be stored + TopicAliasMaximum: math.MaxUint16, // maximum topic alias value + SharedSubAvailable: 1, // shared subscriptions are available + MinimumProtocolVersion: 3, // minimum supported mqtt version (3.0.0) + MaximumQos: 2, // maximum qos value available to clients + RetainAvailable: 1, // retain messages is available + WildcardSubAvailable: 1, // wildcard subscriptions are available + SubIDAvailable: 1, // subscription identifiers are available + } +} + +// Compatibilities provides flags for using compatibility modes. +type Compatibilities struct { + ObscureNotAuthorized bool // return unspecified errors instead of not authorized + PassiveClientDisconnect bool // don't disconnect the client forcefully after sending disconnect packet (paho - spec violation) + AlwaysReturnResponseInfo bool // always return response info (useful for testing) + RestoreSysInfoOnRestart bool // restore system info from store as if server never stopped + NoInheritedPropertiesOnAck bool // don't allow inherited user properties on ack (paho - spec violation) +} + +// Options contains configurable options for the server. +type Options struct { + // Capabilities defines the server features and behaviour. If you only wish to modify + // several of these values, set them explicitly - e.g. 
+ // server.Options.Capabilities.MaximumClientWritesPending = 16 * 1024 + Capabilities *Capabilities + + // ClientNetWriteBufferSize specifies the size of the client *bufio.Writer write buffer. + ClientNetWriteBufferSize int + + // ClientNetReadBufferSize specifies the size of the client *bufio.Reader read buffer. + ClientNetReadBufferSize int + + // Logger specifies a custom configured implementation of zerolog to override + // the servers default logger configuration. If you wish to change the log level, + // of the default logger, you can do so by setting + // server := mqtt.New(nil) + // level := new(slog.LevelVar) + // server.Slog = slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{ + // Level: level, + // })) + // level.Set(slog.LevelDebug) + Logger *slog.Logger + + // SysTopicResendInterval specifies the interval between $SYS topic updates in seconds. + SysTopicResendInterval int64 + + // Enable Inline client to allow direct subscribing and publishing from the parent codebase, + // with negligible performance difference (disabled by default to prevent confusion in statistics). + InlineClient bool +} + +// Server is an MQTT broker server. It should be created with server.New() +// in order to ensure all the internal fields are correctly populated. 
+type Server struct {
+	Options      *Options             // configurable server options
+	Listeners    *listeners.Listeners // listeners are network interfaces which listen for new connections
+	Clients      *Clients             // clients known to the broker
+	Topics       *TopicsIndex         // an index of topic filter subscriptions and retained messages
+	Info         *system.Info         // values about the server commonly known as $SYS topics
+	loop         *loop                // loop contains tickers for the system event loop
+	done         chan bool            // indicate that the server is ending
+	Log          *slog.Logger         // minimal no-alloc logger
+	hooks        *Hooks               // hooks contains hooks for extra functionality such as auth and persistent storage
+	inlineClient *Client              // inlineClient is a special client used for inline subscriptions and inline Publish
+}
+
+// loop contains interval tickers for the system events loop.
+type loop struct {
+	sysTopics      *time.Ticker     // interval ticker for sending updating $SYS topics
+	clientExpiry   *time.Ticker     // interval ticker for cleaning expired clients
+	inflightExpiry *time.Ticker     // interval ticker for cleaning up expired inflight messages
+	retainedExpiry *time.Ticker     // interval ticker for cleaning retained messages
+	willDelaySend  *time.Ticker     // interval ticker for sending Will Messages with a delay
+	willDelayed    *packets.Packets // activate LWT packets which will be sent after a delay
+}
+
+// ops contains server values which can be propagated to other structs.
+// NOTE(review): an ops value is handed to each client (see NewClient) so clients
+// can reference shared server state without holding a back-pointer to the Server.
+type ops struct {
+	options *Options     // a pointer to the server options and capabilities, for referencing in clients
+	info    *system.Info // pointers to server system info
+	hooks   *Hooks       // pointer to the server hooks
+	log     *slog.Logger // a structured logger for the client
+}
+
+// New returns a new instance of mochi mqtt broker. Optional parameters
+// can be specified to override some default settings (see Options).
+func New(opts *Options) *Server {
+	if opts == nil {
+		opts = new(Options)
+	}
+
+	// ensureDefaults mutates opts in place, so the normalized values are
+	// shared with the caller's Options value.
+	opts.ensureDefaults()
+
+	s := &Server{
+		done:      make(chan bool),
+		Clients:   NewClients(),
+		Topics:    NewTopicsIndex(),
+		Listeners: listeners.New(),
+		loop: &loop{
+			sysTopics:      time.NewTicker(time.Second * time.Duration(opts.SysTopicResendInterval)),
+			clientExpiry:   time.NewTicker(time.Second),
+			inflightExpiry: time.NewTicker(time.Second),
+			retainedExpiry: time.NewTicker(time.Second),
+			willDelaySend:  time.NewTicker(time.Second),
+			willDelayed:    packets.NewPackets(),
+		},
+		Options: opts,
+		Info: &system.Info{
+			Version: Version,
+			Started: time.Now().Unix(),
+		},
+		Log: opts.Logger,
+		hooks: &Hooks{
+			Log: opts.Logger,
+		},
+	}
+
+	// The inline client is only materialised when explicitly enabled.
+	if s.Options.InlineClient {
+		s.inlineClient = s.NewClient(nil, LocalListener, InlineClientId, true)
+		s.Clients.Add(s.inlineClient)
+	}
+
+	return s
+}
+
+// ensureDefaults ensures that the server starts with sane default values, if none are provided.
+func (o *Options) ensureDefaults() {
+	if o.Capabilities == nil {
+		o.Capabilities = NewDefaultServerCapabilities()
+	}
+
+	o.Capabilities.maximumPacketID = math.MaxUint16 // spec maximum is 65535
+
+	if o.Capabilities.MaximumInflight == 0 {
+		o.Capabilities.MaximumInflight = 1024 * 8
+	}
+
+	if o.SysTopicResendInterval == 0 {
+		o.SysTopicResendInterval = defaultSysTopicInterval
+	}
+
+	if o.ClientNetWriteBufferSize == 0 {
+		o.ClientNetWriteBufferSize = 1024 * 2
+	}
+
+	if o.ClientNetReadBufferSize == 0 {
+		o.ClientNetReadBufferSize = 1024 * 2
+	}
+
+	// Default logger: text handler writing to stdout at slog's default level.
+	if o.Logger == nil {
+		log := slog.New(slog.NewTextHandler(os.Stdout, nil))
+		o.Logger = log
+	}
+}
+
+// NewClient returns a new Client instance, populated with all the required values and
+// references to be used with the server. If you are using this client to directly publish
+// messages from the embedding application, set the inline flag to true to bypass ACL and
+// topic validation checks.
+func (s *Server) NewClient(c net.Conn, listener string, id string, inline bool) *Client {
+	cl := newClient(c, &ops{ // [MQTT-3.1.2-6] implicit
+		options: s.Options,
+		info:    s.Info,
+		hooks:   s.hooks,
+		log:     s.Log,
+	})
+
+	cl.ID = id
+	cl.Net.Listener = listener
+
+	if inline { // inline clients bypass acl and some validity checks.
+		cl.Net.Inline = true
+		// By default, we don't want to restrict developer publishes,
+		// but if you do, reset this after creating inline client.
+		cl.State.Inflight.ResetReceiveQuota(math.MaxInt32)
+	}
+
+	return cl
+}
+
+// AddHook attaches a new Hook to the server. Ideally, this should be called
+// before the server is started with s.Serve().
+func (s *Server) AddHook(hook Hook, config any) error {
+	nl := s.Log.With("hook", hook.ID()) // each hook gets a logger annotated with its own id
+	hook.SetOpts(nl, &HookOptions{
+		Capabilities: s.Options.Capabilities,
+	})
+
+	s.Log.Info("added hook", "hook", hook.ID())
+	return s.hooks.Add(hook, config)
+}
+
+// AddListener adds a new network listener to the server, for receiving incoming client connections.
+func (s *Server) AddListener(l listeners.Listener) error {
+	if _, ok := s.Listeners.Get(l.ID()); ok {
+		return ErrListenerIDExists
+	}
+
+	nl := s.Log.With(slog.String("listener", l.ID()))
+	err := l.Init(nl)
+	if err != nil {
+		return err
+	}
+
+	s.Listeners.Add(l)
+
+	s.Log.Info("attached listener", "id", l.ID(), "protocol", l.Protocol(), "address", l.Address())
+	return nil
+}
+
+// Serve starts the event loops responsible for establishing client connections
+// on all attached listeners, publishing the system topics, and starting all hooks.
+func (s *Server) Serve() error {
+	s.Log.Info("mochi mqtt starting", "version", Version)
+	defer s.Log.Info("mochi mqtt server started")
+
+	// Restore persisted server data (via readStore) before accepting
+	// connections, when the attached hooks provide stored-data capabilities.
+	if s.hooks.Provides(
+		StoredClients,
+		StoredInflightMessages,
+		StoredRetainedMessages,
+		StoredSubscriptions,
+		StoredSysInfo,
+	) {
+		err := s.readStore()
+		if err != nil {
+			return err
+		}
+	}
+
+	go s.eventLoop()                            // spin up event loop for issuing $SYS values and closing server.
+	s.Listeners.ServeAll(s.EstablishConnection) // start listening on all listeners.
+	s.publishSysTopics()                        // begin publishing $SYS system values.
+	s.hooks.OnStarted()
+
+	return nil
+}
+
+// eventLoop loops forever, running various server housekeeping methods at different intervals.
+func (s *Server) eventLoop() {
+	s.Log.Debug("system event loop started")
+	defer s.Log.Debug("system event loop halted")
+
+	for {
+		select {
+		case <-s.done:
+			// NOTE(review): only the sysTopics ticker is stopped here; the other
+			// loop tickers are presumably stopped elsewhere during shutdown - confirm.
+			s.loop.sysTopics.Stop()
+			return
+		case <-s.loop.sysTopics.C:
+			s.publishSysTopics()
+		case <-s.loop.clientExpiry.C:
+			s.clearExpiredClients(time.Now().Unix())
+		case <-s.loop.retainedExpiry.C:
+			s.clearExpiredRetainedMessages(time.Now().Unix())
+		case <-s.loop.willDelaySend.C:
+			s.sendDelayedLWT(time.Now().Unix())
+		case <-s.loop.inflightExpiry.C:
+			s.clearExpiredInflights(time.Now().Unix())
+		}
+	}
+}
+
+// EstablishConnection establishes a new client when a listener accepts a new connection.
+func (s *Server) EstablishConnection(listener string, c net.Conn) error {
+	cl := s.NewClient(c, listener, "", false)
+	return s.attachClient(cl, listener)
+}
+
+// attachClient validates an incoming client connection and if viable, attaches the client
+// to the server, performs session housekeeping, and reads incoming packets.
+func (s *Server) attachClient(cl *Client, listener string) error {
+	// Done is registered before Add(1) executes, but defers only fire at
+	// return, so the waitgroup is incremented before it can be decremented.
+	defer s.Listeners.ClientsWg.Done()
+	s.Listeners.ClientsWg.Add(1)
+
+	go cl.WriteLoop()
+	defer cl.Stop(nil)
+
+	pk, err := s.readConnectionPacket(cl)
+	if err != nil {
+		return fmt.Errorf("read connection: %w", err)
+	}
+
+	cl.ParseConnect(listener, pk)
+	code := s.validateConnect(cl, pk) // [MQTT-3.1.4-1] [MQTT-3.1.4-2]
+	if code != packets.CodeSuccess {
+		if err := s.SendConnack(cl, code, false, nil); err != nil {
+			return fmt.Errorf("invalid connection send ack: %w", err)
+		}
+		return code // [MQTT-3.2.2-7] [MQTT-3.1.4-6]
+	}
+
+	err = s.hooks.OnConnect(cl, pk)
+	if err != nil {
+		return err
+	}
+
+	cl.refreshDeadline(cl.State.Keepalive)
+	if !s.hooks.OnConnectAuthenticate(cl, pk) { // [MQTT-3.1.4-2]
+		err := s.SendConnack(cl, packets.ErrBadUsernameOrPassword, false, nil)
+		if err != nil {
+			return fmt.Errorf("invalid connection send ack: %w", err)
+		}
+
+		return packets.ErrBadUsernameOrPassword
+	}
+
+	atomic.AddInt64(&s.Info.ClientsConnected, 1)
+	defer atomic.AddInt64(&s.Info.ClientsConnected, -1)
+
+	s.hooks.OnSessionEstablish(cl, pk)
+
+	sessionPresent := s.inheritClientSession(pk, cl)
+	s.Clients.Add(cl) // [MQTT-4.1.0-1]
+
+	err = s.SendConnack(cl, code, sessionPresent, nil) // [MQTT-3.1.4-5] [MQTT-3.2.0-1] [MQTT-3.2.0-2] &[MQTT-3.14.0-1]
+	if err != nil {
+		return fmt.Errorf("ack connection packet: %w", err)
+	}
+
+	s.loop.willDelayed.Delete(cl.ID) // [MQTT-3.1.3-9]
+
+	if sessionPresent {
+		err = cl.ResendInflightMessages(true)
+		if err != nil {
+			return fmt.Errorf("resend inflight: %w", err)
+		}
+	}
+
+	s.hooks.OnSessionEstablished(cl, pk)
+
+	// NOTE(review): cl.Read presumably blocks here, dispatching incoming
+	// packets to receivePacket until the connection ends - confirm.
+	err = cl.Read(s.receivePacket)
+	if err != nil {
+		s.sendLWT(cl)
+		cl.Stop(err)
+	} else {
+		cl.Properties.Will = Will{} // [MQTT-3.14.4-3] [MQTT-3.1.2-10]
+	}
+	s.Log.Debug("client disconnected", "error", err, "client", cl.ID, "remote", cl.Net.Remote, "listener", listener)
+
+	expire := (cl.Properties.ProtocolVersion == 5 && cl.Properties.Props.SessionExpiryInterval == 0) || (cl.Properties.ProtocolVersion < 5 && cl.Properties.Clean)
+	s.hooks.OnDisconnect(cl, err, expire)
+
+	if expire && atomic.LoadUint32(&cl.State.isTakenOver) == 0 {
+		cl.ClearInflights()
+		s.UnsubscribeClient(cl)
+		s.Clients.Delete(cl.ID) // [MQTT-4.1.0-2] ![MQTT-3.1.2-23]
+	}
+
+	return err
+}
+
+// readConnectionPacket reads the first incoming header for a connection, and if
+// acceptable, returns the valid connection packet.
+func (s *Server) readConnectionPacket(cl *Client) (pk packets.Packet, err error) {
+	fh := new(packets.FixedHeader)
+	err = cl.ReadFixedHeader(fh)
+	if err != nil {
+		return
+	}
+
+	if fh.Type != packets.Connect {
+		return pk, packets.ErrProtocolViolationRequireFirstConnect // [MQTT-3.1.0-1]
+	}
+
+	pk, err = cl.ReadPacket(fh)
+	if err != nil {
+		return
+	}
+
+	return
+}
+
+// receivePacket processes an incoming packet for a client, and issues a disconnect to the client
+// if an error has occurred (if mqtt v5).
+func (s *Server) receivePacket(cl *Client, pk packets.Packet) error {
+	err := s.processPacket(cl, pk)
+	if err != nil {
+		// Only v5 clients are sent an explicit DISCONNECT, and only for
+		// error-class reason codes.
+		if code, ok := err.(packets.Code); ok &&
+			cl.Properties.ProtocolVersion == 5 &&
+			code.Code >= packets.ErrUnspecifiedError.Code {
+			_ = s.DisconnectClient(cl, code)
+		}
+
+		s.Log.Warn("error processing packet", "error", err, "client", cl.ID, "listener", cl.Net.Listener, "pk", pk)
+
+		return err
+	}
+
+	return nil
+}
+
+// validateConnect validates that a connect packet is compliant.
+func (s *Server) validateConnect(cl *Client, pk packets.Packet) packets.Code {
+	code := pk.ConnectValidate() // [MQTT-3.1.4-1] [MQTT-3.1.4-2]
+	if code != packets.CodeSuccess {
+		return code
+	}
+
+	// v3 clients may not connect with clean=false and an empty client id.
+	if cl.Properties.ProtocolVersion < 5 && !pk.Connect.Clean && pk.Connect.ClientIdentifier == "" {
+		return packets.ErrUnspecifiedError
+	}
+
+	if cl.Properties.ProtocolVersion < s.Options.Capabilities.MinimumProtocolVersion {
+		return packets.ErrUnsupportedProtocolVersion // [MQTT-3.1.2-2]
+	} else if cl.Properties.Will.Qos > s.Options.Capabilities.MaximumQos {
+		return packets.ErrQosNotSupported // [MQTT-3.2.2-12]
+	} else if cl.Properties.Will.Retain && s.Options.Capabilities.RetainAvailable == 0x00 {
+		return packets.ErrRetainNotSupported // [MQTT-3.2.2-13]
+	}
+
+	return code
+}
+
+// inheritClientSession inherits the state of an existing client sharing the same
+// connection ID. If clean is true, the state of any previously existing client
+// session is abandoned.
+func (s *Server) inheritClientSession(pk packets.Packet, cl *Client) bool {
+	if existing, ok := s.Clients.Get(pk.Connect.ClientIdentifier); ok {
+		_ = s.DisconnectClient(existing, packets.ErrSessionTakenOver) // [MQTT-3.1.4-3]
+		if pk.Connect.Clean || (existing.Properties.Clean && existing.Properties.ProtocolVersion < 5) { // [MQTT-3.1.2-4] [MQTT-3.1.4-4]
+			s.UnsubscribeClient(existing)
+			existing.ClearInflights()
+			atomic.StoreUint32(&existing.State.isTakenOver, 1) // only set isTakenOver after unsubscribe has occurred
+			return false // [MQTT-3.2.2-3]
+		}
+
+		atomic.StoreUint32(&existing.State.isTakenOver, 1)
+		if existing.State.Inflight.Len() > 0 {
+			cl.State.Inflight = existing.State.Inflight.Clone() // [MQTT-3.1.2-5]
+			if cl.State.Inflight.maximumReceiveQuota == 0 && cl.ops.options.Capabilities.ReceiveMaximum != 0 {
+				cl.State.Inflight.ResetReceiveQuota(int32(cl.ops.options.Capabilities.ReceiveMaximum)) // server receive max per client
+				cl.State.Inflight.ResetSendQuota(int32(cl.Properties.Props.ReceiveMaximum)) // client receive max
+			}
+		}
+
+		// Transfer the old session's subscriptions to the new client.
+		for _, sub := range existing.State.Subscriptions.GetAll() {
+			existed := !s.Topics.Subscribe(cl.ID, sub) // [MQTT-3.8.4-3]
+			if !existed {
+				atomic.AddInt64(&s.Info.Subscriptions, 1)
+			}
+			cl.State.Subscriptions.Add(sub.Filter, sub)
+		}
+
+		// Clean the state of the existing client to prevent sequential take-overs
+		// from increasing memory usage by inflights + subs * client-id.
+		s.UnsubscribeClient(existing)
+		existing.ClearInflights()
+
+		s.Log.Debug("session taken over", "client", cl.ID, "old_remote", existing.Net.Remote, "new_remote", cl.Net.Remote)
+
+		return true // [MQTT-3.2.2-3]
+	}
+
+	if atomic.LoadInt64(&s.Info.ClientsConnected) > atomic.LoadInt64(&s.Info.ClientsMaximum) {
+		atomic.AddInt64(&s.Info.ClientsMaximum, 1)
+	}
+
+	return false // [MQTT-3.2.2-2]
+}
+
+// SendConnack returns a Connack packet to a client.
+func (s *Server) SendConnack(cl *Client, reason packets.Code, present bool, properties *packets.Properties) error {
+	if properties == nil {
+		properties = &packets.Properties{
+			ReceiveMaximum: s.Options.Capabilities.ReceiveMaximum,
+		}
+	}
+
+	properties.ReceiveMaximum = s.Options.Capabilities.ReceiveMaximum // 3.2.2.3.3 Receive Maximum
+	if cl.State.ServerKeepalive { // You can set this dynamically using the OnConnect hook.
+		properties.ServerKeepAlive = cl.State.Keepalive // [MQTT-3.1.2-21]
+		properties.ServerKeepAliveFlag = true
+	}
+
+	// Error-class reason codes produce a failure CONNACK immediately,
+	// translated to a v3 return code for pre-v5 clients.
+	if reason.Code >= packets.ErrUnspecifiedError.Code {
+		if cl.Properties.ProtocolVersion < 5 {
+			if v3reason, ok := packets.V5CodesToV3[reason]; ok { // NB v3 3.2.2.3 Connack return codes
+				reason = v3reason
+			}
+		}
+
+		properties.ReasonString = reason.Reason
+		ack := packets.Packet{
+			FixedHeader: packets.FixedHeader{
+				Type: packets.Connack,
+			},
+			SessionPresent: false,       // [MQTT-3.2.2-6]
+			ReasonCode:     reason.Code, // [MQTT-3.2.2-8]
+			Properties:     *properties,
+		}
+		return cl.WritePacket(ack)
+	}
+
+	if s.Options.Capabilities.MaximumQos < 2 {
+		properties.MaximumQos = s.Options.Capabilities.MaximumQos // [MQTT-3.2.2-9]
+		properties.MaximumQosFlag = true
+	}
+
+	if cl.Properties.Props.AssignedClientID != "" {
+		properties.AssignedClientID = cl.Properties.Props.AssignedClientID // [MQTT-3.1.3-7] [MQTT-3.2.2-16]
+	}
+
+	// Clamp the requested session expiry to the server maximum, and record
+	// the clamped value back onto the client's properties.
+	if cl.Properties.Props.SessionExpiryInterval > s.Options.Capabilities.MaximumSessionExpiryInterval {
+		properties.SessionExpiryInterval = s.Options.Capabilities.MaximumSessionExpiryInterval
+		properties.SessionExpiryIntervalFlag = true
+		cl.Properties.Props.SessionExpiryInterval = properties.SessionExpiryInterval
+		cl.Properties.Props.SessionExpiryIntervalFlag = true
+	}
+
+	ack := packets.Packet{
+		FixedHeader: packets.FixedHeader{
+			Type: packets.Connack,
+		},
+		SessionPresent: present,
+		ReasonCode:     reason.Code, // [MQTT-3.2.2-8]
+		Properties:     *properties,
+	}
+	return cl.WritePacket(ack)
+}
+
+// processPacket processes an inbound packet for a client. Since the method is
+// typically called as a goroutine, errors are primarily for test checking purposes.
+func (s *Server) processPacket(cl *Client, pk packets.Packet) error {
+	var err error
+
+	switch pk.FixedHeader.Type {
+	case packets.Connect:
+		err = s.processConnect(cl, pk)
+	case packets.Disconnect:
+		err = s.processDisconnect(cl, pk)
+	case packets.Pingreq:
+		err = s.processPingreq(cl, pk)
+	case packets.Publish:
+		code := pk.PublishValidate(s.Options.Capabilities.TopicAliasMaximum)
+		if code != packets.CodeSuccess {
+			return code
+		}
+		err = s.processPublish(cl, pk)
+	case packets.Puback:
+		err = s.processPuback(cl, pk)
+	case packets.Pubrec:
+		err = s.processPubrec(cl, pk)
+	case packets.Pubrel:
+		err = s.processPubrel(cl, pk)
+	case packets.Pubcomp:
+		err = s.processPubcomp(cl, pk)
+	case packets.Subscribe:
+		code := pk.SubscribeValidate()
+		if code != packets.CodeSuccess {
+			return code
+		}
+		err = s.processSubscribe(cl, pk)
+	case packets.Unsubscribe:
+		code := pk.UnsubscribeValidate()
+		if code != packets.CodeSuccess {
+			return code
+		}
+		err = s.processUnsubscribe(cl, pk)
+	case packets.Auth:
+		code := pk.AuthValidate()
+		if code != packets.CodeSuccess {
+			return code
+		}
+		err = s.processAuth(cl, pk)
+	default:
+		return fmt.Errorf("no valid packet available; %v", pk.FixedHeader.Type)
+	}
+
+	s.hooks.OnPacketProcessed(cl, pk, err)
+	if err != nil {
+		return err
+	}
+
+	// If send quota is available, immediately flush the next queued inflight
+	// message (if any) for this client.
+	if cl.State.Inflight.Len() > 0 && atomic.LoadInt32(&cl.State.Inflight.sendQuota) > 0 {
+		next, ok := cl.State.Inflight.NextImmediate()
+		if ok {
+			_ = cl.WritePacket(next)
+			if ok := cl.State.Inflight.Delete(next.PacketID); ok {
+				atomic.AddInt64(&s.Info.Inflight, -1)
+			}
+			cl.State.Inflight.DecreaseSendQuota()
+		}
+	}
+
+	return nil
+}
+
+// processConnect processes a Connect packet. The packet cannot be used to establish
+// a new connection on an existing connection. See EstablishConnection instead.
+func (s *Server) processConnect(cl *Client, _ packets.Packet) error {
+	s.sendLWT(cl)
+	return packets.ErrProtocolViolationSecondConnect // [MQTT-3.1.0-2]
+}
+
+// processPingreq processes a Pingreq packet.
+func (s *Server) processPingreq(cl *Client, _ packets.Packet) error {
+	return cl.WritePacket(packets.Packet{
+		FixedHeader: packets.FixedHeader{
+			Type: packets.Pingresp, // [MQTT-3.12.4-1]
+		},
+	})
+}
+
+// Publish publishes a publish packet into the broker as if it were sent from the specified client.
+// This is a convenience function which wraps InjectPacket. As such, this method can publish packets
+// to any topic (including $SYS) and bypass ACL checks. The qos byte is used for limiting the
+// outbound qos (mqtt v5) rather than issuing to the broker (we assume qos 2 complete).
+func (s *Server) Publish(topic string, payload []byte, retain bool, qos byte) error {
+	if !s.Options.InlineClient {
+		return ErrInlineClientNotEnabled
+	}
+
+	return s.InjectPacket(s.inlineClient, packets.Packet{
+		FixedHeader: packets.FixedHeader{
+			Type:   packets.Publish,
+			Qos:    qos,
+			Retain: retain,
+		},
+		TopicName: topic,
+		Payload:   payload,
+		PacketID:  uint16(qos), // we never process the inbound qos, but we need a packet id for validity checks.
+	})
+}
+
+// Subscribe adds an inline subscription for the specified topic filter and subscription identifier
+// with the provided handler function.
+func (s *Server) Subscribe(filter string, subscriptionId int, handler InlineSubFn) error {
+	if !s.Options.InlineClient {
+		return ErrInlineClientNotEnabled
+	}
+
+	if handler == nil {
+		return packets.ErrInlineSubscriptionHandlerInvalid
+	}
+
+	if !IsValidFilter(filter, false) {
+		return packets.ErrTopicFilterInvalid
+	}
+
+	subscription := packets.Subscription{
+		Identifier: subscriptionId,
+		Filter:     filter,
+	}
+
+	pk := s.hooks.OnSubscribe(s.inlineClient, packets.Packet{ // subscribe like a normal client.
+		Origin:      s.inlineClient.ID,
+		FixedHeader: packets.FixedHeader{Type: packets.Subscribe},
+		Filters:     packets.Subscriptions{subscription},
+	})
+
+	inlineSubscription := InlineSubscription{
+		Subscription: subscription,
+		Handler:      handler,
+	}
+
+	s.Topics.InlineSubscribe(inlineSubscription)
+	s.hooks.OnSubscribed(s.inlineClient, pk, []byte{packets.CodeSuccess.Code})
+
+	// Handling retained messages.
+	for _, pkv := range s.Topics.Messages(filter) { // [MQTT-3.8.4-4]
+		handler(s.inlineClient, inlineSubscription.Subscription, pkv)
+	}
+	return nil
+}
+
+// Unsubscribe removes an inline subscription for the specified subscription and topic filter.
+// It allows you to unsubscribe a specific subscription from the internal subscription
+// associated with the given topic filter.
+func (s *Server) Unsubscribe(filter string, subscriptionId int) error {
+	if !s.Options.InlineClient {
+		return ErrInlineClientNotEnabled
+	}
+
+	if !IsValidFilter(filter, false) {
+		return packets.ErrTopicFilterInvalid
+	}
+
+	pk := s.hooks.OnUnsubscribe(s.inlineClient, packets.Packet{
+		Origin:      s.inlineClient.ID,
+		FixedHeader: packets.FixedHeader{Type: packets.Unsubscribe},
+		Filters: packets.Subscriptions{
+			{
+				Identifier: subscriptionId,
+				Filter:     filter,
+			},
+		},
+	})
+
+	s.Topics.InlineUnsubscribe(subscriptionId, filter)
+	s.hooks.OnUnsubscribed(s.inlineClient, pk)
+	return nil
+}
+
+// InjectPacket injects a packet into the broker as if it were sent from the specified client.
+// InlineClients using this method can publish packets to any topic (including $SYS) and bypass ACL checks.
+func (s *Server) InjectPacket(cl *Client, pk packets.Packet) error {
+	pk.ProtocolVersion = cl.Properties.ProtocolVersion
+
+	err := s.processPacket(cl, pk)
+	if err != nil {
+		return err
+	}
+
+	atomic.AddInt64(&cl.ops.info.PacketsReceived, 1)
+	if pk.FixedHeader.Type == packets.Publish {
+		atomic.AddInt64(&cl.ops.info.MessagesReceived, 1)
+	}
+
+	return nil
+}
+
+// processPublish processes a Publish packet.
+func (s *Server) processPublish(cl *Client, pk packets.Packet) error {
+	// Non-inline publishes to invalid topic names are silently dropped.
+	if !cl.Net.Inline && !IsValidFilter(pk.TopicName, true) {
+		return nil
+	}
+
+	if atomic.LoadInt32(&cl.State.Inflight.receiveQuota) == 0 {
+		return s.DisconnectClient(cl, packets.ErrReceiveMaximum) // ~[MQTT-3.3.4-7] ~[MQTT-3.3.4-8]
+	}
+
+	if !cl.Net.Inline && !s.hooks.OnACLCheck(cl, pk.TopicName, true) {
+		if pk.FixedHeader.Qos == 0 {
+			return nil
+		}
+
+		if cl.Properties.ProtocolVersion != 5 {
+			return s.DisconnectClient(cl, packets.ErrNotAuthorized)
+		}
+
+		ackType := packets.Puback
+		if pk.FixedHeader.Qos == 2 {
+			ackType = packets.Pubrec
+		}
+
+		ack := s.buildAck(pk.PacketID, ackType, 0, pk.Properties, packets.ErrNotAuthorized)
+		return cl.WritePacket(ack)
+	}
+
+	pk.Origin = cl.ID
+	pk.Created = time.Now().Unix()
+
+	if !cl.Net.Inline {
+		if pki, ok := cl.State.Inflight.Get(pk.PacketID); ok {
+			if pki.FixedHeader.Type == packets.Pubrec { // [MQTT-4.3.3-10]
+				ack := s.buildAck(pk.PacketID, packets.Pubrec, 0, pk.Properties, packets.ErrPacketIdentifierInUse)
+				return cl.WritePacket(ack)
+			}
+			if ok := cl.State.Inflight.Delete(pk.PacketID); ok { // [MQTT-4.3.2-5]
+				atomic.AddInt64(&s.Info.Inflight, -1)
+			}
+		}
+	}
+
+	if pk.Properties.TopicAliasFlag && pk.Properties.TopicAlias > 0 { // [MQTT-3.3.2-11]
+		pk.TopicName = cl.State.TopicAliases.Inbound.Set(pk.Properties.TopicAlias, pk.TopicName)
+	}
+
+	if pk.FixedHeader.Qos > s.Options.Capabilities.MaximumQos {
+		pk.FixedHeader.Qos = s.Options.Capabilities.MaximumQos // [MQTT-3.2.2-9] Reduce qos based on server max qos capability
+	}
+
+	pkx, err := s.hooks.OnPublish(cl, pk)
+	if err == nil {
+		pk = pkx
+	} else if errors.Is(err, packets.ErrRejectPacket) {
+		return nil
+	} else if errors.Is(err, packets.CodeSuccessIgnore) {
+		pk.Ignore = true
+	} else if cl.Properties.ProtocolVersion == 5 && pk.FixedHeader.Qos > 0 && errors.As(err, new(packets.Code)) {
+		err = cl.WritePacket(s.buildAck(pk.PacketID, packets.Puback, 0, pk.Properties, err.(packets.Code)))
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+
+	if pk.FixedHeader.Retain { // [MQTT-3.3.1-5] ![MQTT-3.3.1-8]
+		s.retainMessage(cl, pk)
+	}
+
+	// If it's inlineClient, it can't handle PUBREC and PUBREL.
+	// When it publishes a package with a qos > 0, the server treats
+	// the package as qos=0, and the client receives it as qos=1 or 2.
+	if pk.FixedHeader.Qos == 0 || cl.Net.Inline {
+		s.publishToSubscribers(pk)
+		s.hooks.OnPublished(cl, pk)
+		return nil
+	}
+
+	cl.State.Inflight.DecreaseReceiveQuota()
+	ack := s.buildAck(pk.PacketID, packets.Puback, 0, pk.Properties, packets.QosCodes[pk.FixedHeader.Qos]) // [MQTT-4.3.2-4]
+	if pk.FixedHeader.Qos == 2 {
+		ack = s.buildAck(pk.PacketID, packets.Pubrec, 0, pk.Properties, packets.CodeSuccess) // [MQTT-3.3.4-1] [MQTT-4.3.3-8]
+	}
+
+	if ok := cl.State.Inflight.Set(ack); ok {
+		atomic.AddInt64(&s.Info.Inflight, 1)
+		s.hooks.OnQosPublish(cl, ack, ack.Created, 0)
+	}
+
+	err = cl.WritePacket(ack)
+	if err != nil {
+		return err
+	}
+
+	// qos 1 completes immediately after the PUBACK is written.
+	if pk.FixedHeader.Qos == 1 {
+		if ok := cl.State.Inflight.Delete(ack.PacketID); ok {
+			atomic.AddInt64(&s.Info.Inflight, -1)
+		}
+		cl.State.Inflight.IncreaseReceiveQuota()
+		s.hooks.OnQosComplete(cl, ack)
+	}
+
+	s.publishToSubscribers(pk)
+	s.hooks.OnPublished(cl, pk)
+
+	return nil
+}
+
+// retainMessage adds a message to a topic, and if a persistent store is provided,
+// adds the message to the store to be reloaded if necessary.
+func (s *Server) retainMessage(cl *Client, pk packets.Packet) {
+	if s.Options.Capabilities.RetainAvailable == 0 || pk.Ignore {
+		return
+	}
+
+	out := pk.Copy(false)
+	r := s.Topics.RetainMessage(out)
+	s.hooks.OnRetainMessage(cl, pk, r)
+	atomic.StoreInt64(&s.Info.Retained, int64(s.Topics.Retained.Len()))
+}
+
+// publishToSubscribers publishes a publish packet to all subscribers with matching topic filters.
+func (s *Server) publishToSubscribers(pk packets.Packet) {
+	if pk.Ignore {
+		return
+	}
+
+	if pk.Created == 0 {
+		pk.Created = time.Now().Unix()
+	}
+
+	pk.Expiry = pk.Created + s.Options.Capabilities.MaximumMessageExpiryInterval
+	if pk.Properties.MessageExpiryInterval > 0 {
+		pk.Expiry = pk.Created + int64(pk.Properties.MessageExpiryInterval)
+	}
+
+	subscribers := s.Topics.Subscribers(pk.TopicName)
+	if len(subscribers.Shared) > 0 {
+		subscribers = s.hooks.OnSelectSubscribers(subscribers, pk)
+		if len(subscribers.SharedSelected) == 0 {
+			subscribers.SelectShared()
+		}
+		subscribers.MergeSharedSelected()
+	}
+
+	for _, inlineSubscription := range subscribers.InlineSubscriptions {
+		inlineSubscription.Handler(s.inlineClient, inlineSubscription.Subscription, pk)
+	}
+
+	for id, subs := range subscribers.Subscriptions {
+		if cl, ok := s.Clients.Get(id); ok {
+			_, err := s.publishToClient(cl, subs, pk)
+			if err != nil {
+				s.Log.Debug("failed publishing packet", "error", err, "client", cl.ID, "packet", pk)
+			}
+		}
+	}
+}
+
+// publishToClient attempts to deliver a publish packet to a single subscribed client,
+// applying qos downgrades, topic aliasing, and inflight quota accounting.
+func (s *Server) publishToClient(cl *Client, sub packets.Subscription, pk packets.Packet) (packets.Packet, error) {
+	if sub.NoLocal && pk.Origin == cl.ID {
+		return pk, nil // [MQTT-3.8.3-3]
+	}
+
+	out := pk.Copy(false)
+	if !s.hooks.OnACLCheck(cl, pk.TopicName, false) {
+		return out, packets.ErrNotAuthorized
+	}
+	if !sub.FwdRetainedFlag && ((cl.Properties.ProtocolVersion == 5 && !sub.RetainAsPublished) || cl.Properties.ProtocolVersion < 5) { // ![MQTT-3.3.1-13] [v3 MQTT-3.3.1-9]
+		out.FixedHeader.Retain = false // [MQTT-3.3.1-12]
+	}
+
+	if len(sub.Identifiers) > 0 { // [MQTT-3.3.4-3]
+		out.Properties.SubscriptionIdentifier = []int{}
+		for _, id := range sub.Identifiers {
+			out.Properties.SubscriptionIdentifier = append(out.Properties.SubscriptionIdentifier, id) // [MQTT-3.3.4-4] ![MQTT-3.3.4-5]
+		}
+		sort.Ints(out.Properties.SubscriptionIdentifier)
+	}
+
+	if out.FixedHeader.Qos > sub.Qos {
+		out.FixedHeader.Qos = sub.Qos
+	}
+
+	if out.FixedHeader.Qos > s.Options.Capabilities.MaximumQos {
+		out.FixedHeader.Qos = s.Options.Capabilities.MaximumQos // [MQTT-3.2.2-9]
+	}
+
+	if cl.Properties.Props.TopicAliasMaximum > 0 {
+		var aliasExists bool
+		out.Properties.TopicAlias, aliasExists = cl.State.TopicAliases.Outbound.Set(pk.TopicName)
+		if out.Properties.TopicAlias > 0 {
+			out.Properties.TopicAliasFlag = true
+			if aliasExists {
+				out.TopicName = ""
+			}
+		}
+	}
+
+	if out.FixedHeader.Qos > 0 {
+		if cl.State.Inflight.Len() >= int(s.Options.Capabilities.MaximumInflight) {
+			// add hook?
+			atomic.AddInt64(&s.Info.InflightDropped, 1)
+			s.Log.Warn("client store quota reached", "client", cl.ID, "listener", cl.Net.Listener)
+			return out, packets.ErrQuotaExceeded
+		}
+
+		i, err := cl.NextPacketID() // [MQTT-4.3.2-1] [MQTT-4.3.3-1]
+		if err != nil {
+			s.hooks.OnPacketIDExhausted(cl, pk)
+			atomic.AddInt64(&s.Info.InflightDropped, 1)
+			s.Log.Warn("packet ids exhausted", "error", err, "client", cl.ID, "listener", cl.Net.Listener)
+			return out, packets.ErrQuotaExceeded
+		}
+
+		out.PacketID = uint16(i) // [MQTT-2.2.1-4]
+		sentQuota := atomic.LoadInt32(&cl.State.Inflight.sendQuota)
+
+		if ok := cl.State.Inflight.Set(out); ok { // [MQTT-4.3.2-3] [MQTT-4.3.3-3]
+			atomic.AddInt64(&s.Info.Inflight, 1)
+			s.hooks.OnQosPublish(cl, out, out.Created, 0)
+			cl.State.Inflight.DecreaseSendQuota()
+		}
+
+		// Send quota exhausted: park the message as inflight (Expiry -1)
+		// instead of writing it out now.
+		if sentQuota == 0 && atomic.LoadInt32(&cl.State.Inflight.maximumSendQuota) > 0 {
+			out.Expiry = -1
+			cl.State.Inflight.Set(out)
+			return out, nil
+		}
+	}
+
+	if cl.Net.Conn == nil || cl.Closed() {
+		return out, packets.CodeDisconnect
+	}
+
+	// Non-blocking send; if the outbound buffer is full, drop the message.
+	select {
+	case cl.State.outbound <- &out:
+		atomic.AddInt32(&cl.State.outboundQty, 1)
+	default:
+		atomic.AddInt64(&s.Info.MessagesDropped, 1)
+		cl.ops.hooks.OnPublishDropped(cl, pk)
+		if out.FixedHeader.Qos > 0 {
+			cl.State.Inflight.Delete(out.PacketID) // packet was dropped due to irregular circumstances, so rollback inflight.
+			cl.State.Inflight.IncreaseSendQuota()
+		}
+		return out, packets.ErrPendingClientWritesExceeded
+	}
+
+	return out, nil
+}
+
+// publishRetainedToClient sends any retained messages matching a new subscription to the client.
+func (s *Server) publishRetainedToClient(cl *Client, sub packets.Subscription, existed bool) {
+	if IsSharedFilter(sub.Filter) {
+		return // 4.8.2 Non-normative - Shared Subscriptions - No Retained Messages are sent to the Session when it first subscribes.
+	}
+
+	if sub.RetainHandling == 1 && existed || sub.RetainHandling == 2 { // [MQTT-3.3.1-10] [MQTT-3.3.1-11]
+		return
+	}
+
+	sub.FwdRetainedFlag = true
+	for _, pkv := range s.Topics.Messages(sub.Filter) { // [MQTT-3.8.4-4]
+		_, err := s.publishToClient(cl, sub, pkv)
+		if err != nil {
+			s.Log.Debug("failed to publish retained message", "error", err, "client", cl.ID, "listener", cl.Net.Listener, "packet", pkv)
+			continue
+		}
+		s.hooks.OnRetainPublished(cl, pkv)
+	}
+}
+
+// buildAck builds a standardised ack message for Puback, Pubrec, Pubrel, Pubcomp packets.
+func (s *Server) buildAck(packetID uint16, pkt, qos byte, properties packets.Properties, reason packets.Code) packets.Packet {
+	if s.Options.Capabilities.Compatibilities.NoInheritedPropertiesOnAck {
+		properties = packets.Properties{}
+	}
+	if reason.Code >= packets.ErrUnspecifiedError.Code {
+		properties.ReasonString = reason.Reason
+	}
+
+	pk := packets.Packet{
+		FixedHeader: packets.FixedHeader{
+			Type: pkt,
+			Qos:  qos,
+		},
+		PacketID:   packetID,    // [MQTT-2.2.1-5]
+		ReasonCode: reason.Code, // [MQTT-3.4.2-1]
+		Properties: properties,
+		Created:    time.Now().Unix(),
+		Expiry:     time.Now().Unix() + s.Options.Capabilities.MaximumMessageExpiryInterval,
+	}
+
+	return pk
+}
+
+// processPuback processes a Puback packet, denoting completion of a QOS 1 packet sent from the server.
+func (s *Server) processPuback(cl *Client, pk packets.Packet) error {
+	if _, ok := cl.State.Inflight.Get(pk.PacketID); !ok {
+		return nil // omit, but would be packets.ErrPacketIdentifierNotFound
+	}
+
+	if ok := cl.State.Inflight.Delete(pk.PacketID); ok { // [MQTT-4.3.2-5]
+		cl.State.Inflight.IncreaseSendQuota()
+		atomic.AddInt64(&s.Info.Inflight, -1)
+		s.hooks.OnQosComplete(cl, pk)
+	}
+
+	return nil
+}
+
+// processPubrec processes a Pubrec packet, denoting receipt of a QOS 2 packet sent from the server.
+func (s *Server) processPubrec(cl *Client, pk packets.Packet) error {
+	if _, ok := cl.State.Inflight.Get(pk.PacketID); !ok { // [MQTT-4.3.3-7] [MQTT-4.3.3-13]
+		return cl.WritePacket(s.buildAck(pk.PacketID, packets.Pubrel, 1, pk.Properties, packets.ErrPacketIdentifierNotFound))
+	}
+
+	if pk.ReasonCode >= packets.ErrUnspecifiedError.Code || !pk.ReasonCodeValid() { // [MQTT-4.3.3-4]
+		if ok := cl.State.Inflight.Delete(pk.PacketID); ok {
+			atomic.AddInt64(&s.Info.Inflight, -1)
+		}
+		cl.ops.hooks.OnQosDropped(cl, pk)
+		return nil // as per MQTT5 Section 4.13.2 paragraph 2
+	}
+
+	ack := s.buildAck(pk.PacketID, packets.Pubrel, 1, pk.Properties, packets.CodeSuccess) // [MQTT-4.3.3-4] ![MQTT-4.3.3-6]
+	cl.State.Inflight.DecreaseReceiveQuota() // -1 RECV QUOTA
+	cl.State.Inflight.Set(ack)               // [MQTT-4.3.3-5]
+	return cl.WritePacket(ack)
+}
+
+// processPubrel processes a Pubrel packet, denoting completion of a QOS 2 packet sent from the client.
+func (s *Server) processPubrel(cl *Client, pk packets.Packet) error {
+	if _, ok := cl.State.Inflight.Get(pk.PacketID); !ok { // [MQTT-4.3.3-7] [MQTT-4.3.3-13]
+		return cl.WritePacket(s.buildAck(pk.PacketID, packets.Pubcomp, 0, pk.Properties, packets.ErrPacketIdentifierNotFound))
+	}
+
+	if pk.ReasonCode >= packets.ErrUnspecifiedError.Code || !pk.ReasonCodeValid() { // [MQTT-4.3.3-9]
+		if ok := cl.State.Inflight.Delete(pk.PacketID); ok {
+			atomic.AddInt64(&s.Info.Inflight, -1)
+		}
+		cl.ops.hooks.OnQosDropped(cl, pk)
+		return nil
+	}
+
+	ack := s.buildAck(pk.PacketID, packets.Pubcomp, 0, pk.Properties, packets.CodeSuccess) // [MQTT-4.3.3-11]
+	cl.State.Inflight.Set(ack)
+
+	err := cl.WritePacket(ack)
+	if err != nil {
+		return err
+	}
+
+	cl.State.Inflight.IncreaseReceiveQuota()             // +1 RECV QUOTA
+	cl.State.Inflight.IncreaseSendQuota()                // +1 SENT QUOTA
+	if ok := cl.State.Inflight.Delete(pk.PacketID); ok { // [MQTT-4.3.3-12]
+		atomic.AddInt64(&s.Info.Inflight, -1)
+		s.hooks.OnQosComplete(cl, pk)
+	}
+
+	return nil
+}
+
+// processPubcomp processes a Pubcomp packet, denoting completion of a QOS 2 packet sent from the server.
+func (s *Server) processPubcomp(cl *Client, pk packets.Packet) error {
+	// regardless of whether the pubcomp is a success or failure, we end the qos flow, delete inflight, and restore the quotas.
+	cl.State.Inflight.IncreaseReceiveQuota() // +1 RECV QUOTA
+	cl.State.Inflight.IncreaseSendQuota()    // +1 SENT QUOTA
+	if ok := cl.State.Inflight.Delete(pk.PacketID); ok {
+		atomic.AddInt64(&s.Info.Inflight, -1)
+		s.hooks.OnQosComplete(cl, pk)
+	}
+
+	return nil
+}
+
+// processSubscribe processes a Subscribe packet.
+func (s *Server) processSubscribe(cl *Client, pk packets.Packet) error {
+	pk = s.hooks.OnSubscribe(cl, pk)
+	code := packets.CodeSuccess
+	if _, ok := cl.State.Inflight.Get(pk.PacketID); ok {
+		code = packets.ErrPacketIdentifierInUse
+	}
+
+	// One reason code per filter; filterExisted records whether a topic-level
+	// subscription already existed (used for retained-message handling below).
+	filterExisted := make([]bool, len(pk.Filters))
+	reasonCodes := make([]byte, len(pk.Filters))
+	for i, sub := range pk.Filters {
+		if code != packets.CodeSuccess {
+			reasonCodes[i] = code.Code // NB 3.9.3 Non-normative 0x91
+			continue
+		} else if !IsValidFilter(sub.Filter, false) {
+			reasonCodes[i] = packets.ErrTopicFilterInvalid.Code
+		} else if sub.NoLocal && IsSharedFilter(sub.Filter) {
+			reasonCodes[i] = packets.ErrProtocolViolationInvalidSharedNoLocal.Code // [MQTT-3.8.3-4]
+		} else if !s.hooks.OnACLCheck(cl, sub.Filter, false) {
+			reasonCodes[i] = packets.ErrNotAuthorized.Code
+			if s.Options.Capabilities.Compatibilities.ObscureNotAuthorized {
+				reasonCodes[i] = packets.ErrUnspecifiedError.Code
+			}
+		} else {
+			isNew := s.Topics.Subscribe(cl.ID, sub) // [MQTT-3.8.4-3]
+			if isNew {
+				atomic.AddInt64(&s.Info.Subscriptions, 1)
+			}
+			cl.State.Subscriptions.Add(sub.Filter, sub) // [MQTT-3.2.2-10]
+
+			if sub.Qos > s.Options.Capabilities.MaximumQos {
+				sub.Qos = s.Options.Capabilities.MaximumQos // [MQTT-3.2.2-9]
+			}
+
+			filterExisted[i] = !isNew
+			reasonCodes[i] = sub.Qos // [MQTT-3.9.3-1] [MQTT-3.8.4-7]
+		}
+
+		if reasonCodes[i] > packets.CodeGrantedQos2.Code && cl.Properties.ProtocolVersion < 5 { // MQTT3
+			reasonCodes[i] = packets.ErrUnspecifiedError.Code
+		}
+	}
+
+	ack := packets.Packet{ // [MQTT-3.8.4-1] [MQTT-3.8.4-5]
+		FixedHeader: packets.FixedHeader{
+			Type: packets.Suback,
+		},
+		PacketID:    pk.PacketID, // [MQTT-2.2.1-6] [MQTT-3.8.4-2]
+		ReasonCodes: reasonCodes, // [MQTT-3.8.4-6]
+		Properties: packets.Properties{
+			User: pk.Properties.User,
+		},
+	}
+
+	if code.Code >= packets.ErrUnspecifiedError.Code {
+		ack.Properties.ReasonString = code.Reason
+	}
+
+	s.hooks.OnSubscribed(cl, pk, reasonCodes)
+	err := cl.WritePacket(ack)
+	if err != nil {
+		return err
+	}
+
+	// Deliver retained messages for each successfully granted filter.
+	for i, sub := range pk.Filters { // [MQTT-3.3.1-9]
+		if reasonCodes[i] >= packets.ErrUnspecifiedError.Code {
+			continue
+		}
+
+		s.publishRetainedToClient(cl, sub, filterExisted[i])
+	}
+
+	return nil
+}
+
+// processUnsubscribe processes an unsubscribe packet.
+func (s *Server) processUnsubscribe(cl *Client, pk packets.Packet) error {
+	code := packets.CodeSuccess
+	if _, ok := cl.State.Inflight.Get(pk.PacketID); ok {
+		code = packets.ErrPacketIdentifierInUse
+	}
+
+	pk = s.hooks.OnUnsubscribe(cl, pk)
+	reasonCodes := make([]byte, len(pk.Filters))
+	for i, sub := range pk.Filters { // [MQTT-3.10.4-6] [MQTT-3.11.3-1]
+		if code != packets.CodeSuccess {
+			reasonCodes[i] = code.Code // NB 3.11.3 Non-normative 0x91
+			continue
+		}
+
+		if q := s.Topics.Unsubscribe(sub.Filter, cl.ID); q {
+			atomic.AddInt64(&s.Info.Subscriptions, -1)
+			reasonCodes[i] = packets.CodeSuccess.Code
+		} else {
+			reasonCodes[i] = packets.CodeNoSubscriptionExisted.Code
+		}
+
+		cl.State.Subscriptions.Delete(sub.Filter) // [MQTT-3.10.4-2] [MQTT-3.10.4-2] ~[MQTT-3.10.4-3]
+	}
+
+	ack := packets.Packet{ // [MQTT-3.10.4-4]
+		FixedHeader: packets.FixedHeader{
+			Type: packets.Unsuback,
+		},
+		PacketID:    pk.PacketID, // [MQTT-2.2.1-6] [MQTT-3.10.4-5]
+		ReasonCodes: reasonCodes, // [MQTT-3.11.3-2]
+		Properties: packets.Properties{
+			User: 
pk.Properties.User, + }, + } + + if code.Code >= packets.ErrUnspecifiedError.Code { + ack.Properties.ReasonString = code.Reason + } + + s.hooks.OnUnsubscribed(cl, pk) + return cl.WritePacket(ack) +} + +// UnsubscribeClient unsubscribes a client from all of their subscriptions. +func (s *Server) UnsubscribeClient(cl *Client) { + i := 0 + filterMap := cl.State.Subscriptions.GetAll() + filters := make([]packets.Subscription, len(filterMap)) + for k := range filterMap { + cl.State.Subscriptions.Delete(k) + } + + if atomic.LoadUint32(&cl.State.isTakenOver) == 1 { + return + } + + for k, v := range filterMap { + if s.Topics.Unsubscribe(k, cl.ID) { + atomic.AddInt64(&s.Info.Subscriptions, -1) + } + filters[i] = v + i++ + } + s.hooks.OnUnsubscribed(cl, packets.Packet{FixedHeader: packets.FixedHeader{Type: packets.Unsubscribe}, Filters: filters}) +} + +// processAuth processes an Auth packet. +func (s *Server) processAuth(cl *Client, pk packets.Packet) error { + _, err := s.hooks.OnAuthPacket(cl, pk) + if err != nil { + return err + } + + return nil +} + +// processDisconnect processes a Disconnect packet. +func (s *Server) processDisconnect(cl *Client, pk packets.Packet) error { + if pk.Properties.SessionExpiryIntervalFlag { + if pk.Properties.SessionExpiryInterval > 0 && cl.Properties.Props.SessionExpiryInterval == 0 { + return packets.ErrProtocolViolationZeroNonZeroExpiry + } + + cl.Properties.Props.SessionExpiryInterval = pk.Properties.SessionExpiryInterval + cl.Properties.Props.SessionExpiryIntervalFlag = true + } + + s.loop.willDelayed.Delete(cl.ID) // [MQTT-3.1.3-9] [MQTT-3.1.2-8] + cl.Stop(packets.CodeDisconnect) // [MQTT-3.14.4-2] + + return nil +} + +// DisconnectClient sends a Disconnect packet to a client and then closes the client connection. 
+func (s *Server) DisconnectClient(cl *Client, code packets.Code) error { + out := packets.Packet{ + FixedHeader: packets.FixedHeader{ + Type: packets.Disconnect, + }, + ReasonCode: code.Code, + Properties: packets.Properties{}, + } + + if code.Code >= packets.ErrUnspecifiedError.Code { + out.Properties.ReasonString = code.Reason // // [MQTT-3.14.2-1] + } + + // We already have a code we are using to disconnect the client, so we are not + // interested if the write packet fails due to a closed connection (as we are closing it). + err := cl.WritePacket(out) + if !s.Options.Capabilities.Compatibilities.PassiveClientDisconnect { + cl.Stop(code) + if code.Code >= packets.ErrUnspecifiedError.Code { + return code + } + } + + return err +} + +// publishSysTopics publishes the current values to the server $SYS topics. +// Due to the int to string conversions this method is not as cheap as +// some of the others so the publishing interval should be set appropriately. +func (s *Server) publishSysTopics() { + pk := packets.Packet{ + FixedHeader: packets.FixedHeader{ + Type: packets.Publish, + Retain: true, + }, + Created: time.Now().Unix(), + } + + var m runtime.MemStats + runtime.ReadMemStats(&m) + atomic.StoreInt64(&s.Info.MemoryAlloc, int64(m.HeapInuse)) + atomic.StoreInt64(&s.Info.Threads, int64(runtime.NumGoroutine())) + atomic.StoreInt64(&s.Info.Time, time.Now().Unix()) + atomic.StoreInt64(&s.Info.Uptime, time.Now().Unix()-atomic.LoadInt64(&s.Info.Started)) + atomic.StoreInt64(&s.Info.ClientsTotal, int64(s.Clients.Len())) + atomic.StoreInt64(&s.Info.ClientsDisconnected, atomic.LoadInt64(&s.Info.ClientsTotal)-atomic.LoadInt64(&s.Info.ClientsConnected)) + + info := s.Info.Clone() + topics := map[string]string{ + SysPrefix + "/broker/version": s.Info.Version, + SysPrefix + "/broker/time": Int64toa(info.Time), + SysPrefix + "/broker/uptime": Int64toa(info.Uptime), + SysPrefix + "/broker/started": Int64toa(info.Started), + SysPrefix + "/broker/load/bytes/received": 
Int64toa(info.BytesReceived), + SysPrefix + "/broker/load/bytes/sent": Int64toa(info.BytesSent), + SysPrefix + "/broker/clients/connected": Int64toa(info.ClientsConnected), + SysPrefix + "/broker/clients/disconnected": Int64toa(info.ClientsDisconnected), + SysPrefix + "/broker/clients/maximum": Int64toa(info.ClientsMaximum), + SysPrefix + "/broker/clients/total": Int64toa(info.ClientsTotal), + SysPrefix + "/broker/packets/received": Int64toa(info.PacketsReceived), + SysPrefix + "/broker/packets/sent": Int64toa(info.PacketsSent), + SysPrefix + "/broker/messages/received": Int64toa(info.MessagesReceived), + SysPrefix + "/broker/messages/sent": Int64toa(info.MessagesSent), + SysPrefix + "/broker/messages/dropped": Int64toa(info.MessagesDropped), + SysPrefix + "/broker/messages/inflight": Int64toa(info.Inflight), + SysPrefix + "/broker/retained": Int64toa(info.Retained), + SysPrefix + "/broker/subscriptions": Int64toa(info.Subscriptions), + SysPrefix + "/broker/system/memory": Int64toa(info.MemoryAlloc), + SysPrefix + "/broker/system/threads": Int64toa(info.Threads), + } + + for topic, payload := range topics { + pk.TopicName = topic + pk.Payload = []byte(payload) + s.Topics.RetainMessage(pk.Copy(false)) + s.publishToSubscribers(pk) + } + + s.hooks.OnSysInfoTick(info) +} + +// Close attempts to gracefully shut down the server, all listeners, clients, and stores. +func (s *Server) Close() error { + close(s.done) + s.Log.Info("gracefully stopping server") + s.Listeners.CloseAll(s.closeListenerClients) + s.hooks.OnStopped() + s.hooks.Stop() + + s.Log.Info("mochi mqtt server stopped") + return nil +} + +// closeListenerClients closes all clients on the specified listener. +func (s *Server) closeListenerClients(listener string) { + clients := s.Clients.GetByListener(listener) + for _, cl := range clients { + _ = s.DisconnectClient(cl, packets.ErrServerShuttingDown) + } +} + +// sendLWT issues an LWT message to a topic when a client disconnects. 
+func (s *Server) sendLWT(cl *Client) { + if atomic.LoadUint32(&cl.Properties.Will.Flag) == 0 { + return + } + + modifiedLWT := s.hooks.OnWill(cl, cl.Properties.Will) + + pk := packets.Packet{ + FixedHeader: packets.FixedHeader{ + Type: packets.Publish, + Retain: modifiedLWT.Retain, // [MQTT-3.1.2-14] [MQTT-3.1.2-15] + Qos: modifiedLWT.Qos, + }, + TopicName: modifiedLWT.TopicName, + Payload: modifiedLWT.Payload, + Properties: packets.Properties{ + User: modifiedLWT.User, + }, + Origin: cl.ID, + Created: time.Now().Unix(), + } + + if cl.Properties.Will.WillDelayInterval > 0 { + pk.Connect.WillProperties.WillDelayInterval = cl.Properties.Will.WillDelayInterval + pk.Expiry = time.Now().Unix() + int64(pk.Connect.WillProperties.WillDelayInterval) + s.loop.willDelayed.Add(cl.ID, pk) + return + } + + if pk.FixedHeader.Retain { + s.retainMessage(cl, pk) + } + + s.publishToSubscribers(pk) // [MQTT-3.1.2-8] + atomic.StoreUint32(&cl.Properties.Will.Flag, 0) // [MQTT-3.1.2-10] + s.hooks.OnWillSent(cl, pk) +} + +// readStore reads in any data from the persistent datastore (if applicable). 
+func (s *Server) readStore() error { + if s.hooks.Provides(StoredClients) { + clients, err := s.hooks.StoredClients() + if err != nil { + return fmt.Errorf("failed to load clients; %w", err) + } + s.loadClients(clients) + s.Log.Debug("loaded clients from store", "len", len(clients)) + } + + if s.hooks.Provides(StoredSubscriptions) { + subs, err := s.hooks.StoredSubscriptions() + if err != nil { + return fmt.Errorf("load subscriptions; %w", err) + } + s.loadSubscriptions(subs) + s.Log.Debug("loaded subscriptions from store", "len", len(subs)) + } + + if s.hooks.Provides(StoredInflightMessages) { + inflight, err := s.hooks.StoredInflightMessages() + if err != nil { + return fmt.Errorf("load inflight; %w", err) + } + s.loadInflight(inflight) + s.Log.Debug("loaded inflights from store", "len", len(inflight)) + } + + if s.hooks.Provides(StoredRetainedMessages) { + retained, err := s.hooks.StoredRetainedMessages() + if err != nil { + return fmt.Errorf("load retained; %w", err) + } + s.loadRetained(retained) + s.Log.Debug("loaded retained messages from store", "len", len(retained)) + } + + if s.hooks.Provides(StoredSysInfo) { + sysInfo, err := s.hooks.StoredSysInfo() + if err != nil { + return fmt.Errorf("load server info; %w", err) + } + s.loadServerInfo(sysInfo.Info) + s.Log.Debug("loaded $SYS info from store") + } + + return nil +} + +// loadServerInfo restores server info from the datastore. 
+func (s *Server) loadServerInfo(v system.Info) { + if s.Options.Capabilities.Compatibilities.RestoreSysInfoOnRestart { + atomic.StoreInt64(&s.Info.BytesReceived, v.BytesReceived) + atomic.StoreInt64(&s.Info.BytesSent, v.BytesSent) + atomic.StoreInt64(&s.Info.ClientsMaximum, v.ClientsMaximum) + atomic.StoreInt64(&s.Info.ClientsTotal, v.ClientsTotal) + atomic.StoreInt64(&s.Info.ClientsDisconnected, v.ClientsDisconnected) + atomic.StoreInt64(&s.Info.MessagesReceived, v.MessagesReceived) + atomic.StoreInt64(&s.Info.MessagesSent, v.MessagesSent) + atomic.StoreInt64(&s.Info.MessagesDropped, v.MessagesDropped) + atomic.StoreInt64(&s.Info.PacketsReceived, v.PacketsReceived) + atomic.StoreInt64(&s.Info.PacketsSent, v.PacketsSent) + atomic.StoreInt64(&s.Info.InflightDropped, v.InflightDropped) + } + atomic.StoreInt64(&s.Info.Retained, v.Retained) + atomic.StoreInt64(&s.Info.Inflight, v.Inflight) + atomic.StoreInt64(&s.Info.Subscriptions, v.Subscriptions) +} + +// loadSubscriptions restores subscriptions from the datastore. +func (s *Server) loadSubscriptions(v []storage.Subscription) { + for _, sub := range v { + sb := packets.Subscription{ + Filter: sub.Filter, + RetainHandling: sub.RetainHandling, + Qos: sub.Qos, + RetainAsPublished: sub.RetainAsPublished, + NoLocal: sub.NoLocal, + Identifier: sub.Identifier, + } + if s.Topics.Subscribe(sub.Client, sb) { + if cl, ok := s.Clients.Get(sub.Client); ok { + cl.State.Subscriptions.Add(sub.Filter, sb) + } + } + } +} + +// loadClients restores clients from the datastore. 
+func (s *Server) loadClients(v []storage.Client) { + for _, c := range v { + cl := s.NewClient(nil, c.Listener, c.ID, false) + cl.Properties.Username = c.Username + cl.Properties.Clean = c.Clean + cl.Properties.ProtocolVersion = c.ProtocolVersion + cl.Properties.Props = packets.Properties{ + SessionExpiryInterval: c.Properties.SessionExpiryInterval, + SessionExpiryIntervalFlag: c.Properties.SessionExpiryIntervalFlag, + AuthenticationMethod: c.Properties.AuthenticationMethod, + AuthenticationData: c.Properties.AuthenticationData, + RequestProblemInfoFlag: c.Properties.RequestProblemInfoFlag, + RequestProblemInfo: c.Properties.RequestProblemInfo, + RequestResponseInfo: c.Properties.RequestResponseInfo, + ReceiveMaximum: c.Properties.ReceiveMaximum, + TopicAliasMaximum: c.Properties.TopicAliasMaximum, + User: c.Properties.User, + MaximumPacketSize: c.Properties.MaximumPacketSize, + } + cl.Properties.Will = Will(c.Will) + + // cancel the context, update cl.State such as disconnected time and stopCause. + cl.Stop(packets.ErrServerShuttingDown) + + expire := (cl.Properties.ProtocolVersion == 5 && cl.Properties.Props.SessionExpiryInterval == 0) || (cl.Properties.ProtocolVersion < 5 && cl.Properties.Clean) + s.hooks.OnDisconnect(cl, packets.ErrServerShuttingDown, expire) + if expire { + cl.ClearInflights() + s.UnsubscribeClient(cl) + } else { + s.Clients.Add(cl) + } + } +} + +// loadInflight restores inflight messages from the datastore. +func (s *Server) loadInflight(v []storage.Message) { + for _, msg := range v { + if client, ok := s.Clients.Get(msg.Origin); ok { + client.State.Inflight.Set(msg.ToPacket()) + } + } +} + +// loadRetained restores retained messages from the datastore. +func (s *Server) loadRetained(v []storage.Message) { + for _, msg := range v { + s.Topics.RetainMessage(msg.ToPacket()) + } +} + +// clearExpiredClients deletes all clients which have been disconnected for longer +// than their given expiry intervals. 
+func (s *Server) clearExpiredClients(dt int64) {
+	for id, client := range s.Clients.GetAll() {
+		disconnected := atomic.LoadInt64(&client.State.disconnected)
+		if disconnected == 0 {
+			continue
+		}
+
+		expire := s.Options.Capabilities.MaximumSessionExpiryInterval
+		if client.Properties.ProtocolVersion == 5 && client.Properties.Props.SessionExpiryIntervalFlag {
+			expire = client.Properties.Props.SessionExpiryInterval
+		}
+
+		if disconnected+int64(expire) < dt {
+			s.hooks.OnClientExpired(client)
+			s.Clients.Delete(id) // [MQTT-4.1.0-2]
+		}
+	}
+}
+
+// clearExpiredRetainedMessages deletes retained messages from topics if they have expired.
+func (s *Server) clearExpiredRetainedMessages(now int64) {
+	for filter, pk := range s.Topics.Retained.GetAll() {
+		expired := pk.ProtocolVersion == 5 && pk.Expiry > 0 && pk.Expiry < now // [MQTT-3.3.2-5]
+
+		// If the maximum message expiry interval is set (greater than 0), and the message
+		// retention period exceeds the maximum expiry, the message will be forcibly removed.
+		enforced := s.Options.Capabilities.MaximumMessageExpiryInterval > 0 &&
+			now-pk.Created > s.Options.Capabilities.MaximumMessageExpiryInterval
+
+		if expired || enforced {
+			s.Topics.Retained.Delete(filter)
+			s.hooks.OnRetainedExpired(filter)
+		}
+	}
+}
+
+// clearExpiredInflights deletes any inflight messages which have expired.
+func (s *Server) clearExpiredInflights(now int64) {
+	for _, client := range s.Clients.GetAll() {
+		if deleted := client.ClearExpiredInflights(now, s.Options.Capabilities.MaximumMessageExpiryInterval); len(deleted) > 0 {
+			for _, id := range deleted {
+				s.hooks.OnQosDropped(client, packets.Packet{PacketID: id})
+			}
+		}
+	}
+}
+
+// sendDelayedLWT sends any LWT messages which have reached their issue time. 
+func (s *Server) sendDelayedLWT(dt int64) { + for id, pk := range s.loop.willDelayed.GetAll() { + if dt > pk.Expiry { + s.publishToSubscribers(pk) // [MQTT-3.1.2-8] + if cl, ok := s.Clients.Get(id); ok { + if pk.FixedHeader.Retain { + s.retainMessage(cl, pk) + } + cl.Properties.Will = Will{} // [MQTT-3.1.2-10] + s.hooks.OnWillSent(cl, pk) + } + s.loop.willDelayed.Delete(id) + } + } +} + +// Int64toa converts an int64 to a string. +func Int64toa(v int64) string { + return strconv.FormatInt(v, 10) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/system/system.go b/vendor/github.com/mochi-mqtt/server/v2/system/system.go new file mode 100644 index 000000000..2ed47d0c4 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/system/system.go @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package system + +import "sync/atomic" + +// Info contains atomic counters and values for various server statistics +// commonly found in $SYS topics (and others). 
+// based on https://github.com/mqtt/mqtt.org/wiki/SYS-Topics
+type Info struct {
+	Version             string `json:"version"`              // the current version of the server
+	Started             int64  `json:"started"`              // the time the server started in unix seconds
+	Time                int64  `json:"time"`                 // current time on the server
+	Uptime              int64  `json:"uptime"`               // the number of seconds the server has been online
+	BytesReceived       int64  `json:"bytes_received"`       // total number of bytes received since the broker started
+	BytesSent           int64  `json:"bytes_sent"`           // total number of bytes sent since the broker started
+	ClientsConnected    int64  `json:"clients_connected"`    // number of currently connected clients
+	ClientsDisconnected int64  `json:"clients_disconnected"` // total number of persistent clients (with clean session disabled) that are registered at the broker but are currently disconnected
+	ClientsMaximum      int64  `json:"clients_maximum"`      // maximum number of active clients that have been connected
+	ClientsTotal        int64  `json:"clients_total"`        // total number of connected and disconnected clients with a persistent session currently registered at the broker
+	MessagesReceived    int64  `json:"messages_received"`    // total number of publish messages received
+	MessagesSent        int64  `json:"messages_sent"`        // total number of publish messages sent
+	MessagesDropped     int64  `json:"messages_dropped"`     // total number of publish messages dropped due to slow subscribers
+	Retained            int64  `json:"retained"`             // total number of retained messages active on the broker
+	Inflight            int64  `json:"inflight"`             // the number of messages currently in-flight
+	InflightDropped     int64  `json:"inflight_dropped"`     // the number of inflight messages which were dropped
+	Subscriptions       int64  `json:"subscriptions"`        // total number of subscriptions active on the broker
+	PacketsReceived     int64  `json:"packets_received"`     // the total number of packets of any type received since the broker started
+	PacketsSent         int64  `json:"packets_sent"`         // total number of packets of any type sent since the broker 
started + MemoryAlloc int64 `json:"memory_alloc"` // memory currently allocated + Threads int64 `json:"threads"` // number of active goroutines, named as threads for platform ambiguity +} + +// Clone makes a copy of Info using atomic operation +func (i *Info) Clone() *Info { + return &Info{ + Version: i.Version, + Started: atomic.LoadInt64(&i.Started), + Time: atomic.LoadInt64(&i.Time), + Uptime: atomic.LoadInt64(&i.Uptime), + BytesReceived: atomic.LoadInt64(&i.BytesReceived), + BytesSent: atomic.LoadInt64(&i.BytesSent), + ClientsConnected: atomic.LoadInt64(&i.ClientsConnected), + ClientsMaximum: atomic.LoadInt64(&i.ClientsMaximum), + ClientsTotal: atomic.LoadInt64(&i.ClientsTotal), + ClientsDisconnected: atomic.LoadInt64(&i.ClientsDisconnected), + MessagesReceived: atomic.LoadInt64(&i.MessagesReceived), + MessagesSent: atomic.LoadInt64(&i.MessagesSent), + MessagesDropped: atomic.LoadInt64(&i.MessagesDropped), + Retained: atomic.LoadInt64(&i.Retained), + Inflight: atomic.LoadInt64(&i.Inflight), + InflightDropped: atomic.LoadInt64(&i.InflightDropped), + Subscriptions: atomic.LoadInt64(&i.Subscriptions), + PacketsReceived: atomic.LoadInt64(&i.PacketsReceived), + PacketsSent: atomic.LoadInt64(&i.PacketsSent), + MemoryAlloc: atomic.LoadInt64(&i.MemoryAlloc), + Threads: atomic.LoadInt64(&i.Threads), + } +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/topics.go b/vendor/github.com/mochi-mqtt/server/v2/topics.go new file mode 100644 index 000000000..63f704d26 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/topics.go @@ -0,0 +1,822 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2023 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package mqtt + +import ( + "strings" + "sync" + "sync/atomic" + + "github.com/mochi-mqtt/server/v2/packets" +) + +var ( + SharePrefix = "$SHARE" // the prefix indicating a share topic + SysPrefix = "$SYS" // the prefix indicating a system info topic +) + +// TopicAliases contains inbound and outbound 
topic alias registrations. +type TopicAliases struct { + Inbound *InboundTopicAliases + Outbound *OutboundTopicAliases +} + +// NewTopicAliases returns an instance of TopicAliases. +func NewTopicAliases(topicAliasMaximum uint16) TopicAliases { + return TopicAliases{ + Inbound: NewInboundTopicAliases(topicAliasMaximum), + Outbound: NewOutboundTopicAliases(topicAliasMaximum), + } +} + +// NewInboundTopicAliases returns a pointer to InboundTopicAliases. +func NewInboundTopicAliases(topicAliasMaximum uint16) *InboundTopicAliases { + return &InboundTopicAliases{ + maximum: topicAliasMaximum, + internal: map[uint16]string{}, + } +} + +// InboundTopicAliases contains a map of topic aliases received from the client. +type InboundTopicAliases struct { + internal map[uint16]string + sync.RWMutex + maximum uint16 +} + +// Set sets a new alias for a specific topic. +func (a *InboundTopicAliases) Set(id uint16, topic string) string { + a.Lock() + defer a.Unlock() + + if a.maximum == 0 { + return topic // ? + } + + if existing, ok := a.internal[id]; ok && topic == "" { + return existing + } + + a.internal[id] = topic + return topic +} + +// OutboundTopicAliases contains a map of topic aliases sent from the broker to the client. +type OutboundTopicAliases struct { + internal map[string]uint16 + sync.RWMutex + cursor uint32 + maximum uint16 +} + +// NewOutboundTopicAliases returns a pointer to OutboundTopicAliases. +func NewOutboundTopicAliases(topicAliasMaximum uint16) *OutboundTopicAliases { + return &OutboundTopicAliases{ + maximum: topicAliasMaximum, + internal: map[string]uint16{}, + } +} + +// Set sets a new topic alias for a topic and returns the alias value, and a boolean +// indicating if the alias already existed. 
+func (a *OutboundTopicAliases) Set(topic string) (uint16, bool) { + a.Lock() + defer a.Unlock() + + if a.maximum == 0 { + return 0, false + } + + if i, ok := a.internal[topic]; ok { + return i, true + } + + i := atomic.LoadUint32(&a.cursor) + if i+1 > uint32(a.maximum) { + // if i+1 > math.MaxUint16 { + return 0, false + } + + a.internal[topic] = uint16(i) + 1 + atomic.StoreUint32(&a.cursor, i+1) + return uint16(i) + 1, false +} + +// SharedSubscriptions contains a map of subscriptions to a shared filter, +// keyed on share group then client id. +type SharedSubscriptions struct { + internal map[string]map[string]packets.Subscription + sync.RWMutex +} + +// NewSharedSubscriptions returns a new instance of Subscriptions. +func NewSharedSubscriptions() *SharedSubscriptions { + return &SharedSubscriptions{ + internal: map[string]map[string]packets.Subscription{}, + } +} + +// Add creates a new shared subscription for a group and client id pair. +func (s *SharedSubscriptions) Add(group, id string, val packets.Subscription) { + s.Lock() + defer s.Unlock() + if _, ok := s.internal[group]; !ok { + s.internal[group] = map[string]packets.Subscription{} + } + s.internal[group][id] = val +} + +// Delete deletes a client id from a shared subscription group. +func (s *SharedSubscriptions) Delete(group, id string) { + s.Lock() + defer s.Unlock() + delete(s.internal[group], id) + if len(s.internal[group]) == 0 { + delete(s.internal, group) + } +} + +// Get returns the subscription properties for a client id in a share group, if one exists. +func (s *SharedSubscriptions) Get(group, id string) (val packets.Subscription, ok bool) { + s.RLock() + defer s.RUnlock() + if _, ok := s.internal[group]; !ok { + return val, ok + } + + val, ok = s.internal[group][id] + return val, ok +} + +// GroupLen returns the number of groups subscribed to the filter. 
+func (s *SharedSubscriptions) GroupLen() int { + s.RLock() + defer s.RUnlock() + val := len(s.internal) + return val +} + +// Len returns the total number of shared subscriptions to a filter across all groups. +func (s *SharedSubscriptions) Len() int { + s.RLock() + defer s.RUnlock() + n := 0 + for _, group := range s.internal { + n += len(group) + } + return n +} + +// GetAll returns all shared subscription groups and their subscriptions. +func (s *SharedSubscriptions) GetAll() map[string]map[string]packets.Subscription { + s.RLock() + defer s.RUnlock() + m := map[string]map[string]packets.Subscription{} + for group, subs := range s.internal { + if _, ok := m[group]; !ok { + m[group] = map[string]packets.Subscription{} + } + + for id, sub := range subs { + m[group][id] = sub + } + } + return m +} + +// InlineSubFn is the signature for a callback function which will be called +// when an inline client receives a message on a topic it is subscribed to. +// The sub argument contains information about the subscription that was matched for any filters. +type InlineSubFn func(cl *Client, sub packets.Subscription, pk packets.Packet) + +// InlineSubscriptions represents a map of internal subscriptions keyed on client. +type InlineSubscriptions struct { + internal map[int]InlineSubscription + sync.RWMutex +} + +// NewInlineSubscriptions returns a new instance of InlineSubscriptions. +func NewInlineSubscriptions() *InlineSubscriptions { + return &InlineSubscriptions{ + internal: map[int]InlineSubscription{}, + } +} + +// Add adds a new internal subscription for a client id. +func (s *InlineSubscriptions) Add(val InlineSubscription) { + s.Lock() + defer s.Unlock() + s.internal[val.Identifier] = val +} + +// GetAll returns all internal subscriptions. 
+func (s *InlineSubscriptions) GetAll() map[int]InlineSubscription { + s.RLock() + defer s.RUnlock() + m := map[int]InlineSubscription{} + for k, v := range s.internal { + m[k] = v + } + return m +} + +// Get returns an internal subscription for a client id. +func (s *InlineSubscriptions) Get(id int) (val InlineSubscription, ok bool) { + s.RLock() + defer s.RUnlock() + val, ok = s.internal[id] + return val, ok +} + +// Len returns the number of internal subscriptions. +func (s *InlineSubscriptions) Len() int { + s.RLock() + defer s.RUnlock() + val := len(s.internal) + return val +} + +// Delete removes an internal subscription by the client id. +func (s *InlineSubscriptions) Delete(id int) { + s.Lock() + defer s.Unlock() + delete(s.internal, id) +} + +// Subscriptions is a map of subscriptions keyed on client. +type Subscriptions struct { + internal map[string]packets.Subscription + sync.RWMutex +} + +// NewSubscriptions returns a new instance of Subscriptions. +func NewSubscriptions() *Subscriptions { + return &Subscriptions{ + internal: map[string]packets.Subscription{}, + } +} + +// Add adds a new subscription for a client. ID can be a filter in the +// case this map is client state, or a client id if particle state. +func (s *Subscriptions) Add(id string, val packets.Subscription) { + s.Lock() + defer s.Unlock() + s.internal[id] = val +} + +// GetAll returns all subscriptions. +func (s *Subscriptions) GetAll() map[string]packets.Subscription { + s.RLock() + defer s.RUnlock() + m := map[string]packets.Subscription{} + for k, v := range s.internal { + m[k] = v + } + return m +} + +// Get returns a subscriptions for a specific client or filter id. +func (s *Subscriptions) Get(id string) (val packets.Subscription, ok bool) { + s.RLock() + defer s.RUnlock() + val, ok = s.internal[id] + return val, ok +} + +// Len returns the number of subscriptions. 
+func (s *Subscriptions) Len() int { + s.RLock() + defer s.RUnlock() + val := len(s.internal) + return val +} + +// Delete removes a subscription by client or filter id. +func (s *Subscriptions) Delete(id string) { + s.Lock() + defer s.Unlock() + delete(s.internal, id) +} + +// ClientSubscriptions is a map of aggregated subscriptions for a client. +type ClientSubscriptions map[string]packets.Subscription + +type InlineSubscription struct { + packets.Subscription + Handler InlineSubFn +} + +// Subscribers contains the shared and non-shared subscribers matching a topic. +type Subscribers struct { + Shared map[string]map[string]packets.Subscription + SharedSelected map[string]packets.Subscription + Subscriptions map[string]packets.Subscription + InlineSubscriptions map[int]InlineSubscription +} + +// SelectShared returns one subscriber for each shared subscription group. +func (s *Subscribers) SelectShared() { + s.SharedSelected = map[string]packets.Subscription{} + for _, subs := range s.Shared { + for client, sub := range subs { + cls, ok := s.SharedSelected[client] + if !ok { + cls = sub + } + + s.SharedSelected[client] = cls.Merge(sub) + break + } + } +} + +// MergeSharedSelected merges the selected subscribers for a shared subscription group +// and the non-shared subscribers, to ensure that no subscriber gets multiple messages +// due to have both types of subscription matching the same filter. +func (s *Subscribers) MergeSharedSelected() { + for client, sub := range s.SharedSelected { + cls, ok := s.Subscriptions[client] + if !ok { + cls = sub + } + + s.Subscriptions[client] = cls.Merge(sub) + } +} + +// TopicsIndex is a prefix/trie tree containing topic subscribers and retained messages. +type TopicsIndex struct { + Retained *packets.Packets + root *particle // a leaf containing a message and more leaves. +} + +// NewTopicsIndex returns a pointer to a new instance of Index. 
+func NewTopicsIndex() *TopicsIndex { + return &TopicsIndex{ + Retained: packets.NewPackets(), + root: &particle{ + particles: newParticles(), + subscriptions: NewSubscriptions(), + }, + } +} + +// InlineSubscribe adds a new internal subscription for a topic filter, returning +// true if the subscription was new. +func (x *TopicsIndex) InlineSubscribe(subscription InlineSubscription) bool { + x.root.Lock() + defer x.root.Unlock() + + var existed bool + n := x.set(subscription.Filter, 0) + _, existed = n.inlineSubscriptions.Get(subscription.Identifier) + n.inlineSubscriptions.Add(subscription) + + return !existed +} + +// InlineUnsubscribe removes an internal subscription for a topic filter associated with a specific client, +// returning true if the subscription existed. +func (x *TopicsIndex) InlineUnsubscribe(id int, filter string) bool { + x.root.Lock() + defer x.root.Unlock() + + particle := x.seek(filter, 0) + if particle == nil { + return false + } + + particle.inlineSubscriptions.Delete(id) + + if particle.inlineSubscriptions.Len() == 0 { + x.trim(particle) + } + return true +} + +// Subscribe adds a new subscription for a client to a topic filter, returning +// true if the subscription was new. +func (x *TopicsIndex) Subscribe(client string, subscription packets.Subscription) bool { + x.root.Lock() + defer x.root.Unlock() + + var existed bool + prefix, _ := isolateParticle(subscription.Filter, 0) + if strings.EqualFold(prefix, SharePrefix) { + group, _ := isolateParticle(subscription.Filter, 1) + n := x.set(subscription.Filter, 2) + _, existed = n.shared.Get(group, client) + n.shared.Add(group, client, subscription) + } else { + n := x.set(subscription.Filter, 0) + _, existed = n.subscriptions.Get(client) + n.subscriptions.Add(client, subscription) + } + + return !existed +} + +// Unsubscribe removes a subscription filter for a client, returning true if the +// subscription existed. 
+func (x *TopicsIndex) Unsubscribe(filter, client string) bool { + x.root.Lock() + defer x.root.Unlock() + + var d int + prefix, _ := isolateParticle(filter, 0) + shareSub := strings.EqualFold(prefix, SharePrefix) + if shareSub { + d = 2 + } + + particle := x.seek(filter, d) + if particle == nil { + return false + } + + if shareSub { + group, _ := isolateParticle(filter, 1) + particle.shared.Delete(group, client) + } else { + particle.subscriptions.Delete(client) + } + + x.trim(particle) + return true +} + +// RetainMessage saves a message payload to the end of a topic address. Returns +// 1 if a retained message was added, and -1 if the retained message was removed. +// 0 is returned if sequential empty payloads are received. +func (x *TopicsIndex) RetainMessage(pk packets.Packet) int64 { + x.root.Lock() + defer x.root.Unlock() + + n := x.set(pk.TopicName, 0) + n.Lock() + defer n.Unlock() + if len(pk.Payload) > 0 { + n.retainPath = pk.TopicName + x.Retained.Add(pk.TopicName, pk) + return 1 + } + + var out int64 + if pke, ok := x.Retained.Get(pk.TopicName); ok && len(pke.Payload) > 0 && pke.FixedHeader.Retain { + out = -1 // if a retained packet existed, return -1 + } + + n.retainPath = "" + x.Retained.Delete(pk.TopicName) // [MQTT-3.3.1-6] [MQTT-3.3.1-7] + x.trim(n) + + return out +} + +// set creates a topic address in the index and returns the final particle. +func (x *TopicsIndex) set(topic string, d int) *particle { + var key string + var hasNext = true + n := x.root + for hasNext { + key, hasNext = isolateParticle(topic, d) + d++ + + p := n.particles.get(key) + if p == nil { + p = newParticle(key, n) + n.particles.add(p) + } + n = p + } + + return n +} + +// seek finds the particle at a specific index in a topic filter. 
+func (x *TopicsIndex) seek(filter string, d int) *particle { + var key string + var hasNext = true + n := x.root + for hasNext { + key, hasNext = isolateParticle(filter, d) + n = n.particles.get(key) + d++ + if n == nil { + return nil + } + } + + return n +} + +// trim removes empty filter particles from the index. +func (x *TopicsIndex) trim(n *particle) { + for n.parent != nil && n.retainPath == "" && n.particles.len()+n.subscriptions.Len()+n.shared.Len() == 0 { + key := n.key + n = n.parent + n.particles.delete(key) + } +} + +// Messages returns a slice of any retained messages which match a filter. +func (x *TopicsIndex) Messages(filter string) []packets.Packet { + return x.scanMessages(filter, 0, nil, []packets.Packet{}) +} + +// scanMessages returns all retained messages on topics matching a given filter. +func (x *TopicsIndex) scanMessages(filter string, d int, n *particle, pks []packets.Packet) []packets.Packet { + if n == nil { + n = x.root + } + + if len(filter) == 0 || x.Retained.Len() == 0 { + return pks + } + + if !strings.ContainsRune(filter, '#') && !strings.ContainsRune(filter, '+') { + if pk, ok := x.Retained.Get(filter); ok { + pks = append(pks, pk) + } + return pks + } + + key, hasNext := isolateParticle(filter, d) + if key == "+" || key == "#" || d == -1 { + for _, adjacent := range n.particles.getAll() { + if d == 0 && adjacent.key == SysPrefix { + continue + } + + if !hasNext { + if adjacent.retainPath != "" { + if pk, ok := x.Retained.Get(adjacent.retainPath); ok { + pks = append(pks, pk) + } + } + } + + if hasNext || (d >= 0 && key == "#") { + pks = x.scanMessages(filter, d+1, adjacent, pks) + } + } + return pks + } + + if particle := n.particles.get(key); particle != nil { + if hasNext { + return x.scanMessages(filter, d+1, particle, pks) + } + + if pk, ok := x.Retained.Get(particle.retainPath); ok { + pks = append(pks, pk) + } + } + + return pks +} + +// Subscribers returns a map of clients who are subscribed to matching filters, +// 
their subscription ids and highest qos. +func (x *TopicsIndex) Subscribers(topic string) *Subscribers { + return x.scanSubscribers(topic, 0, nil, &Subscribers{ + Shared: map[string]map[string]packets.Subscription{}, + SharedSelected: map[string]packets.Subscription{}, + Subscriptions: map[string]packets.Subscription{}, + InlineSubscriptions: map[int]InlineSubscription{}, + }) +} + +// scanSubscribers returns a list of client subscriptions matching an indexed topic address. +func (x *TopicsIndex) scanSubscribers(topic string, d int, n *particle, subs *Subscribers) *Subscribers { + if n == nil { + n = x.root + } + + if len(topic) == 0 { + return subs + } + + key, hasNext := isolateParticle(topic, d) + for _, partKey := range []string{key, "+"} { + if particle := n.particles.get(partKey); particle != nil { // [MQTT-3.3.2-3] + if hasNext { + x.scanSubscribers(topic, d+1, particle, subs) + } else { + x.gatherSubscriptions(topic, particle, subs) + x.gatherSharedSubscriptions(particle, subs) + x.gatherInlineSubscriptions(particle, subs) + + if wild := particle.particles.get("#"); wild != nil && partKey != "+" { + x.gatherSubscriptions(topic, wild, subs) // also match any subs where filter/# is filter as per 4.7.1.2 + x.gatherSharedSubscriptions(wild, subs) + x.gatherInlineSubscriptions(particle, subs) + } + } + } + } + + if particle := n.particles.get("#"); particle != nil { + x.gatherSubscriptions(topic, particle, subs) + x.gatherSharedSubscriptions(particle, subs) + x.gatherInlineSubscriptions(particle, subs) + } + + return subs +} + +// gatherSubscriptions collects any matching subscriptions, and gathers any identifiers or highest qos values. 
+func (x *TopicsIndex) gatherSubscriptions(topic string, particle *particle, subs *Subscribers) { + if subs.Subscriptions == nil { + subs.Subscriptions = map[string]packets.Subscription{} + } + + for client, sub := range particle.subscriptions.GetAll() { + if len(sub.Filter) > 0 && topic[0] == '$' && (sub.Filter[0] == '+' || sub.Filter[0] == '#') { // don't match $ topics with top level wildcards [MQTT-4.7.1-1] [MQTT-4.7.1-2] + continue + } + + cls, ok := subs.Subscriptions[client] + if !ok { + cls = sub + } + + subs.Subscriptions[client] = cls.Merge(sub) + } +} + +// gatherSharedSubscriptions gathers all shared subscriptions for a particle. +func (x *TopicsIndex) gatherSharedSubscriptions(particle *particle, subs *Subscribers) { + if subs.Shared == nil { + subs.Shared = map[string]map[string]packets.Subscription{} + } + + for _, shares := range particle.shared.GetAll() { + for client, sub := range shares { + if _, ok := subs.Shared[sub.Filter]; !ok { + subs.Shared[sub.Filter] = map[string]packets.Subscription{} + } + + subs.Shared[sub.Filter][client] = sub + } + } +} + +// gatherSharedSubscriptions gathers all inline subscriptions for a particle. +func (x *TopicsIndex) gatherInlineSubscriptions(particle *particle, subs *Subscribers) { + if subs.InlineSubscriptions == nil { + subs.InlineSubscriptions = map[int]InlineSubscription{} + } + + for id, inline := range particle.inlineSubscriptions.GetAll() { + subs.InlineSubscriptions[id] = inline + } +} + +// isolateParticle extracts a particle between d / and d+1 / without allocations. 
+func isolateParticle(filter string, d int) (particle string, hasNext bool) { + var next, end int + for i := 0; end > -1 && i <= d; i++ { + end = strings.IndexRune(filter, '/') + + switch { + case d > -1 && i == d && end > -1: + hasNext = true + particle = filter[next:end] + case end > -1: + hasNext = false + filter = filter[end+1:] + default: + hasNext = false + particle = filter[next:] + } + } + + return +} + +// IsSharedFilter returns true if the filter uses the share prefix. +func IsSharedFilter(filter string) bool { + prefix, _ := isolateParticle(filter, 0) + return strings.EqualFold(prefix, SharePrefix) +} + +// IsValidFilter returns true if the filter is valid. +func IsValidFilter(filter string, forPublish bool) bool { + if !forPublish && len(filter) == 0 { // publishing can accept zero-length topic filter if topic alias exists, so we don't enforce for publish. + return false // [MQTT-4.7.3-1] + } + + if forPublish { + if len(filter) >= len(SysPrefix) && strings.EqualFold(filter[0:len(SysPrefix)], SysPrefix) { + // 4.7.2 Non-normative - The Server SHOULD prevent Clients from using such Topic Names [$SYS] to exchange messages with other Clients. + return false + } + + if strings.ContainsRune(filter, '+') || strings.ContainsRune(filter, '#') { + return false //[MQTT-3.3.2-2] + } + } + + wildhash := strings.IndexRune(filter, '#') + if wildhash >= 0 && wildhash != len(filter)-1 { // [MQTT-4.7.1-2] + return false + } + + prefix, hasNext := isolateParticle(filter, 0) + if !hasNext && strings.EqualFold(prefix, SharePrefix) { + return false // [MQTT-4.8.2-1] + } + + if hasNext && strings.EqualFold(prefix, SharePrefix) { + group, hasNext := isolateParticle(filter, 1) + if !hasNext { + return false // [MQTT-4.8.2-1] + } + + if strings.ContainsRune(group, '+') || strings.ContainsRune(group, '#') { + return false // [MQTT-4.8.2-2] + } + } + + return true +} + +// particle is a child node on the tree. 
+type particle struct { + key string // the key of the particle + parent *particle // a pointer to the parent of the particle + particles particles // a map of child particles + subscriptions *Subscriptions // a map of subscriptions made by clients to this ending address + shared *SharedSubscriptions // a map of shared subscriptions keyed on group name + inlineSubscriptions *InlineSubscriptions // a map of inline subscriptions for this particle + retainPath string // path of a retained message + sync.Mutex // mutex for when making changes to the particle +} + +// newParticle returns a pointer to a new instance of particle. +func newParticle(key string, parent *particle) *particle { + return &particle{ + key: key, + parent: parent, + particles: newParticles(), + subscriptions: NewSubscriptions(), + shared: NewSharedSubscriptions(), + inlineSubscriptions: NewInlineSubscriptions(), + } +} + +// particles is a concurrency safe map of particles. +type particles struct { + internal map[string]*particle + sync.RWMutex +} + +// newParticles returns a map of particles. +func newParticles() particles { + return particles{ + internal: map[string]*particle{}, + } +} + +// add adds a new particle. +func (p *particles) add(val *particle) { + p.Lock() + p.internal[val.key] = val + p.Unlock() +} + +// getAll returns all particles. +func (p *particles) getAll() map[string]*particle { + p.RLock() + defer p.RUnlock() + m := map[string]*particle{} + for k, v := range p.internal { + m[k] = v + } + return m +} + +// get returns a particle by id (key). +func (p *particles) get(id string) *particle { + p.RLock() + defer p.RUnlock() + return p.internal[id] +} + +// len returns the number of particles. +func (p *particles) len() int { + p.RLock() + defer p.RUnlock() + val := len(p.internal) + return val +} + +// delete removes a particle. 
+func (p *particles) delete(id string) { + p.Lock() + defer p.Unlock() + delete(p.internal, id) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS b/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS new file mode 100644 index 000000000..4f189b708 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS @@ -0,0 +1,8 @@ +reviewers: + - mfojtik + - deads2k + - sttts +approvers: + - mfojtik + - deads2k + - sttts diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go new file mode 100644 index 000000000..f513a90f3 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go @@ -0,0 +1,238 @@ +package events + +import ( + "context" + "errors" + "fmt" + "os" + "time" + + "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" +) + +// Recorder is a simple event recording interface. +type Recorder interface { + Event(reason, message string) + Eventf(reason, messageFmt string, args ...interface{}) + Warning(reason, message string) + Warningf(reason, messageFmt string, args ...interface{}) + + // ForComponent allows to fiddle the component name before sending the event to sink. + // Making more unique components will prevent the spam filter in upstream event sink from dropping + // events. + ForComponent(componentName string) Recorder + + // WithComponentSuffix is similar to ForComponent except it just suffix the current component name instead of overriding. + WithComponentSuffix(componentNameSuffix string) Recorder + + // WithContext allows to set a context for event create API calls. + WithContext(ctx context.Context) Recorder + + // ComponentName returns the current source component name for the event. 
+ // This allows to suffix the original component name with 'sub-component'. + ComponentName() string + + Shutdown() +} + +// podNameEnv is a name of environment variable inside container that specifies the name of the current replica set. +// This replica set name is then used as a source/involved object for operator events. +const podNameEnv = "POD_NAME" + +// podNameEnvFunc allows to override the way we get the environment variable value (for unit tests). +var podNameEnvFunc = func() string { + return os.Getenv(podNameEnv) +} + +// GetControllerReferenceForCurrentPod provides an object reference to a controller managing the pod/container where this process runs. +// The pod name must be provided via the POD_NAME name. +// Even if this method returns an error, it always return valid reference to the namespace. It allows the callers to control the logging +// and decide to fail or accept the namespace. +func GetControllerReferenceForCurrentPod(ctx context.Context, client kubernetes.Interface, targetNamespace string, reference *corev1.ObjectReference) (*corev1.ObjectReference, error) { + if reference == nil { + // Try to get the pod name via POD_NAME environment variable + reference := &corev1.ObjectReference{Kind: "Pod", Name: podNameEnvFunc(), Namespace: targetNamespace} + if len(reference.Name) != 0 { + return GetControllerReferenceForCurrentPod(ctx, client, targetNamespace, reference) + } + // If that fails, lets try to guess the pod by listing all pods in namespaces and using the first pod in the list + reference, err := guessControllerReferenceForNamespace(ctx, client.CoreV1().Pods(targetNamespace)) + if err != nil { + // If this fails, do not give up with error but instead use the namespace as controller reference for the pod + // NOTE: This is last resort, if we see this often it might indicate something is wrong in the cluster. + // In some cases this might help with flakes. 
+ return getControllerReferenceForNamespace(targetNamespace), err + } + return GetControllerReferenceForCurrentPod(ctx, client, targetNamespace, reference) + } + + switch reference.Kind { + case "Pod": + pod, err := client.CoreV1().Pods(reference.Namespace).Get(ctx, reference.Name, metav1.GetOptions{}) + if err != nil { + return getControllerReferenceForNamespace(reference.Namespace), err + } + if podController := metav1.GetControllerOf(pod); podController != nil { + return GetControllerReferenceForCurrentPod(ctx, client, targetNamespace, makeObjectReference(podController, targetNamespace)) + } + // This is a bare pod without any ownerReference + return makeObjectReference(&metav1.OwnerReference{Kind: "Pod", Name: pod.Name, UID: pod.UID, APIVersion: "v1"}, pod.Namespace), nil + case "ReplicaSet": + rs, err := client.AppsV1().ReplicaSets(reference.Namespace).Get(ctx, reference.Name, metav1.GetOptions{}) + if err != nil { + return getControllerReferenceForNamespace(reference.Namespace), err + } + if rsController := metav1.GetControllerOf(rs); rsController != nil { + return GetControllerReferenceForCurrentPod(ctx, client, targetNamespace, makeObjectReference(rsController, targetNamespace)) + } + // This is a replicaSet without any ownerReference + return reference, nil + default: + return reference, nil + } +} + +// getControllerReferenceForNamespace returns an object reference to the given namespace. 
+func getControllerReferenceForNamespace(targetNamespace string) *corev1.ObjectReference { + return &corev1.ObjectReference{ + Kind: "Namespace", + Namespace: targetNamespace, + Name: targetNamespace, + APIVersion: "v1", + } +} + +// makeObjectReference makes object reference from ownerReference and target namespace +func makeObjectReference(owner *metav1.OwnerReference, targetNamespace string) *corev1.ObjectReference { + return &corev1.ObjectReference{ + Kind: owner.Kind, + Namespace: targetNamespace, + Name: owner.Name, + UID: owner.UID, + APIVersion: owner.APIVersion, + } +} + +// guessControllerReferenceForNamespace tries to guess what resource to reference. +func guessControllerReferenceForNamespace(ctx context.Context, client corev1client.PodInterface) (*corev1.ObjectReference, error) { + pods, err := client.List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + if len(pods.Items) == 0 { + return nil, fmt.Errorf("unable to setup event recorder as %q env variable is not set and there are no pods", podNameEnv) + } + + for _, pod := range pods.Items { + ownerRef := metav1.GetControllerOf(&pod) + if ownerRef == nil { + continue + } + return &corev1.ObjectReference{ + Kind: ownerRef.Kind, + Namespace: pod.Namespace, + Name: ownerRef.Name, + UID: ownerRef.UID, + APIVersion: ownerRef.APIVersion, + }, nil + } + return nil, errors.New("can't guess controller ref") +} + +// NewRecorder returns new event recorder. +func NewRecorder(client corev1client.EventInterface, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder { + return &recorder{ + eventClient: client, + involvedObjectRef: involvedObjectRef, + sourceComponent: sourceComponentName, + } +} + +// recorder is an implementation of Recorder interface. 
+type recorder struct { + eventClient corev1client.EventInterface + involvedObjectRef *corev1.ObjectReference + sourceComponent string + + // TODO: This is not the right way to pass the context, but there is no other way without breaking event interface + ctx context.Context +} + +func (r *recorder) ComponentName() string { + return r.sourceComponent +} + +func (r *recorder) Shutdown() {} + +func (r *recorder) ForComponent(componentName string) Recorder { + newRecorderForComponent := *r + newRecorderForComponent.sourceComponent = componentName + return &newRecorderForComponent +} + +func (r *recorder) WithContext(ctx context.Context) Recorder { + r.ctx = ctx + return r +} + +func (r *recorder) WithComponentSuffix(suffix string) Recorder { + return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix)) +} + +// Event emits the normal type event and allow formatting of message. +func (r *recorder) Eventf(reason, messageFmt string, args ...interface{}) { + r.Event(reason, fmt.Sprintf(messageFmt, args...)) +} + +// Warning emits the warning type event and allow formatting of message. +func (r *recorder) Warningf(reason, messageFmt string, args ...interface{}) { + r.Warning(reason, fmt.Sprintf(messageFmt, args...)) +} + +// Event emits the normal type event. +func (r *recorder) Event(reason, message string) { + event := makeEvent(r.involvedObjectRef, r.sourceComponent, corev1.EventTypeNormal, reason, message) + ctx := context.Background() + if r.ctx != nil { + ctx = r.ctx + } + if _, err := r.eventClient.Create(ctx, event, metav1.CreateOptions{}); err != nil { + klog.Warningf("Error creating event %+v: %v", event, err) + } +} + +// Warning emits the warning type event. 
+func (r *recorder) Warning(reason, message string) { + event := makeEvent(r.involvedObjectRef, r.sourceComponent, corev1.EventTypeWarning, reason, message) + ctx := context.Background() + if r.ctx != nil { + ctx = r.ctx + } + if _, err := r.eventClient.Create(ctx, event, metav1.CreateOptions{}); err != nil { + klog.Warningf("Error creating event %+v: %v", event, err) + } +} + +func makeEvent(involvedObjRef *corev1.ObjectReference, sourceComponent string, eventType, reason, message string) *corev1.Event { + currentTime := metav1.Time{Time: time.Now()} + event := &corev1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", involvedObjRef.Name, currentTime.UnixNano()), + Namespace: involvedObjRef.Namespace, + }, + InvolvedObject: *involvedObjRef, + Reason: reason, + Message: message, + Type: eventType, + Count: 1, + FirstTimestamp: currentTime, + LastTimestamp: currentTime, + } + event.Source.Component = sourceComponent + return event +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go new file mode 100644 index 000000000..75efe3e19 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go @@ -0,0 +1,86 @@ +package events + +import ( + "context" + "fmt" + "sync" + + corev1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" +) + +type inMemoryEventRecorder struct { + events []*corev1.Event + source string + ctx context.Context + sync.Mutex +} + +// inMemoryDummyObjectReference is used for fake events. +var inMemoryDummyObjectReference = corev1.ObjectReference{ + Kind: "Pod", + Namespace: "dummy", + Name: "dummy", + APIVersion: "v1", +} + +type InMemoryRecorder interface { + Events() []*corev1.Event + Recorder +} + +// NewInMemoryRecorder provides event recorder that stores all events recorded in memory and allow to replay them using the Events() method. 
+// This recorder should be only used in unit tests. +func NewInMemoryRecorder(sourceComponent string) InMemoryRecorder { + return &inMemoryEventRecorder{events: []*corev1.Event{}, source: sourceComponent} +} + +func (r *inMemoryEventRecorder) ComponentName() string { + return r.source +} + +func (r *inMemoryEventRecorder) Shutdown() {} + +func (r *inMemoryEventRecorder) ForComponent(component string) Recorder { + r.Lock() + defer r.Unlock() + r.source = component + return r +} + +func (r *inMemoryEventRecorder) WithContext(ctx context.Context) Recorder { + r.ctx = ctx + return r +} + +func (r *inMemoryEventRecorder) WithComponentSuffix(suffix string) Recorder { + return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix)) +} + +// Events returns list of recorded events +func (r *inMemoryEventRecorder) Events() []*corev1.Event { + return r.events +} + +func (r *inMemoryEventRecorder) Event(reason, message string) { + r.Lock() + defer r.Unlock() + event := makeEvent(&inMemoryDummyObjectReference, r.source, corev1.EventTypeNormal, reason, message) + r.events = append(r.events, event) +} + +func (r *inMemoryEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) { + r.Event(reason, fmt.Sprintf(messageFmt, args...)) +} + +func (r *inMemoryEventRecorder) Warning(reason, message string) { + r.Lock() + defer r.Unlock() + event := makeEvent(&inMemoryDummyObjectReference, r.source, corev1.EventTypeWarning, reason, message) + klog.Info(event.String()) + r.events = append(r.events, event) +} + +func (r *inMemoryEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) { + r.Warning(reason, fmt.Sprintf(messageFmt, args...)) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go new file mode 100644 index 000000000..90639f2d9 --- /dev/null +++ 
b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go @@ -0,0 +1,58 @@ +package events + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" +) + +type LoggingEventRecorder struct { + component string + ctx context.Context +} + +func (r *LoggingEventRecorder) WithContext(ctx context.Context) Recorder { + r.ctx = ctx + return r +} + +// NewLoggingEventRecorder provides event recorder that will log all recorded events via klog. +func NewLoggingEventRecorder(component string) Recorder { + return &LoggingEventRecorder{component: component} +} + +func (r *LoggingEventRecorder) ComponentName() string { + return r.component +} + +func (r *LoggingEventRecorder) ForComponent(component string) Recorder { + newRecorder := *r + newRecorder.component = component + return &newRecorder +} + +func (r *LoggingEventRecorder) Shutdown() {} + +func (r *LoggingEventRecorder) WithComponentSuffix(suffix string) Recorder { + return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix)) +} + +func (r *LoggingEventRecorder) Event(reason, message string) { + event := makeEvent(&inMemoryDummyObjectReference, "", corev1.EventTypeNormal, reason, message) + klog.Info(event.String()) +} + +func (r *LoggingEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) { + r.Event(reason, fmt.Sprintf(messageFmt, args...)) +} + +func (r *LoggingEventRecorder) Warning(reason, message string) { + event := makeEvent(&inMemoryDummyObjectReference, "", corev1.EventTypeWarning, reason, message) + klog.Warning(event.String()) +} + +func (r *LoggingEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) { + r.Warning(reason, fmt.Sprintf(messageFmt, args...)) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go new file mode 100644 index 000000000..0e41949a7 --- /dev/null +++ 
b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go @@ -0,0 +1,173 @@ +package events + +import ( + "context" + "fmt" + "strings" + "sync" + + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes/scheme" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/record" + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" + "k8s.io/klog/v2" +) + +// NewKubeRecorder returns new event recorder with tweaked correlator options. +func NewKubeRecorderWithOptions(client corev1client.EventInterface, options record.CorrelatorOptions, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder { + return (&upstreamRecorder{ + client: client, + component: sourceComponentName, + involvedObjectRef: involvedObjectRef, + options: options, + fallbackRecorder: NewRecorder(client, sourceComponentName, involvedObjectRef), + }).ForComponent(sourceComponentName) +} + +// NewKubeRecorder returns new event recorder with default correlator options. +func NewKubeRecorder(client corev1client.EventInterface, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder { + return NewKubeRecorderWithOptions(client, record.CorrelatorOptions{}, sourceComponentName, involvedObjectRef) +} + +// upstreamRecorder is an implementation of Recorder interface. +type upstreamRecorder struct { + client corev1client.EventInterface + clientCtx context.Context + component string + broadcaster record.EventBroadcaster + eventRecorder record.EventRecorder + involvedObjectRef *corev1.ObjectReference + options record.CorrelatorOptions + + // shuttingDown indicates that the broadcaster for this recorder is being shut down + shuttingDown bool + shutdownMutex sync.RWMutex + + // fallbackRecorder is used when the kube recorder is shutting down + // in that case we create the events directly. 
+ fallbackRecorder Recorder +} + +func (r *upstreamRecorder) WithContext(ctx context.Context) Recorder { + r.clientCtx = ctx + return r +} + +// RecommendedClusterSingletonCorrelatorOptions provides recommended event correlator options for components that produce +// many events (like operators). +func RecommendedClusterSingletonCorrelatorOptions() record.CorrelatorOptions { + return record.CorrelatorOptions{ + BurstSize: 60, // default: 25 (change allows a single source to send 50 events about object per minute) + QPS: 1. / 1., // default: 1/300 (change allows refill rate to 1 new event every 1s) + KeyFunc: func(event *corev1.Event) (aggregateKey string, localKey string) { + return strings.Join([]string{ + event.Source.Component, + event.Source.Host, + event.InvolvedObject.Kind, + event.InvolvedObject.Namespace, + event.InvolvedObject.Name, + string(event.InvolvedObject.UID), + event.InvolvedObject.APIVersion, + event.Type, + event.Reason, + // By default, KeyFunc don't use message for aggregation, this cause events with different message, but same reason not be lost as "similar events". + event.Message, + }, ""), event.Message + }, + } +} + +var eventsCounterMetric = metrics.NewCounterVec(&metrics.CounterOpts{ + Subsystem: "event_recorder", + Name: "total_events_count", + Help: "Total count of events processed by this event recorder per involved object", + StabilityLevel: metrics.ALPHA, +}, []string{"severity"}) + +func init() { + (&sync.Once{}).Do(func() { + legacyregistry.MustRegister(eventsCounterMetric) + }) +} + +func (r *upstreamRecorder) ForComponent(componentName string) Recorder { + newRecorderForComponent := upstreamRecorder{ + client: r.client, + fallbackRecorder: r.fallbackRecorder.WithComponentSuffix(componentName), + options: r.options, + involvedObjectRef: r.involvedObjectRef, + shuttingDown: r.shuttingDown, + } + + // tweak the event correlator, so we don't loose important events. 
+ broadcaster := record.NewBroadcasterWithCorrelatorOptions(r.options) + broadcaster.StartLogging(klog.Infof) + broadcaster.StartRecordingToSink(&corev1client.EventSinkImpl{Interface: newRecorderForComponent.client}) + + newRecorderForComponent.eventRecorder = broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: componentName}) + newRecorderForComponent.broadcaster = broadcaster + newRecorderForComponent.component = componentName + + return &newRecorderForComponent +} + +func (r *upstreamRecorder) Shutdown() { + r.shutdownMutex.Lock() + r.shuttingDown = true + r.shutdownMutex.Unlock() + // Wait for broadcaster to flush events (this is blocking) + // TODO: There is still race condition in upstream that might cause panic() on events recorded after the shutdown + // is called as the event recording is not-blocking (go routine based). + r.broadcaster.Shutdown() +} + +func (r *upstreamRecorder) WithComponentSuffix(suffix string) Recorder { + return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix)) +} + +func (r *upstreamRecorder) ComponentName() string { + return r.component +} + +// Eventf emits the normal type event and allow formatting of message. +func (r *upstreamRecorder) Eventf(reason, messageFmt string, args ...interface{}) { + r.Event(reason, fmt.Sprintf(messageFmt, args...)) +} + +// Warningf emits the warning type event and allow formatting of message. +func (r *upstreamRecorder) Warningf(reason, messageFmt string, args ...interface{}) { + r.Warning(reason, fmt.Sprintf(messageFmt, args...)) +} + +func (r *upstreamRecorder) incrementEventsCounter(severity string) { + if r.involvedObjectRef == nil { + return + } + eventsCounterMetric.WithLabelValues(severity).Inc() +} + +// Event emits the normal type event. 
+func (r *upstreamRecorder) Event(reason, message string) { + r.shutdownMutex.RLock() + defer r.shutdownMutex.RUnlock() + defer r.incrementEventsCounter(corev1.EventTypeNormal) + if r.shuttingDown { + r.fallbackRecorder.Event(reason, message) + return + } + r.eventRecorder.Event(r.involvedObjectRef, corev1.EventTypeNormal, reason, message) +} + +// Warning emits the warning type event. +func (r *upstreamRecorder) Warning(reason, message string) { + r.shutdownMutex.RLock() + defer r.shutdownMutex.RUnlock() + defer r.incrementEventsCounter(corev1.EventTypeWarning) + if r.shuttingDown { + r.fallbackRecorder.Warning(reason, message) + return + } + r.eventRecorder.Event(r.involvedObjectRef, corev1.EventTypeWarning, reason, message) +} diff --git a/vendor/github.com/rs/xid/.appveyor.yml b/vendor/github.com/rs/xid/.appveyor.yml new file mode 100644 index 000000000..c73bb33ba --- /dev/null +++ b/vendor/github.com/rs/xid/.appveyor.yml @@ -0,0 +1,27 @@ +version: 1.0.0.{build} + +platform: x64 + +branches: + only: + - master + +clone_folder: c:\gopath\src\github.com\rs\xid + +environment: + GOPATH: c:\gopath + +install: + - echo %PATH% + - echo %GOPATH% + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version + - go env + - go get -t . 
+ +build_script: + - go build + +test_script: + - go test + diff --git a/vendor/github.com/rs/xid/.travis.yml b/vendor/github.com/rs/xid/.travis.yml new file mode 100644 index 000000000..b37da1594 --- /dev/null +++ b/vendor/github.com/rs/xid/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: +- "1.9" +- "1.10" +- "master" +matrix: + allow_failures: + - go: "master" diff --git a/vendor/github.com/rs/xid/LICENSE b/vendor/github.com/rs/xid/LICENSE new file mode 100644 index 000000000..47c5e9d2d --- /dev/null +++ b/vendor/github.com/rs/xid/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2015 Olivier Poitrey + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is furnished +to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/rs/xid/README.md b/vendor/github.com/rs/xid/README.md new file mode 100644 index 000000000..5bf462e83 --- /dev/null +++ b/vendor/github.com/rs/xid/README.md @@ -0,0 +1,116 @@ +# Globally Unique ID Generator + +[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/xid) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/xid/master/LICENSE) [![Build Status](https://travis-ci.org/rs/xid.svg?branch=master)](https://travis-ci.org/rs/xid) [![Coverage](http://gocover.io/_badge/github.com/rs/xid)](http://gocover.io/github.com/rs/xid) + +Package xid is a globally unique id generator library, ready to safely be used directly in your server code. + +Xid uses the Mongo Object ID algorithm to generate globally unique ids with a different serialization (base64) to make it shorter when transported as a string: +https://docs.mongodb.org/manual/reference/object-id/ + +- 4-byte value representing the seconds since the Unix epoch, +- 3-byte machine identifier, +- 2-byte process id, and +- 3-byte counter, starting with a random value. + +The binary representation of the id is compatible with Mongo 12 bytes Object IDs. +The string representation is using base32 hex (w/o padding) for better space efficiency +when stored in that form (20 bytes). The hex variant of base32 is used to retain the +sortable property of the id. + +Xid doesn't use base64 because case sensitivity and the 2 non alphanum chars may be an +issue when transported as a string between various systems. Base36 wasn't retained either +because 1/ it's not standard 2/ the resulting size is not predictable (not bit aligned) +and 3/ it would not remain sortable. To validate a base32 `xid`, expect a 20 chars long, +all lowercase sequence of `a` to `v` letters and `0` to `9` numbers (`[0-9a-v]{20}`). + +UUIDs are 16 bytes (128 bits) and 36 chars as string representation. 
Twitter Snowflake +ids are 8 bytes (64 bits) but require machine/data-center configuration and/or central +generator servers. xid stands in between with 12 bytes (96 bits) and a more compact +URL-safe string representation (20 chars). No configuration or central generator server +is required so it can be used directly in server's code. + +| Name | Binary Size | String Size | Features +|-------------|-------------|----------------|---------------- +| [UUID] | 16 bytes | 36 chars | configuration free, not sortable +| [shortuuid] | 16 bytes | 22 chars | configuration free, not sortable +| [Snowflake] | 8 bytes | up to 20 chars | needs machine/DC configuration, needs central server, sortable +| [MongoID] | 12 bytes | 24 chars | configuration free, sortable +| xid | 12 bytes | 20 chars | configuration free, sortable + +[UUID]: https://en.wikipedia.org/wiki/Universally_unique_identifier +[shortuuid]: https://github.com/stochastic-technologies/shortuuid +[Snowflake]: https://blog.twitter.com/2010/announcing-snowflake +[MongoID]: https://docs.mongodb.org/manual/reference/object-id/ + +Features: + +- Size: 12 bytes (96 bits), smaller than UUID, larger than snowflake +- Base32 hex encoded by default (20 chars when transported as printable string, still sortable) +- Non configured, you don't need set a unique machine and/or data center id +- K-ordered +- Embedded time with 1 second precision +- Unicity guaranteed for 16,777,216 (24 bits) unique ids per second and per host/process +- Lock-free (i.e.: unlike UUIDv1 and v2) + +Best used with [zerolog](https://github.com/rs/zerolog)'s +[RequestIDHandler](https://godoc.org/github.com/rs/zerolog/hlog#RequestIDHandler). + +Notes: + +- Xid is dependent on the system time, a monotonic counter and so is not cryptographically secure. If unpredictability of IDs is important, you should not use Xids. It is worth noting that most other UUID-like implementations are also not cryptographically secure. 
You should use libraries that rely on cryptographically secure sources (like /dev/urandom on unix, crypto/rand in golang), if you want a truly random ID generator. + +References: + +- http://www.slideshare.net/davegardnerisme/unique-id-generation-in-distributed-systems +- https://en.wikipedia.org/wiki/Universally_unique_identifier +- https://blog.twitter.com/2010/announcing-snowflake +- Python port by [Graham Abbott](https://github.com/graham): https://github.com/graham/python_xid +- Scala port by [Egor Kolotaev](https://github.com/kolotaev): https://github.com/kolotaev/ride +- Rust port by [Jérôme Renard](https://github.com/jeromer/): https://github.com/jeromer/libxid +- Ruby port by [Valar](https://github.com/valarpirai/): https://github.com/valarpirai/ruby_xid +- Java port by [0xShamil](https://github.com/0xShamil/): https://github.com/0xShamil/java-xid +- Dart port by [Peter Bwire](https://github.com/pitabwire): https://pub.dev/packages/xid + +## Install + + go get github.com/rs/xid + +## Usage + +```go +guid := xid.New() + +println(guid.String()) +// Output: 9m4e2mr0ui3e8a215n4g +``` + +Get `xid` embedded info: + +```go +guid.Machine() +guid.Pid() +guid.Time() +guid.Counter() +``` + +## Benchmark + +Benchmark against Go [Maxim Bublis](https://github.com/satori)'s [UUID](https://github.com/satori/go.uuid). + +``` +BenchmarkXID 20000000 91.1 ns/op 32 B/op 1 allocs/op +BenchmarkXID-2 20000000 55.9 ns/op 32 B/op 1 allocs/op +BenchmarkXID-4 50000000 32.3 ns/op 32 B/op 1 allocs/op +BenchmarkUUIDv1 10000000 204 ns/op 48 B/op 1 allocs/op +BenchmarkUUIDv1-2 10000000 160 ns/op 48 B/op 1 allocs/op +BenchmarkUUIDv1-4 10000000 195 ns/op 48 B/op 1 allocs/op +BenchmarkUUIDv4 1000000 1503 ns/op 64 B/op 2 allocs/op +BenchmarkUUIDv4-2 1000000 1427 ns/op 64 B/op 2 allocs/op +BenchmarkUUIDv4-4 1000000 1452 ns/op 64 B/op 2 allocs/op +``` + +Note: UUIDv1 requires a global lock, hence the performance degradation as we add more CPUs. 
+ +## Licenses + +All source code is licensed under the [MIT License](https://raw.github.com/rs/xid/master/LICENSE). diff --git a/vendor/github.com/rs/xid/error.go b/vendor/github.com/rs/xid/error.go new file mode 100644 index 000000000..ea2537493 --- /dev/null +++ b/vendor/github.com/rs/xid/error.go @@ -0,0 +1,11 @@ +package xid + +const ( + // ErrInvalidID is returned when trying to unmarshal an invalid ID. + ErrInvalidID strErr = "xid: invalid ID" +) + +// strErr allows declaring errors as constants. +type strErr string + +func (err strErr) Error() string { return string(err) } diff --git a/vendor/github.com/rs/xid/hostid_darwin.go b/vendor/github.com/rs/xid/hostid_darwin.go new file mode 100644 index 000000000..08351ff72 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_darwin.go @@ -0,0 +1,9 @@ +// +build darwin + +package xid + +import "syscall" + +func readPlatformMachineID() (string, error) { + return syscall.Sysctl("kern.uuid") +} diff --git a/vendor/github.com/rs/xid/hostid_fallback.go b/vendor/github.com/rs/xid/hostid_fallback.go new file mode 100644 index 000000000..7fbd3c004 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_fallback.go @@ -0,0 +1,9 @@ +// +build !darwin,!linux,!freebsd,!windows + +package xid + +import "errors" + +func readPlatformMachineID() (string, error) { + return "", errors.New("not implemented") +} diff --git a/vendor/github.com/rs/xid/hostid_freebsd.go b/vendor/github.com/rs/xid/hostid_freebsd.go new file mode 100644 index 000000000..be25a039e --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_freebsd.go @@ -0,0 +1,9 @@ +// +build freebsd + +package xid + +import "syscall" + +func readPlatformMachineID() (string, error) { + return syscall.Sysctl("kern.hostuuid") +} diff --git a/vendor/github.com/rs/xid/hostid_linux.go b/vendor/github.com/rs/xid/hostid_linux.go new file mode 100644 index 000000000..837b20436 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_linux.go @@ -0,0 +1,13 @@ +// +build linux + +package xid + 
+import "io/ioutil" + +func readPlatformMachineID() (string, error) { + b, err := ioutil.ReadFile("/etc/machine-id") + if err != nil || len(b) == 0 { + b, err = ioutil.ReadFile("/sys/class/dmi/id/product_uuid") + } + return string(b), err +} diff --git a/vendor/github.com/rs/xid/hostid_windows.go b/vendor/github.com/rs/xid/hostid_windows.go new file mode 100644 index 000000000..ec2593ee3 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_windows.go @@ -0,0 +1,38 @@ +// +build windows + +package xid + +import ( + "fmt" + "syscall" + "unsafe" +) + +func readPlatformMachineID() (string, error) { + // source: https://github.com/shirou/gopsutil/blob/master/host/host_syscall.go + var h syscall.Handle + err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, syscall.StringToUTF16Ptr(`SOFTWARE\Microsoft\Cryptography`), 0, syscall.KEY_READ|syscall.KEY_WOW64_64KEY, &h) + if err != nil { + return "", err + } + defer syscall.RegCloseKey(h) + + const syscallRegBufLen = 74 // len(`{`) + len(`abcdefgh-1234-456789012-123345456671` * 2) + len(`}`) // 2 == bytes/UTF16 + const uuidLen = 36 + + var regBuf [syscallRegBufLen]uint16 + bufLen := uint32(syscallRegBufLen) + var valType uint32 + err = syscall.RegQueryValueEx(h, syscall.StringToUTF16Ptr(`MachineGuid`), nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen) + if err != nil { + return "", err + } + + hostID := syscall.UTF16ToString(regBuf[:]) + hostIDLen := len(hostID) + if hostIDLen != uuidLen { + return "", fmt.Errorf("HostID incorrect: %q\n", hostID) + } + + return hostID, nil +} diff --git a/vendor/github.com/rs/xid/id.go b/vendor/github.com/rs/xid/id.go new file mode 100644 index 000000000..1f536b415 --- /dev/null +++ b/vendor/github.com/rs/xid/id.go @@ -0,0 +1,392 @@ +// Package xid is a globally unique id generator suited for web scale +// +// Xid is using Mongo Object ID algorithm to generate globally unique ids: +// https://docs.mongodb.org/manual/reference/object-id/ +// +// - 4-byte value representing the seconds 
since the Unix epoch, +// - 3-byte machine identifier, +// - 2-byte process id, and +// - 3-byte counter, starting with a random value. +// +// The binary representation of the id is compatible with Mongo 12 bytes Object IDs. +// The string representation is using base32 hex (w/o padding) for better space efficiency +// when stored in that form (20 bytes). The hex variant of base32 is used to retain the +// sortable property of the id. +// +// Xid doesn't use base64 because case sensitivity and the 2 non alphanum chars may be an +// issue when transported as a string between various systems. Base36 wasn't retained either +// because 1/ it's not standard 2/ the resulting size is not predictable (not bit aligned) +// and 3/ it would not remain sortable. To validate a base32 `xid`, expect a 20 chars long, +// all lowercase sequence of `a` to `v` letters and `0` to `9` numbers (`[0-9a-v]{20}`). +// +// UUID is 16 bytes (128 bits), snowflake is 8 bytes (64 bits), xid stands in between +// with 12 bytes with a more compact string representation ready for the web and no +// required configuration or central generation server. +// +// Features: +// +// - Size: 12 bytes (96 bits), smaller than UUID, larger than snowflake +// - Base32 hex encoded by default (16 bytes storage when transported as printable string) +// - Non configured, you don't need set a unique machine and/or data center id +// - K-ordered +// - Embedded time with 1 second precision +// - Unicity guaranteed for 16,777,216 (24 bits) unique ids per second and per host/process +// +// Best used with xlog's RequestIDHandler (https://godoc.org/github.com/rs/xlog#RequestIDHandler). 
+// +// References: +// +// - http://www.slideshare.net/davegardnerisme/unique-id-generation-in-distributed-systems +// - https://en.wikipedia.org/wiki/Universally_unique_identifier +// - https://blog.twitter.com/2010/announcing-snowflake +package xid + +import ( + "bytes" + "crypto/md5" + "crypto/rand" + "database/sql/driver" + "encoding/binary" + "fmt" + "hash/crc32" + "io/ioutil" + "os" + "sort" + "sync/atomic" + "time" + "unsafe" +) + +// Code inspired from mgo/bson ObjectId + +// ID represents a unique request id +type ID [rawLen]byte + +const ( + encodedLen = 20 // string encoded len + rawLen = 12 // binary raw len + + // encoding stores a custom version of the base32 encoding with lower case + // letters. + encoding = "0123456789abcdefghijklmnopqrstuv" +) + +var ( + // objectIDCounter is atomically incremented when generating a new ObjectId + // using NewObjectId() function. It's used as a counter part of an id. + // This id is initialized with a random value. + objectIDCounter = randInt() + + // machineId stores machine id generated once and used in subsequent calls + // to NewObjectId function. + machineID = readMachineID() + + // pid stores the current process id + pid = os.Getpid() + + nilID ID + + // dec is the decoding map for base32 encoding + dec [256]byte +) + +func init() { + for i := 0; i < len(dec); i++ { + dec[i] = 0xFF + } + for i := 0; i < len(encoding); i++ { + dec[encoding[i]] = byte(i) + } + + // If /proc/self/cpuset exists and is not /, we can assume that we are in a + // form of container and use the content of cpuset xor-ed with the PID in + // order get a reasonable machine global unique PID. + b, err := ioutil.ReadFile("/proc/self/cpuset") + if err == nil && len(b) > 1 { + pid ^= int(crc32.ChecksumIEEE(b)) + } +} + +// readMachineId generates machine id and puts it into the machineId global +// variable. If this function fails to get the hostname, it will cause +// a runtime error. 
+func readMachineID() []byte { + id := make([]byte, 3) + hid, err := readPlatformMachineID() + if err != nil || len(hid) == 0 { + hid, err = os.Hostname() + } + if err == nil && len(hid) != 0 { + hw := md5.New() + hw.Write([]byte(hid)) + copy(id, hw.Sum(nil)) + } else { + // Fallback to rand number if machine id can't be gathered + if _, randErr := rand.Reader.Read(id); randErr != nil { + panic(fmt.Errorf("xid: cannot get hostname nor generate a random number: %v; %v", err, randErr)) + } + } + return id +} + +// randInt generates a random uint32 +func randInt() uint32 { + b := make([]byte, 3) + if _, err := rand.Reader.Read(b); err != nil { + panic(fmt.Errorf("xid: cannot generate random number: %v;", err)) + } + return uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]) +} + +// New generates a globally unique ID +func New() ID { + return NewWithTime(time.Now()) +} + +// NewWithTime generates a globally unique ID with the passed in time +func NewWithTime(t time.Time) ID { + var id ID + // Timestamp, 4 bytes, big endian + binary.BigEndian.PutUint32(id[:], uint32(t.Unix())) + // Machine, first 3 bytes of md5(hostname) + id[4] = machineID[0] + id[5] = machineID[1] + id[6] = machineID[2] + // Pid, 2 bytes, specs don't specify endianness, but we use big endian. + id[7] = byte(pid >> 8) + id[8] = byte(pid) + // Increment, 3 bytes, big endian + i := atomic.AddUint32(&objectIDCounter, 1) + id[9] = byte(i >> 16) + id[10] = byte(i >> 8) + id[11] = byte(i) + return id +} + +// FromString reads an ID from its string representation +func FromString(id string) (ID, error) { + i := &ID{} + err := i.UnmarshalText([]byte(id)) + return *i, err +} + +// String returns a base32 hex lowercased with no padding representation of the id (char set is 0-9, a-v). +func (id ID) String() string { + text := make([]byte, encodedLen) + encode(text, id[:]) + return *(*string)(unsafe.Pointer(&text)) +} + +// Encode encodes the id using base32 encoding, writing 20 bytes to dst and return it. 
+func (id ID) Encode(dst []byte) []byte { + encode(dst, id[:]) + return dst +} + +// MarshalText implements encoding/text TextMarshaler interface +func (id ID) MarshalText() ([]byte, error) { + text := make([]byte, encodedLen) + encode(text, id[:]) + return text, nil +} + +// MarshalJSON implements encoding/json Marshaler interface +func (id ID) MarshalJSON() ([]byte, error) { + if id.IsNil() { + return []byte("null"), nil + } + text := make([]byte, encodedLen+2) + encode(text[1:encodedLen+1], id[:]) + text[0], text[encodedLen+1] = '"', '"' + return text, nil +} + +// encode by unrolling the stdlib base32 algorithm + removing all safe checks +func encode(dst, id []byte) { + _ = dst[19] + _ = id[11] + + dst[19] = encoding[(id[11]<<4)&0x1F] + dst[18] = encoding[(id[11]>>1)&0x1F] + dst[17] = encoding[(id[11]>>6)&0x1F|(id[10]<<2)&0x1F] + dst[16] = encoding[id[10]>>3] + dst[15] = encoding[id[9]&0x1F] + dst[14] = encoding[(id[9]>>5)|(id[8]<<3)&0x1F] + dst[13] = encoding[(id[8]>>2)&0x1F] + dst[12] = encoding[id[8]>>7|(id[7]<<1)&0x1F] + dst[11] = encoding[(id[7]>>4)&0x1F|(id[6]<<4)&0x1F] + dst[10] = encoding[(id[6]>>1)&0x1F] + dst[9] = encoding[(id[6]>>6)&0x1F|(id[5]<<2)&0x1F] + dst[8] = encoding[id[5]>>3] + dst[7] = encoding[id[4]&0x1F] + dst[6] = encoding[id[4]>>5|(id[3]<<3)&0x1F] + dst[5] = encoding[(id[3]>>2)&0x1F] + dst[4] = encoding[id[3]>>7|(id[2]<<1)&0x1F] + dst[3] = encoding[(id[2]>>4)&0x1F|(id[1]<<4)&0x1F] + dst[2] = encoding[(id[1]>>1)&0x1F] + dst[1] = encoding[(id[1]>>6)&0x1F|(id[0]<<2)&0x1F] + dst[0] = encoding[id[0]>>3] +} + +// UnmarshalText implements encoding/text TextUnmarshaler interface +func (id *ID) UnmarshalText(text []byte) error { + if len(text) != encodedLen { + return ErrInvalidID + } + for _, c := range text { + if dec[c] == 0xFF { + return ErrInvalidID + } + } + if !decode(id, text) { + return ErrInvalidID + } + return nil +} + +// UnmarshalJSON implements encoding/json Unmarshaler interface +func (id *ID) UnmarshalJSON(b []byte) error { + s := 
string(b) + if s == "null" { + *id = nilID + return nil + } + // Check the slice length to prevent panic on passing it to UnmarshalText() + if len(b) < 2 { + return ErrInvalidID + } + return id.UnmarshalText(b[1 : len(b)-1]) +} + +// decode by unrolling the stdlib base32 algorithm + customized safe check. +func decode(id *ID, src []byte) bool { + _ = src[19] + _ = id[11] + + id[11] = dec[src[17]]<<6 | dec[src[18]]<<1 | dec[src[19]]>>4 + id[10] = dec[src[16]]<<3 | dec[src[17]]>>2 + id[9] = dec[src[14]]<<5 | dec[src[15]] + id[8] = dec[src[12]]<<7 | dec[src[13]]<<2 | dec[src[14]]>>3 + id[7] = dec[src[11]]<<4 | dec[src[12]]>>1 + id[6] = dec[src[9]]<<6 | dec[src[10]]<<1 | dec[src[11]]>>4 + id[5] = dec[src[8]]<<3 | dec[src[9]]>>2 + id[4] = dec[src[6]]<<5 | dec[src[7]] + id[3] = dec[src[4]]<<7 | dec[src[5]]<<2 | dec[src[6]]>>3 + id[2] = dec[src[3]]<<4 | dec[src[4]]>>1 + id[1] = dec[src[1]]<<6 | dec[src[2]]<<1 | dec[src[3]]>>4 + id[0] = dec[src[0]]<<3 | dec[src[1]]>>2 + + // Validate that there are no discarer bits (padding) in src that would + // cause the string-encoded id not to equal src. + var check [4]byte + + check[3] = encoding[(id[11]<<4)&0x1F] + check[2] = encoding[(id[11]>>1)&0x1F] + check[1] = encoding[(id[11]>>6)&0x1F|(id[10]<<2)&0x1F] + check[0] = encoding[id[10]>>3] + return bytes.Equal([]byte(src[16:20]), check[:]) +} + +// Time returns the timestamp part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ID) Time() time.Time { + // First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch. + secs := int64(binary.BigEndian.Uint32(id[0:4])) + return time.Unix(secs, 0) +} + +// Machine returns the 3-byte machine id part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ID) Machine() []byte { + return id[4:7] +} + +// Pid returns the process id part of the id. +// It's a runtime error to call this method with an invalid id. 
+func (id ID) Pid() uint16 { + return binary.BigEndian.Uint16(id[7:9]) +} + +// Counter returns the incrementing value part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ID) Counter() int32 { + b := id[9:12] + // Counter is stored as big-endian 3-byte value + return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2])) +} + +// Value implements the driver.Valuer interface. +func (id ID) Value() (driver.Value, error) { + if id.IsNil() { + return nil, nil + } + b, err := id.MarshalText() + return string(b), err +} + +// Scan implements the sql.Scanner interface. +func (id *ID) Scan(value interface{}) (err error) { + switch val := value.(type) { + case string: + return id.UnmarshalText([]byte(val)) + case []byte: + return id.UnmarshalText(val) + case nil: + *id = nilID + return nil + default: + return fmt.Errorf("xid: scanning unsupported type: %T", value) + } +} + +// IsNil Returns true if this is a "nil" ID +func (id ID) IsNil() bool { + return id == nilID +} + +// NilID returns a zero value for `xid.ID`. +func NilID() ID { + return nilID +} + +// Bytes returns the byte array representation of `ID` +func (id ID) Bytes() []byte { + return id[:] +} + +// FromBytes convert the byte array representation of `ID` back to `ID` +func FromBytes(b []byte) (ID, error) { + var id ID + if len(b) != rawLen { + return id, ErrInvalidID + } + copy(id[:], b) + return id, nil +} + +// Compare returns an integer comparing two IDs. It behaves just like `bytes.Compare`. +// The result will be 0 if two IDs are identical, -1 if current id is less than the other one, +// and 1 if current id is greater than the other. +func (id ID) Compare(other ID) int { + return bytes.Compare(id[:], other[:]) +} + +type sorter []ID + +func (s sorter) Len() int { + return len(s) +} + +func (s sorter) Less(i, j int) bool { + return s[i].Compare(s[j]) < 0 +} + +func (s sorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Sort sorts an array of IDs inplace. 
+// It works by wrapping `[]ID` and use `sort.Sort`. +func Sort(ids []ID) { + sort.Sort(sorter(ids)) +} diff --git a/vendor/golang.org/x/net/internal/socks/client.go b/vendor/golang.org/x/net/internal/socks/client.go new file mode 100644 index 000000000..3d6f516a5 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socks/client.go @@ -0,0 +1,168 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" + "time" +) + +var ( + noDeadline = time.Time{} + aLongTimeAgo = time.Unix(1, 0) +) + +func (d *Dialer) connect(ctx context.Context, c net.Conn, address string) (_ net.Addr, ctxErr error) { + host, port, err := splitHostPort(address) + if err != nil { + return nil, err + } + if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() { + c.SetDeadline(deadline) + defer c.SetDeadline(noDeadline) + } + if ctx != context.Background() { + errCh := make(chan error, 1) + done := make(chan struct{}) + defer func() { + close(done) + if ctxErr == nil { + ctxErr = <-errCh + } + }() + go func() { + select { + case <-ctx.Done(): + c.SetDeadline(aLongTimeAgo) + errCh <- ctx.Err() + case <-done: + errCh <- nil + } + }() + } + + b := make([]byte, 0, 6+len(host)) // the size here is just an estimate + b = append(b, Version5) + if len(d.AuthMethods) == 0 || d.Authenticate == nil { + b = append(b, 1, byte(AuthMethodNotRequired)) + } else { + ams := d.AuthMethods + if len(ams) > 255 { + return nil, errors.New("too many authentication methods") + } + b = append(b, byte(len(ams))) + for _, am := range ams { + b = append(b, byte(am)) + } + } + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:2]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + am := AuthMethod(b[1]) + 
if am == AuthMethodNoAcceptableMethods { + return nil, errors.New("no acceptable authentication methods") + } + if d.Authenticate != nil { + if ctxErr = d.Authenticate(ctx, c, am); ctxErr != nil { + return + } + } + + b = b[:0] + b = append(b, Version5, byte(d.cmd), 0) + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + b = append(b, AddrTypeIPv4) + b = append(b, ip4...) + } else if ip6 := ip.To16(); ip6 != nil { + b = append(b, AddrTypeIPv6) + b = append(b, ip6...) + } else { + return nil, errors.New("unknown address type") + } + } else { + if len(host) > 255 { + return nil, errors.New("FQDN too long") + } + b = append(b, AddrTypeFQDN) + b = append(b, byte(len(host))) + b = append(b, host...) + } + b = append(b, byte(port>>8), byte(port)) + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:4]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + if cmdErr := Reply(b[1]); cmdErr != StatusSucceeded { + return nil, errors.New("unknown error " + cmdErr.String()) + } + if b[2] != 0 { + return nil, errors.New("non-zero reserved field") + } + l := 2 + var a Addr + switch b[3] { + case AddrTypeIPv4: + l += net.IPv4len + a.IP = make(net.IP, net.IPv4len) + case AddrTypeIPv6: + l += net.IPv6len + a.IP = make(net.IP, net.IPv6len) + case AddrTypeFQDN: + if _, err := io.ReadFull(c, b[:1]); err != nil { + return nil, err + } + l += int(b[0]) + default: + return nil, errors.New("unknown address type " + strconv.Itoa(int(b[3]))) + } + if cap(b) < l { + b = make([]byte, l) + } else { + b = b[:l] + } + if _, ctxErr = io.ReadFull(c, b); ctxErr != nil { + return + } + if a.IP != nil { + copy(a.IP, b) + } else { + a.Name = string(b[:len(b)-2]) + } + a.Port = int(b[len(b)-2])<<8 | int(b[len(b)-1]) + return &a, nil +} + +func splitHostPort(address string) (string, int, error) { + host, port, err := net.SplitHostPort(address) + if 
err != nil { + return "", 0, err + } + portnum, err := strconv.Atoi(port) + if err != nil { + return "", 0, err + } + if 1 > portnum || portnum > 0xffff { + return "", 0, errors.New("port number out of range " + port) + } + return host, portnum, nil +} diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go new file mode 100644 index 000000000..84fcc32b6 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socks/socks.go @@ -0,0 +1,317 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package socks provides a SOCKS version 5 client implementation. +// +// SOCKS protocol version 5 is defined in RFC 1928. +// Username/Password authentication for SOCKS version 5 is defined in +// RFC 1929. +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" +) + +// A Command represents a SOCKS command. +type Command int + +func (cmd Command) String() string { + switch cmd { + case CmdConnect: + return "socks connect" + case cmdBind: + return "socks bind" + default: + return "socks " + strconv.Itoa(int(cmd)) + } +} + +// An AuthMethod represents a SOCKS authentication method. +type AuthMethod int + +// A Reply represents a SOCKS command reply code. +type Reply int + +func (code Reply) String() string { + switch code { + case StatusSucceeded: + return "succeeded" + case 0x01: + return "general SOCKS server failure" + case 0x02: + return "connection not allowed by ruleset" + case 0x03: + return "network unreachable" + case 0x04: + return "host unreachable" + case 0x05: + return "connection refused" + case 0x06: + return "TTL expired" + case 0x07: + return "command not supported" + case 0x08: + return "address type not supported" + default: + return "unknown code: " + strconv.Itoa(int(code)) + } +} + +// Wire protocol constants. 
+const ( + Version5 = 0x05 + + AddrTypeIPv4 = 0x01 + AddrTypeFQDN = 0x03 + AddrTypeIPv6 = 0x04 + + CmdConnect Command = 0x01 // establishes an active-open forward proxy connection + cmdBind Command = 0x02 // establishes a passive-open forward proxy connection + + AuthMethodNotRequired AuthMethod = 0x00 // no authentication required + AuthMethodUsernamePassword AuthMethod = 0x02 // use username/password + AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authentication methods + + StatusSucceeded Reply = 0x00 +) + +// An Addr represents a SOCKS-specific address. +// Either Name or IP is used exclusively. +type Addr struct { + Name string // fully-qualified domain name + IP net.IP + Port int +} + +func (a *Addr) Network() string { return "socks" } + +func (a *Addr) String() string { + if a == nil { + return "" + } + port := strconv.Itoa(a.Port) + if a.IP == nil { + return net.JoinHostPort(a.Name, port) + } + return net.JoinHostPort(a.IP.String(), port) +} + +// A Conn represents a forward proxy connection. +type Conn struct { + net.Conn + + boundAddr net.Addr +} + +// BoundAddr returns the address assigned by the proxy server for +// connecting to the command target address from the proxy server. +func (c *Conn) BoundAddr() net.Addr { + if c == nil { + return nil + } + return c.boundAddr +} + +// A Dialer holds SOCKS-specific options. +type Dialer struct { + cmd Command // either CmdConnect or cmdBind + proxyNetwork string // network between a proxy server and a client + proxyAddress string // proxy server address + + // ProxyDial specifies the optional dial function for + // establishing the transport connection. + ProxyDial func(context.Context, string, string) (net.Conn, error) + + // AuthMethods specifies the list of request authentication + // methods. + // If empty, SOCKS client requests only AuthMethodNotRequired. + AuthMethods []AuthMethod + + // Authenticate specifies the optional authentication + // function. 
It must be non-nil when AuthMethods is not empty. + // It must return an error when the authentication is failed. + Authenticate func(context.Context, io.ReadWriter, AuthMethod) error +} + +// DialContext connects to the provided address on the provided +// network. +// +// The returned error value may be a net.OpError. When the Op field of +// net.OpError contains "socks", the Source field contains a proxy +// server address and the Addr field contains a command target +// address. +// +// See func Dial of the net package of standard library for a +// description of the network and address parameters. +func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if ctx == nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} + } + var err error + var c net.Conn + if d.ProxyDial != nil { + c, err = d.ProxyDial(ctx, d.proxyNetwork, d.proxyAddress) + } else { + var dd net.Dialer + c, err = dd.DialContext(ctx, d.proxyNetwork, d.proxyAddress) + } + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + a, err := d.connect(ctx, c, address) + if err != nil { + c.Close() + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + return &Conn{Conn: c, boundAddr: a}, nil +} + +// DialWithConn initiates a connection from SOCKS server to the target +// network and address using the connection c that is already +// connected to the SOCKS server. +// +// It returns the connection's local address assigned by the SOCKS +// server. 
+func (d *Dialer) DialWithConn(ctx context.Context, c net.Conn, network, address string) (net.Addr, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if ctx == nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} + } + a, err := d.connect(ctx, c, address) + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + return a, nil +} + +// Dial connects to the provided address on the provided network. +// +// Unlike DialContext, it returns a raw transport connection instead +// of a forward proxy connection. +// +// Deprecated: Use DialContext or DialWithConn instead. +func (d *Dialer) Dial(network, address string) (net.Conn, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + var err error + var c net.Conn + if d.ProxyDial != nil { + c, err = d.ProxyDial(context.Background(), d.proxyNetwork, d.proxyAddress) + } else { + c, err = net.Dial(d.proxyNetwork, d.proxyAddress) + } + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if _, err := d.DialWithConn(context.Background(), c, network, address); err != nil { + c.Close() + return nil, err + } + return c, nil +} + +func (d *Dialer) validateTarget(network, address string) error { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return errors.New("network not implemented") + } + switch d.cmd { + case CmdConnect, cmdBind: + default: + return errors.New("command not 
implemented") + } + return nil +} + +func (d *Dialer) pathAddrs(address string) (proxy, dst net.Addr, err error) { + for i, s := range []string{d.proxyAddress, address} { + host, port, err := splitHostPort(s) + if err != nil { + return nil, nil, err + } + a := &Addr{Port: port} + a.IP = net.ParseIP(host) + if a.IP == nil { + a.Name = host + } + if i == 0 { + proxy = a + } else { + dst = a + } + } + return +} + +// NewDialer returns a new Dialer that dials through the provided +// proxy server's network and address. +func NewDialer(network, address string) *Dialer { + return &Dialer{proxyNetwork: network, proxyAddress: address, cmd: CmdConnect} +} + +const ( + authUsernamePasswordVersion = 0x01 + authStatusSucceeded = 0x00 +) + +// UsernamePassword are the credentials for the username/password +// authentication method. +type UsernamePassword struct { + Username string + Password string +} + +// Authenticate authenticates a pair of username and password with the +// proxy server. +func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, auth AuthMethod) error { + switch auth { + case AuthMethodNotRequired: + return nil + case AuthMethodUsernamePassword: + if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) > 255 { + return errors.New("invalid username/password") + } + b := []byte{authUsernamePasswordVersion} + b = append(b, byte(len(up.Username))) + b = append(b, up.Username...) + b = append(b, byte(len(up.Password))) + b = append(b, up.Password...) 
+ // TODO(mikio): handle IO deadlines and cancelation if + // necessary + if _, err := rw.Write(b); err != nil { + return err + } + if _, err := io.ReadFull(rw, b[:2]); err != nil { + return err + } + if b[0] != authUsernamePasswordVersion { + return errors.New("invalid username/password version") + } + if b[1] != authStatusSucceeded { + return errors.New("username/password authentication failed") + } + return nil + } + return errors.New("unsupported authentication method " + strconv.Itoa(int(auth))) +} diff --git a/vendor/golang.org/x/net/proxy/dial.go b/vendor/golang.org/x/net/proxy/dial.go new file mode 100644 index 000000000..811c2e4e9 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/dial.go @@ -0,0 +1,54 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" +) + +// A ContextDialer dials using a context. +type ContextDialer interface { + DialContext(ctx context.Context, network, address string) (net.Conn, error) +} + +// Dial works like DialContext on net.Dialer but using a dialer returned by FromEnvironment. +// +// The passed ctx is only used for returning the Conn, not the lifetime of the Conn. +// +// Custom dialers (registered via RegisterDialerType) that do not implement ContextDialer +// can leak a goroutine for as long as it takes the underlying Dialer implementation to timeout. +// +// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. 
+func Dial(ctx context.Context, network, address string) (net.Conn, error) { + d := FromEnvironment() + if xd, ok := d.(ContextDialer); ok { + return xd.DialContext(ctx, network, address) + } + return dialContext(ctx, d, network, address) +} + +// WARNING: this can leak a goroutine for as long as the underlying Dialer implementation takes to timeout +// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. +func dialContext(ctx context.Context, d Dialer, network, address string) (net.Conn, error) { + var ( + conn net.Conn + done = make(chan struct{}, 1) + err error + ) + go func() { + conn, err = d.Dial(network, address) + close(done) + if conn != nil && ctx.Err() != nil { + conn.Close() + } + }() + select { + case <-ctx.Done(): + err = ctx.Err() + case <-done: + } + return conn, err +} diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go new file mode 100644 index 000000000..3d66bdef9 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/direct.go @@ -0,0 +1,31 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" +) + +type direct struct{} + +// Direct implements Dialer by making network connections directly using net.Dial or net.DialContext. +var Direct = direct{} + +var ( + _ Dialer = Direct + _ ContextDialer = Direct +) + +// Dial directly invokes net.Dial with the supplied parameters. +func (direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} + +// DialContext instantiates a net.Dialer and invokes its DialContext receiver with the supplied parameters. 
+func (direct) DialContext(ctx context.Context, network, addr string) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, network, addr) +} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go new file mode 100644 index 000000000..573fe79e8 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -0,0 +1,155 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" + "strings" +) + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type PerHost struct { + def, bypass Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func NewPerHost(defaultDialer, bypass Dialer) *PerHost { + return &PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +// DialContext connects to the address addr on the given network through either +// defaultDialer or bypass. 
+func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + d := p.dialerForRequest(host) + if x, ok := d.(ContextDialer); ok { + return x.DialContext(ctx, network, addr) + } + return dialContext(ctx, d, network, addr) +} + +func (p *PerHost) dialerForRequest(host string) Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. 
A connection +// to a named host will never match an IP. +func (p *PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. +func (p *PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go new file mode 100644 index 000000000..9ff4b9a77 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/proxy.go @@ -0,0 +1,149 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proxy provides support for a variety of protocols to proxy network +// data. +package proxy // import "golang.org/x/net/proxy" + +import ( + "errors" + "net" + "net/url" + "os" + "sync" +) + +// A Dialer is a means to establish a connection. +// Custom dialers should also implement ContextDialer. +type Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. 
+type Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy-related +// variables in the environment and makes underlying connections +// directly. +func FromEnvironment() Dialer { + return FromEnvironmentUsing(Direct) +} + +// FromEnvironmentUsing returns the dialer specify by the proxy-related +// variables in the environment and makes underlying connections +// using the provided forwarding Dialer (for instance, a *net.Dialer +// with desired configuration). +func FromEnvironmentUsing(forward Dialer) Dialer { + allProxy := allProxyEnv.Get() + if len(allProxy) == 0 { + return forward + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return forward + } + proxy, err := FromURL(proxyURL, forward) + if err != nil { + return forward + } + + noProxy := noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := NewPerHost(proxy, forward) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) { + if proxySchemes == nil { + proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error)) + } + proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. 
+func FromURL(u *url.URL, forward Dialer) (Dialer, error) { + var auth *Auth + if u.User != nil { + auth = new(Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5", "socks5h": + addr := u.Hostname() + port := u.Port() + if port == "" { + port = "1080" + } + return SOCKS5("tcp", net.JoinHostPort(addr, port), auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. + if proxySchemes != nil { + if f, ok := proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + allProxyEnv = &envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + noProxyEnv = &envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type envOnce struct { + names []string + once sync.Once + val string +} + +func (e *envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// reset is used by tests +func (e *envOnce) reset() { + e.once = sync.Once{} + e.val = "" +} diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go new file mode 100644 index 000000000..c91651f96 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/socks5.go @@ -0,0 +1,42 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package proxy + +import ( + "context" + "net" + + "golang.org/x/net/internal/socks" +) + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given +// address with an optional username and password. +// See RFC 1928 and RFC 1929. +func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) { + d := socks.NewDialer(network, address) + if forward != nil { + if f, ok := forward.(ContextDialer); ok { + d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { + return f.DialContext(ctx, network, address) + } + } else { + d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { + return dialContext(ctx, forward, network, address) + } + } + } + if auth != nil { + up := socks.UsernamePassword{ + Username: auth.User, + Password: auth.Password, + } + d.AuthMethods = []socks.AuthMethod{ + socks.AuthMethodNotRequired, + socks.AuthMethodUsernamePassword, + } + d.Authenticate = up.Authenticate + } + return d, nil +} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 000000000..30f632c57 --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,136 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. +package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "context" + "sync" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. +func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. 
+// The callers can request access with a given weight. +type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking until resources +// are available or ctx is done. On success, returns nil. On failure, returns +// ctx.Err() and leaves the semaphore unchanged. +// +// If ctx is already done, Acquire may still succeed without blocking. +func (s *Weighted) Acquire(ctx context.Context, n int64) error { + s.mu.Lock() + if s.size-s.cur >= n && s.waiters.Len() == 0 { + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-ctx.Done() + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-ctx.Done(): + err := ctx.Err() + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. Rather than trying to + // fix up the queue, just pretend we didn't notice the cancelation. + err = nil + default: + isFront := s.waiters.Front() == elem + s.waiters.Remove(elem) + // If we're at the front and there're extra tokens left, notify other waiters. + if isFront && s.size > s.cur { + s.notifyWaiters() + } + } + s.mu.Unlock() + return err + + case <-ready: + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. On failure, returns false and leaves the semaphore unchanged. +func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. 
+func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: released more than held") + } + s.notifyWaiters() + s.mu.Unlock() +} + +func (s *Weighted) notifyWaiters() { + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. + // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. 
+ break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 9e40d5b4f..a25ed314b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -26,12 +26,34 @@ github.com/beorn7/perks/quantile # github.com/blang/semver/v4 v4.0.0 ## explicit; go 1.14 github.com/blang/semver/v4 +# github.com/bwmarrin/snowflake v0.3.0 +## explicit +github.com/bwmarrin/snowflake # github.com/cenkalti/backoff/v4 v4.2.1 ## explicit; go 1.18 github.com/cenkalti/backoff/v4 # github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 +# github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995 +## explicit; go 1.18 +github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 +# github.com/cloudevents/sdk-go/v2 v2.15.2 +## explicit; go 1.18 +github.com/cloudevents/sdk-go/v2 +github.com/cloudevents/sdk-go/v2/binding +github.com/cloudevents/sdk-go/v2/binding/format +github.com/cloudevents/sdk-go/v2/binding/spec +github.com/cloudevents/sdk-go/v2/client +github.com/cloudevents/sdk-go/v2/context +github.com/cloudevents/sdk-go/v2/event +github.com/cloudevents/sdk-go/v2/event/datacodec +github.com/cloudevents/sdk-go/v2/event/datacodec/json +github.com/cloudevents/sdk-go/v2/event/datacodec/text +github.com/cloudevents/sdk-go/v2/event/datacodec/xml +github.com/cloudevents/sdk-go/v2/protocol +github.com/cloudevents/sdk-go/v2/protocol/http +github.com/cloudevents/sdk-go/v2/types # github.com/coreos/go-semver v0.3.1 ## explicit; go 1.8 github.com/coreos/go-semver/semver @@ -45,6 +67,10 @@ github.com/cyphar/filepath-securejoin # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew +# github.com/eclipse/paho.golang v0.11.0 +## explicit; go 1.15 +github.com/eclipse/paho.golang/packets +github.com/eclipse/paho.golang/paho # github.com/emicklei/go-restful/v3 v3.11.0 ## explicit; go 1.13 github.com/emicklei/go-restful/v3 @@ -107,6 +133,7 @@ 
github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration +github.com/golang/protobuf/ptypes/empty github.com/golang/protobuf/ptypes/timestamp # github.com/google/cel-go v0.17.7 ## explicit; go 1.18 @@ -153,6 +180,9 @@ github.com/google/gofuzz/bytesource # github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid +# github.com/gorilla/websocket v1.5.1 +## explicit; go 1.20 +github.com/gorilla/websocket # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 ## explicit github.com/grpc-ecosystem/go-grpc-prometheus @@ -190,6 +220,15 @@ github.com/mitchellh/copystructure # github.com/mitchellh/reflectwalk v1.0.2 ## explicit github.com/mitchellh/reflectwalk +# github.com/mochi-mqtt/server/v2 v2.4.6 +## explicit; go 1.21 +github.com/mochi-mqtt/server/v2 +github.com/mochi-mqtt/server/v2/hooks/auth +github.com/mochi-mqtt/server/v2/hooks/storage +github.com/mochi-mqtt/server/v2/listeners +github.com/mochi-mqtt/server/v2/mempool +github.com/mochi-mqtt/server/v2/packets +github.com/mochi-mqtt/server/v2/system # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd ## explicit github.com/modern-go/concurrent @@ -254,6 +293,7 @@ github.com/openshift/build-machinery-go/scripts ## explicit; go 1.21 github.com/openshift/library-go/pkg/certs github.com/openshift/library-go/pkg/crypto +github.com/openshift/library-go/pkg/operator/events # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors @@ -282,6 +322,9 @@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util +# github.com/rs/xid v1.4.0 +## explicit; go 1.12 +github.com/rs/xid # github.com/shopspring/decimal v1.3.1 ## explicit; go 1.13 github.com/shopspring/decimal @@ -428,7 +471,9 @@ golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna +golang.org/x/net/internal/socks 
golang.org/x/net/internal/timeseries +golang.org/x/net/proxy golang.org/x/net/trace golang.org/x/net/websocket # golang.org/x/oauth2 v0.16.0 @@ -437,6 +482,7 @@ golang.org/x/oauth2 golang.org/x/oauth2/internal # golang.org/x/sync v0.6.0 ## explicit; go 1.18 +golang.org/x/sync/semaphore golang.org/x/sync/singleflight # golang.org/x/sys v0.17.0 ## explicit; go 1.18 @@ -1340,13 +1386,36 @@ open-cluster-management.io/api/cluster/v1 open-cluster-management.io/api/cluster/v1alpha1 open-cluster-management.io/api/cluster/v1beta1 open-cluster-management.io/api/cluster/v1beta2 +open-cluster-management.io/api/utils/work/v1/utils open-cluster-management.io/api/utils/work/v1/workapplier +open-cluster-management.io/api/utils/work/v1/workvalidator open-cluster-management.io/api/work/v1 open-cluster-management.io/api/work/v1alpha1 -# open-cluster-management.io/sdk-go v0.13.1-0.20240416030555-aa744f426379 +# open-cluster-management.io/sdk-go v0.13.1-0.20240416062924-20307e6fe090 ## explicit; go 1.21 open-cluster-management.io/sdk-go/pkg/apis/work/v1/applier open-cluster-management.io/sdk-go/pkg/apis/work/v1/builder +open-cluster-management.io/sdk-go/pkg/apis/work/v1/validator +open-cluster-management.io/sdk-go/pkg/cloudevents/generic +open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options +open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc +open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1 +open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol +open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt +open-cluster-management.io/sdk-go/pkg/cloudevents/generic/payload +open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types +open-cluster-management.io/sdk-go/pkg/cloudevents/work +open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client +open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec 
+open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/handler +open-cluster-management.io/sdk-go/pkg/cloudevents/work/common +open-cluster-management.io/sdk-go/pkg/cloudevents/work/internal +open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload +open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client +open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/codec +open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/handler +open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils +open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher open-cluster-management.io/sdk-go/pkg/patcher # sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 ## explicit; go 1.20 diff --git a/vendor/open-cluster-management.io/api/utils/work/v1/utils/utils.go b/vendor/open-cluster-management.io/api/utils/work/v1/utils/utils.go new file mode 100644 index 000000000..32b4557c1 --- /dev/null +++ b/vendor/open-cluster-management.io/api/utils/work/v1/utils/utils.go @@ -0,0 +1,76 @@ +package utils + +import ( + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/scheme" + + workv1 "open-cluster-management.io/api/work/v1" +) + +var genericScheme = runtime.NewScheme() + +// BuildResourceMeta builds manifest resource meta for the object +func BuildResourceMeta( + index int, + object runtime.Object, + restMapper meta.RESTMapper) (workv1.ManifestResourceMeta, schema.GroupVersionResource, error) { + resourceMeta := workv1.ManifestResourceMeta{ + Ordinal: int32(index), + } + + if object == nil || reflect.ValueOf(object).IsNil() { + return resourceMeta, schema.GroupVersionResource{}, nil + } + + // set gvk + gvk, err := GuessObjectGroupVersionKind(object) + if err != nil { + return resourceMeta, schema.GroupVersionResource{}, err + } + resourceMeta.Group = gvk.Group + resourceMeta.Version = gvk.Version + resourceMeta.Kind = 
gvk.Kind + + // set namespace/name + if accessor, e := meta.Accessor(object); e != nil { + err = fmt.Errorf("cannot access metadata of %v: %w", object, e) + } else { + resourceMeta.Namespace = accessor.GetNamespace() + resourceMeta.Name = accessor.GetName() + } + + // set resource + if restMapper == nil { + return resourceMeta, schema.GroupVersionResource{}, err + } + mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return resourceMeta, schema.GroupVersionResource{}, fmt.Errorf("the server doesn't have a resource type %q", gvk.Kind) + } + + resourceMeta.Resource = mapping.Resource.Resource + return resourceMeta, mapping.Resource, err +} + +// GuessObjectGroupVersionKind returns GVK for the passed runtime object. +func GuessObjectGroupVersionKind(object runtime.Object) (*schema.GroupVersionKind, error) { + if gvk := object.GetObjectKind().GroupVersionKind(); len(gvk.Kind) > 0 { + return &gvk, nil + } + + if kinds, _, _ := scheme.Scheme.ObjectKinds(object); len(kinds) > 0 { + return &kinds[0], nil + } + + // otherwise fall back to genericScheme + if kinds, _, _ := genericScheme.ObjectKinds(object); len(kinds) > 0 { + return &kinds[0], nil + } + + return nil, fmt.Errorf("cannot get gvk of %v", object) +} diff --git a/vendor/open-cluster-management.io/api/utils/work/v1/workvalidator/validator.go b/vendor/open-cluster-management.io/api/utils/work/v1/workvalidator/validator.go new file mode 100644 index 000000000..04559069c --- /dev/null +++ b/vendor/open-cluster-management.io/api/utils/work/v1/workvalidator/validator.go @@ -0,0 +1,64 @@ +package workvalidator + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + workv1 "open-cluster-management.io/api/work/v1" +) + +type Validator struct { + limit int +} + +var ManifestValidator = &Validator{limit: 500 * 1024} // the default manifest limit is 500k. 
+ +func (m *Validator) WithLimit(limit int) { + m.limit = limit +} + +func (m *Validator) ValidateManifests(manifests []workv1.Manifest) error { + if len(manifests) == 0 { + return errors.NewBadRequest("Workload manifests should not be empty") + } + + totalSize := 0 + for _, manifest := range manifests { + totalSize = totalSize + manifest.Size() + } + + if totalSize > m.limit { + return fmt.Errorf("the size of manifests is %v bytes which exceeds the %v limit", totalSize, m.limit) + } + + for _, manifest := range manifests { + err := validateManifest(manifest.Raw) + if err != nil { + return err + } + } + + return nil +} + +func validateManifest(manifest []byte) error { + // If the manifest cannot be decoded, return err + unstructuredObj := &unstructured.Unstructured{} + err := unstructuredObj.UnmarshalJSON(manifest) + if err != nil { + return err + } + + // The object must have name specified, generateName is not allowed in manifestwork + if unstructuredObj.GetName() == "" { + return fmt.Errorf("name must be set in manifest") + } + + if unstructuredObj.GetGenerateName() != "" { + return fmt.Errorf("generateName must not be set in manifest") + } + + return nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/apis/work/v1/validator/validator.go b/vendor/open-cluster-management.io/sdk-go/pkg/apis/work/v1/validator/validator.go new file mode 100644 index 000000000..4895bf5c2 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/apis/work/v1/validator/validator.go @@ -0,0 +1,64 @@ +package validator + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + workv1 "open-cluster-management.io/api/work/v1" +) + +type Validator struct { + limit int +} + +var ManifestValidator = &Validator{limit: 500 * 1024} // the default manifest limit is 500k. 
+ +func (m *Validator) WithLimit(limit int) { + m.limit = limit +} + +func (m *Validator) ValidateManifests(manifests []workv1.Manifest) error { + if len(manifests) == 0 { + return errors.NewBadRequest("Workload manifests should not be empty") + } + + totalSize := 0 + for _, manifest := range manifests { + totalSize = totalSize + manifest.Size() + } + + if totalSize > m.limit { + return fmt.Errorf("the size of manifests is %v bytes which exceeds the %v limit", totalSize, m.limit) + } + + for _, manifest := range manifests { + err := validateManifest(manifest.Raw) + if err != nil { + return err + } + } + + return nil +} + +func validateManifest(manifest []byte) error { + // If the manifest cannot be decoded, return err + unstructuredObj := &unstructured.Unstructured{} + err := unstructuredObj.UnmarshalJSON(manifest) + if err != nil { + return err + } + + // The object must have name specified, generateName is not allowed in manifestwork + if unstructuredObj.GetName() == "" { + return fmt.Errorf("name must be set in manifest") + } + + if unstructuredObj.GetGenerateName() != "" { + return fmt.Errorf("generateName must not be set in manifest") + } + + return nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/agentclient.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/agentclient.go new file mode 100644 index 000000000..417b31665 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/agentclient.go @@ -0,0 +1,326 @@ +package generic + +import ( + "context" + "fmt" + "strconv" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + "k8s.io/klog/v2" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/payload" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" +) + +// CloudEventAgentClient is a client for an agent to resync/send/receive its resources with cloud events. 
+// +// An agent is a component that handles the deployment of requested resources on the managed cluster and status report +// to the source. +type CloudEventAgentClient[T ResourceObject] struct { + *baseClient + lister Lister[T] + codecs map[types.CloudEventsDataType]Codec[T] + statusHashGetter StatusHashGetter[T] + agentID string + clusterName string +} + +// NewCloudEventAgentClient returns an instance for CloudEventAgentClient. The following arguments are required to +// create a client. +// - agentOptions provides the clusterName and agentID and the cloudevents clients that are based on different event +// protocols for sending/receiving the cloudevents. +// - lister gets the resources from a cache/store of an agent. +// - statusHashGetter calculates the resource status hash. +// - codecs is list of codecs for encoding/decoding a resource objet/cloudevent to/from a cloudevent/resource objet. +func NewCloudEventAgentClient[T ResourceObject]( + ctx context.Context, + agentOptions *options.CloudEventsAgentOptions, + lister Lister[T], + statusHashGetter StatusHashGetter[T], + codecs ...Codec[T], +) (*CloudEventAgentClient[T], error) { + baseClient := &baseClient{ + cloudEventsOptions: agentOptions.CloudEventsOptions, + cloudEventsRateLimiter: NewRateLimiter(agentOptions.EventRateLimit), + reconnectedChan: make(chan struct{}), + } + + if err := baseClient.connect(ctx); err != nil { + return nil, err + } + + evtCodes := make(map[types.CloudEventsDataType]Codec[T]) + for _, codec := range codecs { + evtCodes[codec.EventDataType()] = codec + } + + return &CloudEventAgentClient[T]{ + baseClient: baseClient, + lister: lister, + codecs: evtCodes, + statusHashGetter: statusHashGetter, + agentID: agentOptions.AgentID, + clusterName: agentOptions.ClusterName, + }, nil +} + +// ReconnectedChan returns a chan which indicates the source/agent client is reconnected. +// The source/agent client callers should consider sending a resync request when receiving this signal. 
+func (c *CloudEventAgentClient[T]) ReconnectedChan() <-chan struct{} { + return c.reconnectedChan +} + +// Resync the resources spec by sending a spec resync request from the current to the given source. +func (c *CloudEventAgentClient[T]) Resync(ctx context.Context, source string) error { + // list the resource objects that are maintained by the current agent with the given source + objs, err := c.lister.List(types.ListOptions{Source: source, ClusterName: c.clusterName}) + if err != nil { + return err + } + + resources := &payload.ResourceVersionList{Versions: make([]payload.ResourceVersion, len(objs))} + for i, obj := range objs { + resourceVersion, err := strconv.ParseInt(obj.GetResourceVersion(), 10, 64) + if err != nil { + return err + } + + resources.Versions[i] = payload.ResourceVersion{ + ResourceID: string(obj.GetUID()), + ResourceVersion: resourceVersion, + } + } + + // only resync the resources whose event data type is registered + for eventDataType := range c.codecs { + eventType := types.CloudEventsType{ + CloudEventsDataType: eventDataType, + SubResource: types.SubResourceSpec, + Action: types.ResyncRequestAction, + } + + evt := types.NewEventBuilder(c.agentID, eventType). + WithOriginalSource(source). + WithClusterName(c.clusterName). + NewEvent() + if err := evt.SetData(cloudevents.ApplicationJSON, resources); err != nil { + return fmt.Errorf("failed to set data to cloud event: %v", err) + } + + if err := c.publish(ctx, evt); err != nil { + return err + } + } + + return nil +} + +// Publish a resource status from an agent to a source. 
+func (c *CloudEventAgentClient[T]) Publish(ctx context.Context, eventType types.CloudEventsType, obj T) error { + codec, ok := c.codecs[eventType.CloudEventsDataType] + if !ok { + return fmt.Errorf("failed to find a codec for event %s", eventType.CloudEventsDataType) + } + + if eventType.SubResource != types.SubResourceStatus { + return fmt.Errorf("unsupported event eventType %s", eventType) + } + + evt, err := codec.Encode(c.agentID, eventType, obj) + if err != nil { + return err + } + + if err := c.publish(ctx, *evt); err != nil { + return err + } + + return nil +} + +// Subscribe the events that are from the source status resync request or source resource spec request. +// For status resync request, agent publish the current resources status back as response. +// For resource spec request, agent receives resource spec and handles the spec with resource handlers. +func (c *CloudEventAgentClient[T]) Subscribe(ctx context.Context, handlers ...ResourceHandler[T]) { + c.subscribe(ctx, func(ctx context.Context, evt cloudevents.Event) { + c.receive(ctx, evt, handlers...) 
+ }) +} + +func (c *CloudEventAgentClient[T]) receive(ctx context.Context, evt cloudevents.Event, handlers ...ResourceHandler[T]) { + klog.V(4).Infof("Received event:\n%s", evt) + + eventType, err := types.ParseCloudEventsType(evt.Type()) + if err != nil { + klog.Errorf("failed to parse cloud event type %s, %v", evt.Type(), err) + return + } + + if eventType.Action == types.ResyncRequestAction { + if eventType.SubResource != types.SubResourceStatus { + klog.Warningf("unsupported resync event type %s, ignore", eventType) + return + } + + if err := c.respondResyncStatusRequest(ctx, eventType.CloudEventsDataType, evt); err != nil { + klog.Errorf("failed to resync manifestsstatus, %v", err) + } + + return + } + + if eventType.SubResource != types.SubResourceSpec { + klog.Warningf("unsupported event type %s, ignore", eventType) + return + } + + codec, ok := c.codecs[eventType.CloudEventsDataType] + if !ok { + klog.Warningf("failed to find the codec for event %s, ignore", eventType.CloudEventsDataType) + return + } + + obj, err := codec.Decode(&evt) + if err != nil { + klog.Errorf("failed to decode spec, %v", err) + return + } + + action, err := c.specAction(evt.Source(), obj) + if err != nil { + klog.Errorf("failed to generate spec action %s, %v", evt, err) + return + } + + if len(action) == 0 { + // no action is required, ignore + return + } + + for _, handler := range handlers { + if err := handler(action, obj); err != nil { + klog.Errorf("failed to handle spec event %s, %v", evt, err) + } + } +} + +// Upon receiving the status resync event, the agent responds by sending resource status events to the broker as +// follows: +// - If the event payload is empty, the agent returns the status of all resources it maintains. +// - If the event payload is not empty, the agent retrieves the resource with the specified ID and compares the +// received resource status hash with the current resource status hash. 
If they are not equal, the agent sends the +// resource status message. +func (c *CloudEventAgentClient[T]) respondResyncStatusRequest( + ctx context.Context, eventDataType types.CloudEventsDataType, evt cloudevents.Event) error { + objs, err := c.lister.List(types.ListOptions{ClusterName: c.clusterName, Source: evt.Source()}) + if err != nil { + return err + } + + statusHashes, err := payload.DecodeStatusResyncRequest(evt) + if err != nil { + return err + } + + eventType := types.CloudEventsType{ + CloudEventsDataType: eventDataType, + SubResource: types.SubResourceStatus, + Action: types.ResyncResponseAction, + } + + if len(statusHashes.Hashes) == 0 { + // publish all resources status + for _, obj := range objs { + if err := c.Publish(ctx, eventType, obj); err != nil { + return err + } + } + + return nil + } + + for _, obj := range objs { + lastHash, ok := findStatusHash(string(obj.GetUID()), statusHashes.Hashes) + if !ok { + // ignore the resource that is not on the source, but exists on the agent, wait for the source deleting it + klog.Infof("The resource %s is not found from the source, ignore", obj.GetUID()) + continue + } + + currentHash, err := c.statusHashGetter(obj) + if err != nil { + continue + } + + if currentHash == lastHash { + // the status is not changed, do nothing + continue + } + + if err := c.Publish(ctx, eventType, obj); err != nil { + return err + } + } + + return nil +} + +func (c *CloudEventAgentClient[T]) specAction(source string, obj T) (evt types.ResourceAction, err error) { + objs, err := c.lister.List(types.ListOptions{ClusterName: c.clusterName, Source: source}) + if err != nil { + return evt, err + } + + lastObj, exists := getObj(string(obj.GetUID()), objs) + if !exists { + return types.Added, nil + } + + if !obj.GetDeletionTimestamp().IsZero() { + return types.Deleted, nil + } + + // if both the current and the last object have the resource version "0", then object + // is considered as modified, the message broker guarantees the 
order of the messages + if obj.GetResourceVersion() == "0" && lastObj.GetResourceVersion() == "0" { + return types.Modified, nil + } + + resourceVersion, err := strconv.ParseInt(obj.GetResourceVersion(), 10, 64) + if err != nil { + return evt, err + } + + lastResourceVersion, err := strconv.ParseInt(lastObj.GetResourceVersion(), 10, 64) + if err != nil { + return evt, err + } + + if resourceVersion <= lastResourceVersion { + return evt, nil + } + + return types.Modified, nil +} + +func getObj[T ResourceObject](resourceID string, objs []T) (obj T, exists bool) { + for _, obj := range objs { + if string(obj.GetUID()) == resourceID { + return obj, true + } + } + + return obj, false +} + +func findStatusHash(id string, hashes []payload.ResourceStatusHash) (string, bool) { + for _, hash := range hashes { + if id == hash.ResourceID { + return hash.StatusHash, true + } + } + + return "", false +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/baseclient.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/baseclient.go new file mode 100644 index 000000000..0ba5c7187 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/baseclient.go @@ -0,0 +1,243 @@ +package generic + +import ( + "context" + "fmt" + "sync" + "time" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog/v2" + "k8s.io/utils/clock" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" +) + +const ( + restartReceiverSignal = iota + stopReceiverSignal +) + +type receiveFn func(ctx context.Context, evt cloudevents.Event) + +type baseClient struct { + sync.RWMutex + cloudEventsOptions options.CloudEventsOptions + cloudEventsProtocol options.CloudEventsProtocol + cloudEventsClient cloudevents.Client + cloudEventsRateLimiter flowcontrol.RateLimiter + receiverChan chan int + reconnectedChan 
chan struct{} +} + +func (c *baseClient) connect(ctx context.Context) error { + var err error + c.cloudEventsClient, err = c.newCloudEventsClient(ctx) + if err != nil { + return err + } + + // start a go routine to handle cloudevents client connection errors + go func() { + var err error + + // the reconnect backoff will stop at [1,5) min interval. If we don't backoff for 10min, we reset the backoff. + delayFn := wait.Backoff{ + Duration: 5 * time.Second, + Cap: 1 * time.Minute, + Steps: 12, // now a required argument + Factor: 5.0, + Jitter: 1.0, + }.DelayWithReset(&clock.RealClock{}, 10*time.Minute) + cloudEventsClient := c.cloudEventsClient + + for { + if cloudEventsClient == nil { + klog.V(4).Infof("reconnecting the cloudevents client") + + c.cloudEventsClient, err = c.newCloudEventsClient(ctx) + // TODO enhance the cloudevents SKD to avoid wrapping the error type to distinguish the net connection + // errors + if err != nil { + // failed to reconnect, try agin + runtime.HandleError(fmt.Errorf("the cloudevents client reconnect failed, %v", err)) + <-wait.RealTimer(delayFn()).C() + continue + } + + // the cloudevents network connection is back, refresh the current cloudevents client and send the + // receiver restart signal + klog.V(4).Infof("the cloudevents client is reconnected") + c.resetClient(cloudEventsClient) + c.sendReceiverSignal(restartReceiverSignal) + c.sendReconnectedSignal() + } + + select { + case <-ctx.Done(): + if c.receiverChan != nil { + close(c.receiverChan) + } + return + case err, ok := <-c.cloudEventsOptions.ErrorChan(): + if !ok { + // error channel is closed, do nothing + return + } + + runtime.HandleError(fmt.Errorf("the cloudevents client is disconnected, %v", err)) + + // the cloudevents client network connection is closed, send the receiver stop signal, set the current + // client to nil and retry + c.sendReceiverSignal(stopReceiverSignal) + + err = c.cloudEventsProtocol.Close(ctx) + if err != nil { + 
runtime.HandleError(fmt.Errorf("failed to close the cloudevents protocol, %v", err)) + } + cloudEventsClient = nil + + c.resetClient(cloudEventsClient) + + <-wait.RealTimer(delayFn()).C() + } + } + }() + + return nil +} + +func (c *baseClient) publish(ctx context.Context, evt cloudevents.Event) error { + now := time.Now() + + if err := c.cloudEventsRateLimiter.Wait(ctx); err != nil { + return fmt.Errorf("client rate limiter Wait returned an error: %w", err) + } + + latency := time.Since(now) + if latency > longThrottleLatency { + klog.Warningf(fmt.Sprintf("Waited for %v due to client-side throttling, not priority and fairness, request: %s", + latency, evt)) + } + + sendingCtx, err := c.cloudEventsOptions.WithContext(ctx, evt.Context) + if err != nil { + return err + } + + klog.V(4).Infof("Sent event: %v\n%s", ctx, evt) + + // make sure the current client is the newest + c.RLock() + defer c.RUnlock() + + if c.cloudEventsClient == nil { + return fmt.Errorf("the cloudevents client is not ready") + } + + if result := c.cloudEventsClient.Send(sendingCtx, evt); cloudevents.IsUndelivered(result) { + return fmt.Errorf("failed to send event %s, %v", evt, result) + } + + return nil +} + +func (c *baseClient) subscribe(ctx context.Context, receive receiveFn) { + c.Lock() + defer c.Unlock() + + // make sure there is only one subscription go routine starting for one client. 
+ if c.receiverChan != nil { + klog.Warningf("the subscription has already started") + return + } + + c.receiverChan = make(chan int) + + // start a go routine to handle cloudevents subscription + go func() { + receiverCtx, receiverCancel := context.WithCancel(context.TODO()) + cloudEventsClient := c.cloudEventsClient + + for { + if cloudEventsClient != nil { + go func() { + if err := cloudEventsClient.StartReceiver(receiverCtx, func(evt cloudevents.Event) { + receive(receiverCtx, evt) + }); err != nil { + runtime.HandleError(fmt.Errorf("failed to receive cloudevents, %v", err)) + } + }() + } + + select { + case <-ctx.Done(): + receiverCancel() + return + case signal, ok := <-c.receiverChan: + if !ok { + // receiver channel is closed, stop the receiver + receiverCancel() + return + } + + switch signal { + case restartReceiverSignal: + klog.V(4).Infof("restart the cloudevents receiver") + // make sure the current client is the newest + c.RLock() + cloudEventsClient = c.cloudEventsClient + c.RUnlock() + + // rebuild the receiver context + receiverCtx, receiverCancel = context.WithCancel(context.TODO()) + case stopReceiverSignal: + klog.V(4).Infof("stop the cloudevents receiver") + receiverCancel() + cloudEventsClient = nil + default: + runtime.HandleError(fmt.Errorf("unknown receiver signal %d", signal)) + } + } + } + }() +} + +func (c *baseClient) resetClient(client cloudevents.Client) { + c.Lock() + defer c.Unlock() + + c.cloudEventsClient = client +} + +func (c *baseClient) sendReceiverSignal(signal int) { + c.RLock() + defer c.RUnlock() + + if c.receiverChan != nil { + c.receiverChan <- signal + } +} + +func (c *baseClient) sendReconnectedSignal() { + c.RLock() + defer c.RUnlock() + c.reconnectedChan <- struct{}{} +} + +func (c *baseClient) newCloudEventsClient(ctx context.Context) (cloudevents.Client, error) { + var err error + c.cloudEventsProtocol, err = c.cloudEventsOptions.Protocol(ctx) + if err != nil { + return nil, err + } + c.cloudEventsClient, err = 
cloudevents.NewClient(c.cloudEventsProtocol) + if err != nil { + return nil, err + } + return c.cloudEventsClient, nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/interface.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/interface.go new file mode 100644 index 000000000..06cb8fd45 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/interface.go @@ -0,0 +1,74 @@ +package generic + +import ( + "context" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubetypes "k8s.io/apimachinery/pkg/types" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" +) + +// ResourceHandler handles the received resource object. +type ResourceHandler[T ResourceObject] func(action types.ResourceAction, obj T) error + +// StatusHashGetter gets the status hash of one resource object. +type StatusHashGetter[T ResourceObject] func(obj T) (string, error) + +type ResourceObject interface { + // GetUID returns the resource ID of this object. The resource ID represents the unique identifier for this object. + // The source should ensure its uniqueness and consistency. + GetUID() kubetypes.UID + + // GetResourceVersion returns the resource version of this object. The resource version is a required int64 sequence + // number property that must be incremented by the source whenever this resource changes. + // The source should guarantee its incremental nature. + GetResourceVersion() string + + // GetDeletionTimestamp returns the deletion timestamp of this object. The deletiontimestamp is an optional + // timestamp property representing the resource is deleting from the source, the agent needs to clean up the + // resource from its cluster. + GetDeletionTimestamp() *metav1.Time +} + +type Lister[T ResourceObject] interface { + // List returns the list of resource objects that are maintained by source/agent. 
+ List(options types.ListOptions) ([]T, error) +} + +type Codec[T ResourceObject] interface { + // EventDataType indicates which type of the event data the codec is used for. + EventDataType() types.CloudEventsDataType + + // Encode a resource object to cloudevents event. + // Each event should have the following extensions: `resourceid`, `resourceversion` and `clustername`. + // The source set the `deletiontimestamp` extension to indicate one resource object is deleting from a source. + // The agent set the `originalsource` extension to indicate one resource belonged to which source. + Encode(source string, eventType types.CloudEventsType, obj T) (*cloudevents.Event, error) + + // Decode a cloudevents event to a resource object. + Decode(event *cloudevents.Event) (T, error) +} + +type CloudEventsClient[T ResourceObject] interface { + // Resync the resources of one source/agent by sending resync request. + // The second parameter is used to specify cluster name/source ID for a source/agent. + // - A source sends the resource status resync request to a cluster with the given cluster name. + // If setting this parameter to `types.ClusterAll`, the source will broadcast the resync request to all clusters. + // - An agent sends the resources spec resync request to a source with the given source ID. + // If setting this parameter to `types.SourceAll`, the agent will broadcast the resync request to all sources. + Resync(context.Context, string) error + + // Publish the resources spec/status event to the broker. + Publish(ctx context.Context, eventType types.CloudEventsType, obj T) error + + // Subscribe the resources status/spec event to the broker to receive the resources status/spec and use + // ResourceHandler to handle them. + Subscribe(ctx context.Context, handlers ...ResourceHandler[T]) + + // ReconnectedChan returns a chan which indicates the source/agent client is reconnected. 
+ // The source/agent client callers should consider sending a resync request when receiving this signal. + ReconnectedChan() <-chan struct{} +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/agentoptions.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/agentoptions.go new file mode 100644 index 000000000..7e94551bd --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/agentoptions.go @@ -0,0 +1,57 @@ +package grpc + +import ( + "context" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" +) + +type grpcAgentOptions struct { + GRPCOptions + errorChan chan error // grpc client connection doesn't have error channel, it will handle reconnecting automatically + clusterName string +} + +func NewAgentOptions(grpcOptions *GRPCOptions, clusterName, agentID string) *options.CloudEventsAgentOptions { + return &options.CloudEventsAgentOptions{ + CloudEventsOptions: &grpcAgentOptions{ + GRPCOptions: *grpcOptions, + errorChan: make(chan error), + clusterName: clusterName, + }, + AgentID: agentID, + ClusterName: clusterName, + } +} + +func (o *grpcAgentOptions) WithContext(ctx context.Context, evtCtx cloudevents.EventContext) (context.Context, error) { + // grpc agent client doesn't need to update topic in the context + return ctx, nil +} + +func (o *grpcAgentOptions) Protocol(ctx context.Context) (options.CloudEventsProtocol, error) { + receiver, err := o.GetCloudEventsProtocol( + ctx, + func(err error) { + o.errorChan <- err + }, + protocol.WithSubscribeOption(&protocol.SubscribeOption{ + // TODO: Update this code to determine the subscription source for the agent client. 
+ // Currently, the grpc agent client is not utilized, and the 'Source' field serves + // as a placeholder with all the sources. + Source: types.SourceAll, + }), + ) + if err != nil { + return nil, err + } + return receiver, nil +} + +func (o *grpcAgentOptions) ErrorChan() <-chan error { + return o.errorChan +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/options.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/options.go new file mode 100644 index 000000000..5f0f8ecd6 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/options.go @@ -0,0 +1,149 @@ +package grpc + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "os" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "gopkg.in/yaml.v2" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol" +) + +// GRPCOptions holds the options that are used to build gRPC client. +type GRPCOptions struct { + URL string + CAFile string + ClientCertFile string + ClientKeyFile string +} + +// GRPCConfig holds the information needed to build connect to gRPC server as a given user. +type GRPCConfig struct { + // URL is the address of the gRPC server (host:port). + URL string `json:"url" yaml:"url"` + // CAFile is the file path to a cert file for the gRPC server certificate authority. + CAFile string `json:"caFile,omitempty" yaml:"caFile,omitempty"` + // ClientCertFile is the file path to a client cert file for TLS. + ClientCertFile string `json:"clientCertFile,omitempty" yaml:"clientCertFile,omitempty"` + // ClientKeyFile is the file path to a client key file for TLS. 
+ ClientKeyFile string `json:"clientKeyFile,omitempty" yaml:"clientKeyFile,omitempty"` +} + +// BuildGRPCOptionsFromFlags builds configs from a config filepath. +func BuildGRPCOptionsFromFlags(configPath string) (*GRPCOptions, error) { + configData, err := os.ReadFile(configPath) + if err != nil { + return nil, err + } + + config := &GRPCConfig{} + if err := yaml.Unmarshal(configData, config); err != nil { + return nil, err + } + + if config.URL == "" { + return nil, fmt.Errorf("url is required") + } + + if (config.ClientCertFile == "" && config.ClientKeyFile != "") || + (config.ClientCertFile != "" && config.ClientKeyFile == "") { + return nil, fmt.Errorf("either both or none of clientCertFile and clientKeyFile must be set") + } + if config.ClientCertFile != "" && config.ClientKeyFile != "" && config.CAFile == "" { + return nil, fmt.Errorf("setting clientCertFile and clientKeyFile requires caFile") + } + + return &GRPCOptions{ + URL: config.URL, + CAFile: config.CAFile, + ClientCertFile: config.ClientCertFile, + ClientKeyFile: config.ClientKeyFile, + }, nil +} + +func NewGRPCOptions() *GRPCOptions { + return &GRPCOptions{} +} + +func (o *GRPCOptions) GetGRPCClientConn() (*grpc.ClientConn, error) { + if len(o.CAFile) != 0 { + certPool, err := x509.SystemCertPool() + if err != nil { + return nil, err + } + + caPEM, err := os.ReadFile(o.CAFile) + if err != nil { + return nil, err + } + + if ok := certPool.AppendCertsFromPEM(caPEM); !ok { + return nil, fmt.Errorf("invalid CA %s", o.CAFile) + } + + clientCerts, err := tls.LoadX509KeyPair(o.ClientCertFile, o.ClientKeyFile) + if err != nil { + return nil, err + } + + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{clientCerts}, + RootCAs: certPool, + MinVersion: tls.VersionTLS13, + MaxVersion: tls.VersionTLS13, + } + + conn, err := grpc.Dial(o.URL, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) + if err != nil { + return nil, fmt.Errorf("failed to connect to grpc server %s, %v", o.URL, err) + } 
+ + return conn, nil + } + + conn, err := grpc.Dial(o.URL, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return nil, fmt.Errorf("failed to connect to grpc server %s, %v", o.URL, err) + } + + return conn, nil +} + +func (o *GRPCOptions) GetCloudEventsProtocol(ctx context.Context, errorHandler func(error), clientOpts ...protocol.Option) (options.CloudEventsProtocol, error) { + conn, err := o.GetGRPCClientConn() + if err != nil { + return nil, err + } + + // Periodically (every 100ms) check the connection status and reconnect if necessary. + go func() { + ticker := time.NewTicker(100 * time.Millisecond) + for { + select { + case <-ctx.Done(): + ticker.Stop() + conn.Close() + case <-ticker.C: + if conn.GetState() == connectivity.TransientFailure { + errorHandler(fmt.Errorf("grpc connection is disconnected")) + ticker.Stop() + conn.Close() + return // exit the goroutine as the error handler function will handle the reconnection. + } + } + } + }() + + opts := []protocol.Option{} + opts = append(opts, clientOpts...) + return protocol.NewProtocol(conn, opts...) +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/README.md b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/README.md new file mode 100644 index 000000000..0bc2eeba6 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/README.md @@ -0,0 +1,30 @@ +# CloudEvent gRPC Protobuf Definitions + +## Overview + +This repository includes the protobuf message and RPC method definitions for CloudEvent gRPC service, along with the corresponding Go code generated from these definitions. 
+ +## Getting Started + +### Prerequisites + +Make sure you have the following tools installed: + +- [Protocol Compiler (protoc)](https://grpc.io/docs/protoc-installation/) +- Go plugins for the protocol compiler: + +```bash +$ go install google.golang.org/protobuf/cmd/protoc-gen-go +$ go install google.golang.org/grpc/cmd/protoc-gen-go-grpc +``` + +### Updating CloudEvent gRPC Service + +1. Modify the `*.proto` files to reflect your desired changes. +2. Run the following command to update the generated code: + + ```bash + go generate + ``` + + This step is crucial to ensure that your changes are applied to both the gRPC server and client stub. diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.pb.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.pb.go new file mode 100644 index 000000000..5cc30a3cb --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.pb.go @@ -0,0 +1,649 @@ +// After making changes to the *.proto files, always run the following +// command in current directory to update the generated code: +// go generate + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.12.4 +// source: cloudevent.proto + +package v1 + +import ( + any1 "github.com/golang/protobuf/ptypes/any" + empty "github.com/golang/protobuf/ptypes/empty" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// CloudEvent is copied from +// https://github.com/cloudevents/spec/blob/main/cloudevents/formats/protobuf-format.md. +type CloudEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unique event identifier. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // URI of the event source. + Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` + // Version of the spec in use. + SpecVersion string `protobuf:"bytes,3,opt,name=spec_version,json=specVersion,proto3" json:"spec_version,omitempty"` + // Event type identifier. + Type string `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"` + // Optional & Extension Attributes + Attributes map[string]*CloudEventAttributeValue `protobuf:"bytes,5,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // CloudEvent Data (Bytes, Text, or Proto) + // + // Types that are assignable to Data: + // + // *CloudEvent_BinaryData + // *CloudEvent_TextData + // *CloudEvent_ProtoData + Data isCloudEvent_Data `protobuf_oneof:"data"` +} + +func (x *CloudEvent) Reset() { + *x = CloudEvent{} + if protoimpl.UnsafeEnabled { + mi := &file_cloudevent_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CloudEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CloudEvent) ProtoMessage() {} + +func (x *CloudEvent) ProtoReflect() protoreflect.Message { + mi := &file_cloudevent_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CloudEvent.ProtoReflect.Descriptor instead. 
+func (*CloudEvent) Descriptor() ([]byte, []int) { + return file_cloudevent_proto_rawDescGZIP(), []int{0} +} + +func (x *CloudEvent) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *CloudEvent) GetSource() string { + if x != nil { + return x.Source + } + return "" +} + +func (x *CloudEvent) GetSpecVersion() string { + if x != nil { + return x.SpecVersion + } + return "" +} + +func (x *CloudEvent) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *CloudEvent) GetAttributes() map[string]*CloudEventAttributeValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (m *CloudEvent) GetData() isCloudEvent_Data { + if m != nil { + return m.Data + } + return nil +} + +func (x *CloudEvent) GetBinaryData() []byte { + if x, ok := x.GetData().(*CloudEvent_BinaryData); ok { + return x.BinaryData + } + return nil +} + +func (x *CloudEvent) GetTextData() string { + if x, ok := x.GetData().(*CloudEvent_TextData); ok { + return x.TextData + } + return "" +} + +func (x *CloudEvent) GetProtoData() *any1.Any { + if x, ok := x.GetData().(*CloudEvent_ProtoData); ok { + return x.ProtoData + } + return nil +} + +type isCloudEvent_Data interface { + isCloudEvent_Data() +} + +type CloudEvent_BinaryData struct { + // If the event is binary data then the datacontenttype attribute + // should be set to an appropriate media-type. + BinaryData []byte `protobuf:"bytes,6,opt,name=binary_data,json=binaryData,proto3,oneof"` +} + +type CloudEvent_TextData struct { + // If the event is string data then the datacontenttype attribute + // should be set to an appropriate media-type such as application/json. + TextData string `protobuf:"bytes,7,opt,name=text_data,json=textData,proto3,oneof"` +} + +type CloudEvent_ProtoData struct { + // If the event is a protobuf then it must be encoded using this Any + // type. 
The datacontenttype attribute should be set to + // application/protobuf and the dataschema attribute set to the message + // type. + ProtoData *any1.Any `protobuf:"bytes,8,opt,name=proto_data,json=protoData,proto3,oneof"` +} + +func (*CloudEvent_BinaryData) isCloudEvent_Data() {} + +func (*CloudEvent_TextData) isCloudEvent_Data() {} + +func (*CloudEvent_ProtoData) isCloudEvent_Data() {} + +// CloudEventAttribute enables extensions to use any of the seven allowed +// data types as the value of an envelope key. +type CloudEventAttributeValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The value can be any one of these types. + // + // Types that are assignable to Attr: + // + // *CloudEventAttributeValue_CeBoolean + // *CloudEventAttributeValue_CeInteger + // *CloudEventAttributeValue_CeString + // *CloudEventAttributeValue_CeBytes + // *CloudEventAttributeValue_CeUri + // *CloudEventAttributeValue_CeUriRef + // *CloudEventAttributeValue_CeTimestamp + Attr isCloudEventAttributeValue_Attr `protobuf_oneof:"attr"` +} + +func (x *CloudEventAttributeValue) Reset() { + *x = CloudEventAttributeValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cloudevent_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CloudEventAttributeValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CloudEventAttributeValue) ProtoMessage() {} + +func (x *CloudEventAttributeValue) ProtoReflect() protoreflect.Message { + mi := &file_cloudevent_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CloudEventAttributeValue.ProtoReflect.Descriptor instead. 
+func (*CloudEventAttributeValue) Descriptor() ([]byte, []int) { + return file_cloudevent_proto_rawDescGZIP(), []int{1} +} + +func (m *CloudEventAttributeValue) GetAttr() isCloudEventAttributeValue_Attr { + if m != nil { + return m.Attr + } + return nil +} + +func (x *CloudEventAttributeValue) GetCeBoolean() bool { + if x, ok := x.GetAttr().(*CloudEventAttributeValue_CeBoolean); ok { + return x.CeBoolean + } + return false +} + +func (x *CloudEventAttributeValue) GetCeInteger() int32 { + if x, ok := x.GetAttr().(*CloudEventAttributeValue_CeInteger); ok { + return x.CeInteger + } + return 0 +} + +func (x *CloudEventAttributeValue) GetCeString() string { + if x, ok := x.GetAttr().(*CloudEventAttributeValue_CeString); ok { + return x.CeString + } + return "" +} + +func (x *CloudEventAttributeValue) GetCeBytes() []byte { + if x, ok := x.GetAttr().(*CloudEventAttributeValue_CeBytes); ok { + return x.CeBytes + } + return nil +} + +func (x *CloudEventAttributeValue) GetCeUri() string { + if x, ok := x.GetAttr().(*CloudEventAttributeValue_CeUri); ok { + return x.CeUri + } + return "" +} + +func (x *CloudEventAttributeValue) GetCeUriRef() string { + if x, ok := x.GetAttr().(*CloudEventAttributeValue_CeUriRef); ok { + return x.CeUriRef + } + return "" +} + +func (x *CloudEventAttributeValue) GetCeTimestamp() *timestamp.Timestamp { + if x, ok := x.GetAttr().(*CloudEventAttributeValue_CeTimestamp); ok { + return x.CeTimestamp + } + return nil +} + +type isCloudEventAttributeValue_Attr interface { + isCloudEventAttributeValue_Attr() +} + +type CloudEventAttributeValue_CeBoolean struct { + // Boolean value. + CeBoolean bool `protobuf:"varint,1,opt,name=ce_boolean,json=ceBoolean,proto3,oneof"` +} + +type CloudEventAttributeValue_CeInteger struct { + // Integer value. + CeInteger int32 `protobuf:"varint,2,opt,name=ce_integer,json=ceInteger,proto3,oneof"` +} + +type CloudEventAttributeValue_CeString struct { + // String value. 
+ CeString string `protobuf:"bytes,3,opt,name=ce_string,json=ceString,proto3,oneof"` +} + +type CloudEventAttributeValue_CeBytes struct { + // Byte string value. + CeBytes []byte `protobuf:"bytes,4,opt,name=ce_bytes,json=ceBytes,proto3,oneof"` +} + +type CloudEventAttributeValue_CeUri struct { + // URI value. + CeUri string `protobuf:"bytes,5,opt,name=ce_uri,json=ceUri,proto3,oneof"` +} + +type CloudEventAttributeValue_CeUriRef struct { + // URI reference value. + CeUriRef string `protobuf:"bytes,6,opt,name=ce_uri_ref,json=ceUriRef,proto3,oneof"` +} + +type CloudEventAttributeValue_CeTimestamp struct { + // Timestamp value. + CeTimestamp *timestamp.Timestamp `protobuf:"bytes,7,opt,name=ce_timestamp,json=ceTimestamp,proto3,oneof"` +} + +func (*CloudEventAttributeValue_CeBoolean) isCloudEventAttributeValue_Attr() {} + +func (*CloudEventAttributeValue_CeInteger) isCloudEventAttributeValue_Attr() {} + +func (*CloudEventAttributeValue_CeString) isCloudEventAttributeValue_Attr() {} + +func (*CloudEventAttributeValue_CeBytes) isCloudEventAttributeValue_Attr() {} + +func (*CloudEventAttributeValue_CeUri) isCloudEventAttributeValue_Attr() {} + +func (*CloudEventAttributeValue_CeUriRef) isCloudEventAttributeValue_Attr() {} + +func (*CloudEventAttributeValue_CeTimestamp) isCloudEventAttributeValue_Attr() {} + +type PublishRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. 
Define the CloudEvent to be published + Event *CloudEvent `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` +} + +func (x *PublishRequest) Reset() { + *x = PublishRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_cloudevent_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishRequest) ProtoMessage() {} + +func (x *PublishRequest) ProtoReflect() protoreflect.Message { + mi := &file_cloudevent_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishRequest.ProtoReflect.Descriptor instead. +func (*PublishRequest) Descriptor() ([]byte, []int) { + return file_cloudevent_proto_rawDescGZIP(), []int{2} +} + +func (x *PublishRequest) GetEvent() *CloudEvent { + if x != nil { + return x.Event + } + return nil +} + +type SubscriptionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The original source of the respond CloudEvent(s). 
+ Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` +} + +func (x *SubscriptionRequest) Reset() { + *x = SubscriptionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_cloudevent_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscriptionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriptionRequest) ProtoMessage() {} + +func (x *SubscriptionRequest) ProtoReflect() protoreflect.Message { + mi := &file_cloudevent_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionRequest.ProtoReflect.Descriptor instead. +func (*SubscriptionRequest) Descriptor() ([]byte, []int) { + return file_cloudevent_proto_rawDescGZIP(), []int{3} +} + +func (x *SubscriptionRequest) GetSource() string { + if x != nil { + return x.Source + } + return "" +} + +var File_cloudevent_proto protoreflect.FileDescriptor + +var file_cloudevent_proto_rawDesc = []byte{ + 0x0a, 0x10, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x11, 0x69, 0x6f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa7, + 0x03, 0x0a, 0x0a, 
0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, + 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x70, 0x65, 0x63, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x70, 0x65, + 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x4d, 0x0a, 0x0a, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, + 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0b, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, + 0x48, 0x00, 0x52, 0x0a, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1d, + 0x0a, 0x09, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x00, 0x52, 0x08, 0x74, 0x65, 0x78, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x35, 0x0a, + 0x0a, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x44, 0x61, 0x74, 0x61, 0x1a, 0x6a, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 
0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x41, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x9a, 0x02, 0x0a, 0x18, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x63, 0x65, 0x5f, 0x62, 0x6f, 0x6f, 0x6c, + 0x65, 0x61, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x63, 0x65, 0x42, + 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x12, 0x1f, 0x0a, 0x0a, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x65, 0x67, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x09, 0x63, 0x65, + 0x49, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x09, 0x63, 0x65, 0x5f, 0x73, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x63, 0x65, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x1b, 0x0a, 0x08, 0x63, 0x65, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x07, 0x63, 0x65, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x17, 0x0a, 0x06, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x63, 0x65, 0x55, 0x72, 0x69, 0x12, 0x1e, 0x0a, 0x0a, + 0x63, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x08, 0x63, 0x65, 0x55, 0x72, 0x69, 0x52, 0x65, 0x66, 0x12, 0x3f, 0x0a, 0x0c, + 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x07, 0x20, 
0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, + 0x52, 0x0b, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x06, 0x0a, + 0x04, 0x61, 0x74, 0x74, 0x72, 0x22, 0x45, 0x0a, 0x0e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x2d, 0x0a, 0x13, + 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x32, 0xb3, 0x01, 0x0a, 0x11, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x46, 0x0a, 0x07, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x21, 0x2e, 0x69, + 0x6f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x09, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x26, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x69, 0x6f, 0x2e, 0x63, 
0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x30, + 0x01, 0x42, 0x50, 0x5a, 0x4e, 0x6f, 0x70, 0x65, 0x6e, 0x2d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x2d, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x69, 0x6f, 0x2f, + 0x73, 0x64, 0x6b, 0x2d, 0x67, 0x6f, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_cloudevent_proto_rawDescOnce sync.Once + file_cloudevent_proto_rawDescData = file_cloudevent_proto_rawDesc +) + +func file_cloudevent_proto_rawDescGZIP() []byte { + file_cloudevent_proto_rawDescOnce.Do(func() { + file_cloudevent_proto_rawDescData = protoimpl.X.CompressGZIP(file_cloudevent_proto_rawDescData) + }) + return file_cloudevent_proto_rawDescData +} + +var file_cloudevent_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_cloudevent_proto_goTypes = []interface{}{ + (*CloudEvent)(nil), // 0: io.cloudevents.v1.CloudEvent + (*CloudEventAttributeValue)(nil), // 1: io.cloudevents.v1.CloudEventAttributeValue + (*PublishRequest)(nil), // 2: io.cloudevents.v1.PublishRequest + (*SubscriptionRequest)(nil), // 3: io.cloudevents.v1.SubscriptionRequest + nil, // 4: io.cloudevents.v1.CloudEvent.AttributesEntry + (*any1.Any)(nil), // 5: google.protobuf.Any + (*timestamp.Timestamp)(nil), // 6: google.protobuf.Timestamp + (*empty.Empty)(nil), // 7: google.protobuf.Empty +} +var file_cloudevent_proto_depIdxs = []int32{ + 4, // 0: io.cloudevents.v1.CloudEvent.attributes:type_name -> io.cloudevents.v1.CloudEvent.AttributesEntry + 5, // 1: io.cloudevents.v1.CloudEvent.proto_data:type_name -> google.protobuf.Any + 6, // 2: 
io.cloudevents.v1.CloudEventAttributeValue.ce_timestamp:type_name -> google.protobuf.Timestamp + 0, // 3: io.cloudevents.v1.PublishRequest.event:type_name -> io.cloudevents.v1.CloudEvent + 1, // 4: io.cloudevents.v1.CloudEvent.AttributesEntry.value:type_name -> io.cloudevents.v1.CloudEventAttributeValue + 2, // 5: io.cloudevents.v1.CloudEventService.Publish:input_type -> io.cloudevents.v1.PublishRequest + 3, // 6: io.cloudevents.v1.CloudEventService.Subscribe:input_type -> io.cloudevents.v1.SubscriptionRequest + 7, // 7: io.cloudevents.v1.CloudEventService.Publish:output_type -> google.protobuf.Empty + 0, // 8: io.cloudevents.v1.CloudEventService.Subscribe:output_type -> io.cloudevents.v1.CloudEvent + 7, // [7:9] is the sub-list for method output_type + 5, // [5:7] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_cloudevent_proto_init() } +func file_cloudevent_proto_init() { + if File_cloudevent_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cloudevent_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CloudEvent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cloudevent_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CloudEventAttributeValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cloudevent_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cloudevent_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { 
+ switch v := v.(*SubscriptionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cloudevent_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*CloudEvent_BinaryData)(nil), + (*CloudEvent_TextData)(nil), + (*CloudEvent_ProtoData)(nil), + } + file_cloudevent_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*CloudEventAttributeValue_CeBoolean)(nil), + (*CloudEventAttributeValue_CeInteger)(nil), + (*CloudEventAttributeValue_CeString)(nil), + (*CloudEventAttributeValue_CeBytes)(nil), + (*CloudEventAttributeValue_CeUri)(nil), + (*CloudEventAttributeValue_CeUriRef)(nil), + (*CloudEventAttributeValue_CeTimestamp)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cloudevent_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_cloudevent_proto_goTypes, + DependencyIndexes: file_cloudevent_proto_depIdxs, + MessageInfos: file_cloudevent_proto_msgTypes, + }.Build() + File_cloudevent_proto = out.File + file_cloudevent_proto_rawDesc = nil + file_cloudevent_proto_goTypes = nil + file_cloudevent_proto_depIdxs = nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.proto b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.proto new file mode 100644 index 000000000..30cf9c9fd --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent.proto @@ -0,0 +1,81 @@ +// After making changes to the *.proto files, always run the following +// command in current directory to update the generated code: +// go generate + +syntax = "proto3"; + +package io.cloudevents.v1; + +option go_package = 
"open-cluster-management.io/sdk-go/cloudevents/generic/options/grpc/protobuf/v1"; + +import "google/protobuf/empty.proto"; +import "google/protobuf/any.proto"; +import "google/protobuf/timestamp.proto"; + +// CloudEvent is copied from +// https://github.com/cloudevents/spec/blob/main/cloudevents/formats/protobuf-format.md. +message CloudEvent { + // Unique event identifier. + string id = 1; + // URI of the event source. + string source = 2; + // Version of the spec in use. + string spec_version = 3; + // Event type identifier. + string type = 4; + + // Optional & Extension Attributes + map attributes = 5; + + // CloudEvent Data (Bytes, Text, or Proto) + oneof data { + // If the event is binary data then the datacontenttype attribute + // should be set to an appropriate media-type. + bytes binary_data = 6; + // If the event is string data then the datacontenttype attribute + // should be set to an appropriate media-type such as application/json. + string text_data = 7; + // If the event is a protobuf then it must be encoded using this Any + // type. The datacontenttype attribute should be set to + // application/protobuf and the dataschema attribute set to the message + // type. + google.protobuf.Any proto_data = 8; + } +} + +// CloudEventAttribute enables extensions to use any of the seven allowed +// data types as the value of an envelope key. +message CloudEventAttributeValue { + // The value can be any one of these types. + oneof attr { + // Boolean value. + bool ce_boolean = 1; + // Integer value. + int32 ce_integer = 2; + // String value. + string ce_string = 3; + // Byte string value. + bytes ce_bytes = 4; + // URI value. + string ce_uri = 5; + // URI reference value. + string ce_uri_ref = 6; + // Timestamp value. + google.protobuf.Timestamp ce_timestamp = 7; + } +} + +message PublishRequest { + // Required. Define the CloudEvent to be published + CloudEvent event = 1; +} + +message SubscriptionRequest { + // Required. 
The original source of the respond CloudEvent(s). + string source = 1; +} + +service CloudEventService { + rpc Publish(PublishRequest) returns (google.protobuf.Empty) {} + rpc Subscribe(SubscriptionRequest) returns (stream CloudEvent) {} +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent_grpc.pb.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent_grpc.pb.go new file mode 100644 index 000000000..79b138153 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/cloudevent_grpc.pb.go @@ -0,0 +1,179 @@ +// After making changes to the *.proto files, always run the following +// command in current directory to update the generated code: +// go generate + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v3.12.4 +// source: cloudevent.proto + +package v1 + +import ( + context "context" + empty "github.com/golang/protobuf/ptypes/empty" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + CloudEventService_Publish_FullMethodName = "/io.cloudevents.v1.CloudEventService/Publish" + CloudEventService_Subscribe_FullMethodName = "/io.cloudevents.v1.CloudEventService/Subscribe" +) + +// CloudEventServiceClient is the client API for CloudEventService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type CloudEventServiceClient interface { + Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*empty.Empty, error) + Subscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (CloudEventService_SubscribeClient, error) +} + +type cloudEventServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewCloudEventServiceClient(cc grpc.ClientConnInterface) CloudEventServiceClient { + return &cloudEventServiceClient{cc} +} + +func (c *cloudEventServiceClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, CloudEventService_Publish_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cloudEventServiceClient) Subscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (CloudEventService_SubscribeClient, error) { + stream, err := c.cc.NewStream(ctx, &CloudEventService_ServiceDesc.Streams[0], CloudEventService_Subscribe_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &cloudEventServiceSubscribeClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type CloudEventService_SubscribeClient interface { + Recv() (*CloudEvent, error) + grpc.ClientStream +} + +type cloudEventServiceSubscribeClient struct { + grpc.ClientStream +} + +func (x *cloudEventServiceSubscribeClient) Recv() (*CloudEvent, error) { + m := new(CloudEvent) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// CloudEventServiceServer is the server API for CloudEventService service. 
+// All implementations must embed UnimplementedCloudEventServiceServer +// for forward compatibility +type CloudEventServiceServer interface { + Publish(context.Context, *PublishRequest) (*empty.Empty, error) + Subscribe(*SubscriptionRequest, CloudEventService_SubscribeServer) error + mustEmbedUnimplementedCloudEventServiceServer() +} + +// UnimplementedCloudEventServiceServer must be embedded to have forward compatible implementations. +type UnimplementedCloudEventServiceServer struct { +} + +func (UnimplementedCloudEventServiceServer) Publish(context.Context, *PublishRequest) (*empty.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Publish not implemented") +} +func (UnimplementedCloudEventServiceServer) Subscribe(*SubscriptionRequest, CloudEventService_SubscribeServer) error { + return status.Errorf(codes.Unimplemented, "method Subscribe not implemented") +} +func (UnimplementedCloudEventServiceServer) mustEmbedUnimplementedCloudEventServiceServer() {} + +// UnsafeCloudEventServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to CloudEventServiceServer will +// result in compilation errors. 
+type UnsafeCloudEventServiceServer interface { + mustEmbedUnimplementedCloudEventServiceServer() +} + +func RegisterCloudEventServiceServer(s grpc.ServiceRegistrar, srv CloudEventServiceServer) { + s.RegisterService(&CloudEventService_ServiceDesc, srv) +} + +func _CloudEventService_Publish_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PublishRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CloudEventServiceServer).Publish(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: CloudEventService_Publish_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CloudEventServiceServer).Publish(ctx, req.(*PublishRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CloudEventService_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscriptionRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(CloudEventServiceServer).Subscribe(m, &cloudEventServiceSubscribeServer{stream}) +} + +type CloudEventService_SubscribeServer interface { + Send(*CloudEvent) error + grpc.ServerStream +} + +type cloudEventServiceSubscribeServer struct { + grpc.ServerStream +} + +func (x *cloudEventServiceSubscribeServer) Send(m *CloudEvent) error { + return x.ServerStream.SendMsg(m) +} + +// CloudEventService_ServiceDesc is the grpc.ServiceDesc for CloudEventService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var CloudEventService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "io.cloudevents.v1.CloudEventService", + HandlerType: (*CloudEventServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Publish", + Handler: _CloudEventService_Publish_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Subscribe", + Handler: _CloudEventService_Subscribe_Handler, + ServerStreams: true, + }, + }, + Metadata: "cloudevent.proto", +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/gen.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/gen.go new file mode 100644 index 000000000..f643ef818 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1/gen.go @@ -0,0 +1,3 @@ +package v1 + +//go:generate protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. 
--go-grpc_opt=paths=source_relative cloudevent.proto diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/message.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/message.go new file mode 100644 index 000000000..b03e77dc9 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/message.go @@ -0,0 +1,205 @@ +package protocol + +import ( + "bytes" + "context" + "fmt" + "net/url" + "strings" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/types" + + pbv1 "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1" +) + +const ( + prefix = "ce-" + contenttype = "contenttype" + // dataSchema = "dataschema" + subject = "subject" + time = "time" +) + +var specs = spec.WithPrefix(prefix) + +// Message represents a gRPC message. 
+// This message *can* be read several times safely +type Message struct { + internal *pbv1.CloudEvent + version spec.Version + format format.Format +} + +// Check if Message implements binding.Message +var ( + _ binding.Message = (*Message)(nil) + _ binding.MessageMetadataReader = (*Message)(nil) +) + +func NewMessage(msg *pbv1.CloudEvent) *Message { + var f format.Format + var v spec.Version + if msg.Attributes != nil { + if contentType, ok := msg.Attributes[contenttype]; ok && format.IsFormat(contentType.GetCeString()) { + f = format.Lookup(contentType.GetCeString()) + } else if s := msg.SpecVersion; s != "" { + v = specs.Version(s) + } + } + return &Message{ + internal: msg, + version: v, + format: f, + } +} + +func (m *Message) ReadEncoding() binding.Encoding { + if m.version != nil { + return binding.EncodingBinary + } + if m.format != nil { + return binding.EncodingStructured + } + + return binding.EncodingUnknown +} + +func (m *Message) ReadStructured(ctx context.Context, encoder binding.StructuredWriter) error { + if m.format == nil { + return binding.ErrNotStructured + } + + return encoder.SetStructuredEvent(ctx, m.format, bytes.NewReader(m.internal.GetBinaryData())) +} + +func (m *Message) ReadBinary(ctx context.Context, encoder binding.BinaryWriter) error { + if m.version == nil { + return binding.ErrNotBinary + } + + if m.format != nil { + return binding.ErrNotBinary + } + + if m.internal.SpecVersion != "" { + err := encoder.SetAttribute(m.version.AttributeFromKind(spec.SpecVersion), m.internal.SpecVersion) + if err != nil { + return err + } + } + if m.internal.Id != "" { + err := encoder.SetAttribute(m.version.AttributeFromKind(spec.ID), m.internal.Id) + if err != nil { + return err + } + } + if m.internal.Source != "" { + err := encoder.SetAttribute(m.version.AttributeFromKind(spec.Source), m.internal.Source) + if err != nil { + return err + } + } + if m.internal.Type != "" { + err := encoder.SetAttribute(m.version.AttributeFromKind(spec.Type), 
m.internal.Type) + if err != nil { + return err + } + } + + for name, value := range m.internal.Attributes { + attrVal, err := valueFrom(value) + if err != nil { + return fmt.Errorf("failed to convert attribute %s: %s", name, err) + } + + if strings.HasPrefix(name, prefix) { + attr := m.version.Attribute(name) + if attr != nil { + err = encoder.SetAttribute(attr, attrVal) + if err != nil { + return err + } + } else { + err = encoder.SetExtension(strings.TrimPrefix(name, prefix), attrVal) + if err != nil { + return err + } + } + } else if name == contenttype { + err = encoder.SetAttribute(m.version.AttributeFromKind(spec.DataContentType), attrVal) + if err != nil { + return err + } + } + } + + if m.internal.GetBinaryData() != nil { + return encoder.SetData(bytes.NewBuffer(m.internal.GetBinaryData())) + } + + return nil +} + +func (m *Message) Finish(error) error { + return nil +} + +func (m *Message) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) { + attr := m.version.AttributeFromKind(k) + if attr != nil { + switch attr.Kind() { + case spec.SpecVersion: + return attr, m.internal.SpecVersion + case spec.Type: + return attr, m.internal.Type + case spec.Source: + return attr, m.internal.Source + case spec.ID: + return attr, m.internal.Id + // case spec.DataContentType: + // return attr, m.internal.Attributes[contenttype].GetCeString() + default: + return attr, m.internal.Attributes[prefix+attr.Name()] + } + } + + return nil, nil +} + +func (m *Message) GetExtension(name string) interface{} { + return m.internal.Attributes[prefix+name] +} + +func valueFrom(attr *pbv1.CloudEventAttributeValue) (interface{}, error) { + var v interface{} + switch vt := attr.Attr.(type) { + case *pbv1.CloudEventAttributeValue_CeBoolean: + v = vt.CeBoolean + case *pbv1.CloudEventAttributeValue_CeInteger: + v = vt.CeInteger + case *pbv1.CloudEventAttributeValue_CeString: + v = vt.CeString + case *pbv1.CloudEventAttributeValue_CeBytes: + v = vt.CeBytes + case 
*pbv1.CloudEventAttributeValue_CeUri: + uri, err := url.Parse(vt.CeUri) + if err != nil { + return nil, fmt.Errorf("failed to parse URI value %s: %s", vt.CeUri, err.Error()) + } + v = uri + case *pbv1.CloudEventAttributeValue_CeUriRef: + uri, err := url.Parse(vt.CeUriRef) + if err != nil { + return nil, fmt.Errorf("failed to parse URIRef value %s: %s", vt.CeUriRef, err.Error()) + } + v = types.URIRef{URL: *uri} + case *pbv1.CloudEventAttributeValue_CeTimestamp: + v = vt.CeTimestamp.AsTime() + default: + return nil, fmt.Errorf("unsupported attribute type: %T", vt) + } + return types.Validate(v) +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/option.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/option.go new file mode 100644 index 000000000..361b1b5de --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/option.go @@ -0,0 +1,24 @@ +package protocol + +import ( + "fmt" +) + +// Option is the function signature +type Option func(*Protocol) error + +// SubscribeOption +type SubscribeOption struct { + Source string +} + +// WithSubscribeOption sets the Subscribe configuration for the client. 
+func WithSubscribeOption(subscribeOpt *SubscribeOption) Option { + return func(p *Protocol) error { + if subscribeOpt == nil { + return fmt.Errorf("the subscribe option must not be nil") + } + p.subscribeOption = subscribeOpt + return nil + } +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/protocol.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/protocol.go new file mode 100644 index 000000000..467fc2455 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/protocol.go @@ -0,0 +1,149 @@ +package protocol + +import ( + "context" + "fmt" + "io" + "sync" + + "google.golang.org/grpc" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/protocol" + + pbv1 "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1" +) + +// protocol for grpc +// define protocol for grpc + +type Protocol struct { + client pbv1.CloudEventServiceClient + subscribeOption *SubscribeOption + // receiver + incoming chan *pbv1.CloudEvent + // inOpen + openerMutex sync.Mutex + + closeChan chan struct{} +} + +var ( + _ protocol.Sender = (*Protocol)(nil) + _ protocol.Opener = (*Protocol)(nil) + _ protocol.Receiver = (*Protocol)(nil) + _ protocol.Closer = (*Protocol)(nil) +) + +// new create grpc protocol +func NewProtocol(clientConn grpc.ClientConnInterface, opts ...Option) (*Protocol, error) { + if clientConn == nil { + return nil, fmt.Errorf("the client connection must not be nil") + } + + // TODO: support clientID and error handling in grpc connection + p := &Protocol{ + client: pbv1.NewCloudEventServiceClient(clientConn), + // subClient: + incoming: make(chan *pbv1.CloudEvent), + closeChan: make(chan struct{}), + } + + if err := p.applyOptions(opts...); err != nil { + return nil, err + } + + return p, nil +} + +func (p *Protocol) 
applyOptions(opts ...Option) error { + for _, fn := range opts { + if err := fn(p); err != nil { + return err + } + } + return nil +} + +func (p *Protocol) Send(ctx context.Context, m binding.Message, transformers ...binding.Transformer) error { + var err error + defer func() { + err = m.Finish(err) + }() + + msg := &pbv1.CloudEvent{} + err = WritePBMessage(ctx, m, msg, transformers...) + if err != nil { + return err + } + + logger := cecontext.LoggerFrom(ctx) + logger.Infof("publishing event with id: %v", msg.Id) + _, err = p.client.Publish(ctx, &pbv1.PublishRequest{ + Event: msg, + }) + if err != nil { + return err + } + return err +} + +func (p *Protocol) OpenInbound(ctx context.Context) error { + if p.subscribeOption == nil { + return fmt.Errorf("the subscribe option must not be nil") + } + + if len(p.subscribeOption.Source) == 0 { + return fmt.Errorf("the subscribe option source must not be empty") + } + + p.openerMutex.Lock() + defer p.openerMutex.Unlock() + + logger := cecontext.LoggerFrom(ctx) + subClient, err := p.client.Subscribe(ctx, &pbv1.SubscriptionRequest{ + Source: p.subscribeOption.Source, + }) + if err != nil { + return err + } + + logger.Infof("subscribing events for: %v", p.subscribeOption.Source) + go func() { + for { + msg, err := subClient.Recv() + if err != nil { + return + } + p.incoming <- msg + } + }() + + // Wait until external or internal context done + select { + case <-ctx.Done(): + case <-p.closeChan: + } + + return nil +} + +// Receive implements Receiver.Receive +func (p *Protocol) Receive(ctx context.Context) (binding.Message, error) { + select { + case m, ok := <-p.incoming: + if !ok { + return nil, io.EOF + } + msg := NewMessage(m) + return msg, nil + case <-ctx.Done(): + return nil, io.EOF + } +} + +func (p *Protocol) Close(ctx context.Context) error { + close(p.closeChan) + return nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/write_message.go 
b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/write_message.go new file mode 100644 index 000000000..1c6149344 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol/write_message.go @@ -0,0 +1,215 @@ +package protocol + +import ( + "bytes" + "context" + "fmt" + "io" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/types" + "google.golang.org/protobuf/types/known/timestamppb" + + pbv1 "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protobuf/v1" +) + +// WritePBMessage fills the provided pubMessage with the message m. +// Using context you can tweak the encoding processing (more details on binding.Write documentation). +func WritePBMessage(ctx context.Context, m binding.Message, pbEvt *pbv1.CloudEvent, transformers ...binding.Transformer) error { + structuredWriter := (*pbEventWriter)(pbEvt) + binaryWriter := (*pbEventWriter)(pbEvt) + + _, err := binding.Write( + ctx, + m, + structuredWriter, + binaryWriter, + transformers..., + ) + return err +} + +type pbEventWriter pbv1.CloudEvent + +var ( + _ binding.StructuredWriter = (*pbEventWriter)(nil) + _ binding.BinaryWriter = (*pbEventWriter)(nil) +) + +func (b *pbEventWriter) SetStructuredEvent(ctx context.Context, f format.Format, event io.Reader) error { + if b.Attributes == nil { + b.Attributes = make(map[string]*pbv1.CloudEventAttributeValue) + } + + b.Attributes[contenttype], _ = attributeFor(f.MediaType()) + + var buf bytes.Buffer + _, err := io.Copy(&buf, event) + if err != nil { + return err + } + + // TODO: check the data content type and set the right data format + b.Data = &pbv1.CloudEvent_BinaryData{ + BinaryData: buf.Bytes(), + } + + return nil +} + +func (b *pbEventWriter) Start(ctx context.Context) error { + if b.Attributes == nil { + b.Attributes = 
make(map[string]*pbv1.CloudEventAttributeValue) + } + + return nil +} + +func (b *pbEventWriter) End(ctx context.Context) error { + return nil +} + +func (b *pbEventWriter) SetData(reader io.Reader) error { + buf, ok := reader.(*bytes.Buffer) + if !ok { + buf = new(bytes.Buffer) + _, err := io.Copy(buf, reader) + if err != nil { + return err + } + } + + b.Data = &pbv1.CloudEvent_BinaryData{ + BinaryData: buf.Bytes(), + } + + return nil +} + +func (b *pbEventWriter) SetAttribute(attribute spec.Attribute, value interface{}) error { + switch attribute.Kind() { + case spec.SpecVersion: + val, ok := value.(string) + if !ok { + return fmt.Errorf("invalid SpecVersion type, expected string got %T", value) + } + b.SpecVersion = val + case spec.ID: + val, ok := value.(string) + if !ok { + return fmt.Errorf("invalid ID type, expected string got %T", value) + } + b.Id = val + case spec.Source: + val, ok := value.(string) + if !ok { + return fmt.Errorf("invalid Source type, expected string got %T", value) + } + b.Source = val + case spec.Type: + val, ok := value.(string) + if !ok { + return fmt.Errorf("invalid Type type, expected string got %T", value) + } + b.Type = val + case spec.DataContentType: + if value == nil { + delete(b.Attributes, contenttype) + } else { + attrVal, err := attributeFor(value) + if err != nil { + return err + } + b.Attributes[contenttype] = attrVal + } + case spec.Subject: + if value == nil { + delete(b.Attributes, prefix+subject) + } else { + attrVal, err := attributeFor(value) + if err != nil { + return err + } + b.Attributes[prefix+subject] = attrVal + } + case spec.Time: + if value == nil { + delete(b.Attributes, prefix+time) + } else { + attrVal, err := attributeFor(value) + if err != nil { + return err + } + b.Attributes[prefix+time] = attrVal + } + default: + if value == nil { + delete(b.Attributes, prefix+attribute.Name()) + } else { + attrVal, err := attributeFor(value) + if err != nil { + return err + } + b.Attributes[prefix+attribute.Name()] 
= attrVal + } + } + + return nil +} + +func (b *pbEventWriter) SetExtension(name string, value interface{}) error { + if value == nil { + delete(b.Attributes, prefix+name) + } else { + attrVal, err := attributeFor(value) + if err != nil { + return err + } + b.Attributes[prefix+name] = attrVal + } + + return nil +} + +func attributeFor(v interface{}) (*pbv1.CloudEventAttributeValue, error) { + vv, err := types.Validate(v) + if err != nil { + return nil, err + } + attr := &pbv1.CloudEventAttributeValue{} + switch vt := vv.(type) { + case bool: + attr.Attr = &pbv1.CloudEventAttributeValue_CeBoolean{ + CeBoolean: vt, + } + case int32: + attr.Attr = &pbv1.CloudEventAttributeValue_CeInteger{ + CeInteger: vt, + } + case string: + attr.Attr = &pbv1.CloudEventAttributeValue_CeString{ + CeString: vt, + } + case []byte: + attr.Attr = &pbv1.CloudEventAttributeValue_CeBytes{ + CeBytes: vt, + } + case types.URI: + attr.Attr = &pbv1.CloudEventAttributeValue_CeUri{ + CeUri: vt.String(), + } + case types.URIRef: + attr.Attr = &pbv1.CloudEventAttributeValue_CeUriRef{ + CeUriRef: vt.String(), + } + case types.Timestamp: + attr.Attr = &pbv1.CloudEventAttributeValue_CeTimestamp{ + CeTimestamp: timestamppb.New(vt.Time), + } + default: + return nil, fmt.Errorf("unsupported attribute type: %T", v) + } + return attr, nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/sourceoptions.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/sourceoptions.go new file mode 100644 index 000000000..1d71cfe7d --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/sourceoptions.go @@ -0,0 +1,52 @@ +package grpc + +import ( + "context" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc/protocol" +) + +type gRPCSourceOptions struct { + 
GRPCOptions + errorChan chan error // grpc client connection doesn't have error channel, it will handle reconnecting automatically + sourceID string +} + +func NewSourceOptions(gRPCOptions *GRPCOptions, sourceID string) *options.CloudEventsSourceOptions { + return &options.CloudEventsSourceOptions{ + CloudEventsOptions: &gRPCSourceOptions{ + GRPCOptions: *gRPCOptions, + errorChan: make(chan error), + sourceID: sourceID, + }, + SourceID: sourceID, + } +} + +func (o *gRPCSourceOptions) WithContext(ctx context.Context, evtCtx cloudevents.EventContext) (context.Context, error) { + // grpc source client doesn't need to update topic in the context + return ctx, nil +} + +func (o *gRPCSourceOptions) Protocol(ctx context.Context) (options.CloudEventsProtocol, error) { + receiver, err := o.GetCloudEventsProtocol( + ctx, + func(err error) { + o.errorChan <- err + }, + protocol.WithSubscribeOption(&protocol.SubscribeOption{ + Source: o.sourceID, + }), + ) + if err != nil { + return nil, err + } + return receiver, nil +} + +func (o *gRPCSourceOptions) ErrorChan() <-chan error { + return o.errorChan +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/agentoptions.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/agentoptions.go new file mode 100644 index 000000000..9d1ef702e --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/agentoptions.go @@ -0,0 +1,113 @@ +package mqtt + +import ( + "context" + "fmt" + "strings" + + cloudeventsmqtt "github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2" + cloudevents "github.com/cloudevents/sdk-go/v2" + cloudeventscontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/eclipse/paho.golang/paho" + "k8s.io/klog/v2" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" +) + +type mqttAgentOptions struct { + MQTTOptions + errorChan 
chan error + clusterName string + agentID string +} + +func NewAgentOptions(mqttOptions *MQTTOptions, clusterName, agentID string) *options.CloudEventsAgentOptions { + mqttAgentOptions := &mqttAgentOptions{ + MQTTOptions: *mqttOptions, + errorChan: make(chan error), + clusterName: clusterName, + agentID: agentID, + } + + return &options.CloudEventsAgentOptions{ + CloudEventsOptions: mqttAgentOptions, + AgentID: mqttAgentOptions.agentID, + ClusterName: mqttAgentOptions.clusterName, + } +} + +func (o *mqttAgentOptions) WithContext(ctx context.Context, evtCtx cloudevents.EventContext) (context.Context, error) { + topic, err := getAgentPubTopic(ctx) + if err != nil { + return nil, err + } + + if topic != nil { + return cloudeventscontext.WithTopic(ctx, string(*topic)), nil + } + + eventType, err := types.ParseCloudEventsType(evtCtx.GetType()) + if err != nil { + return nil, fmt.Errorf("unsupported event type %s, %v", eventType, err) + } + + originalSource, err := evtCtx.GetExtension(types.ExtensionOriginalSource) + if err != nil { + return nil, err + } + + // agent request to sync resource spec from all sources + if eventType.Action == types.ResyncRequestAction && originalSource == types.SourceAll { + if len(o.Topics.AgentBroadcast) == 0 { + klog.Warningf("the agent broadcast topic not set, fall back to the agent events topic") + + // TODO after supporting multiple sources, we should list each source + eventsTopic := replaceLast(o.Topics.AgentEvents, "+", o.clusterName) + return cloudeventscontext.WithTopic(ctx, eventsTopic), nil + } + + resyncTopic := strings.Replace(o.Topics.AgentBroadcast, "+", o.clusterName, 1) + return cloudeventscontext.WithTopic(ctx, resyncTopic), nil + } + + topicSource, err := getSourceFromEventsTopic(o.Topics.AgentEvents) + if err != nil { + return nil, err + } + + // agent publishes status events or spec resync events + eventsTopic := replaceLast(o.Topics.AgentEvents, "+", o.clusterName) + eventsTopic = replaceLast(eventsTopic, "+", 
topicSource) + return cloudeventscontext.WithTopic(ctx, eventsTopic), nil +} + +func (o *mqttAgentOptions) Protocol(ctx context.Context) (options.CloudEventsProtocol, error) { + subscribe := &paho.Subscribe{ + Subscriptions: map[string]paho.SubscribeOptions{ + // TODO support multiple sources, currently the client require the source events topic has a sourceID, in + // the future, client may need a source list, it will subscribe to each source + // receiving the sources events + replaceLast(o.Topics.SourceEvents, "+", o.clusterName): {QoS: byte(o.SubQoS)}, + }, + } + + if len(o.Topics.SourceBroadcast) != 0 { + // receiving status resync events from all sources + subscribe.Subscriptions[o.Topics.SourceBroadcast] = paho.SubscribeOptions{QoS: byte(o.SubQoS)} + } + + return o.GetCloudEventsProtocol( + ctx, + fmt.Sprintf("%s-client", o.agentID), + func(err error) { + o.errorChan <- err + }, + cloudeventsmqtt.WithPublish(&paho.Publish{QoS: byte(o.PubQoS)}), + cloudeventsmqtt.WithSubscribe(subscribe), + ) +} + +func (o *mqttAgentOptions) ErrorChan() <-chan error { + return o.errorChan +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/options.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/options.go new file mode 100644 index 000000000..cfce3004c --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/options.go @@ -0,0 +1,321 @@ +package mqtt + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "os" + "regexp" + "strings" + "time" + + cloudeventsmqtt "github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2" + "github.com/eclipse/paho.golang/packets" + "github.com/eclipse/paho.golang/paho" + "gopkg.in/yaml.v2" + "k8s.io/apimachinery/pkg/util/errors" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" +) + +type TopicKey string +type PubTopic string + 
+const ( + MQTT_SOURCE_PUB_TOPIC_KEY TopicKey = "mqtt_source_pub_topic" + MQTT_AGENT_PUB_TOPIC_KEY TopicKey = "mqtt_agent_pub_topic" +) + +// MQTTOptions holds the options that are used to build MQTT client. +type MQTTOptions struct { + Topics types.Topics + BrokerHost string + Username string + Password string + CAFile string + ClientCertFile string + ClientKeyFile string + KeepAlive uint16 + DialTimeout time.Duration + PubQoS int + SubQoS int +} + +// MQTTConfig holds the information needed to build connect to MQTT broker as a given user. +type MQTTConfig struct { + // BrokerHost is the host of the MQTT broker (hostname:port). + BrokerHost string `json:"brokerHost" yaml:"brokerHost"` + + // Username is the username for basic authentication to connect the MQTT broker. + Username string `json:"username,omitempty" yaml:"username,omitempty"` + // Password is the password for basic authentication to connect the MQTT broker. + Password string `json:"password,omitempty" yaml:"password,omitempty"` + + // CAFile is the file path to a cert file for the MQTT broker certificate authority. + CAFile string `json:"caFile,omitempty" yaml:"caFile,omitempty"` + // ClientCertFile is the file path to a client cert file for TLS. + ClientCertFile string `json:"clientCertFile,omitempty" yaml:"clientCertFile,omitempty"` + // ClientKeyFile is the file path to a client key file for TLS. 
+ ClientKeyFile string `json:"clientKeyFile,omitempty" yaml:"clientKeyFile,omitempty"` + + // KeepAlive is the keep alive time in seconds for MQTT clients, by default is 60s + KeepAlive *uint16 `json:"keepAlive,omitempty" yaml:"keepAlive,omitempty"` + + // DialTimeout is the timeout when establishing a MQTT TCP connection, by default is 60s + DialTimeout *time.Duration `json:"dialTimeout,omitempty" yaml:"dialTimeout,omitempty"` + + // PubQoS is the QoS for publish, by default is 1 + PubQoS *int `json:"pubQoS,omitempty" yaml:"pubQoS,omitempty"` + // SubQoS is the Qos for subscribe, by default is 1 + SubQoS *int `json:"subQoS,omitempty" yaml:"subQoS,omitempty"` + + // Topics are MQTT topics for resource spec, status and resync. + Topics *types.Topics `json:"topics,omitempty" yaml:"topics,omitempty"` +} + +// BuildMQTTOptionsFromFlags builds configs from a config filepath. +func BuildMQTTOptionsFromFlags(configPath string) (*MQTTOptions, error) { + configData, err := os.ReadFile(configPath) + if err != nil { + return nil, err + } + + config := &MQTTConfig{} + if err := yaml.Unmarshal(configData, config); err != nil { + return nil, err + } + + if config.BrokerHost == "" { + return nil, fmt.Errorf("brokerHost is required") + } + + if (config.ClientCertFile == "" && config.ClientKeyFile != "") || + (config.ClientCertFile != "" && config.ClientKeyFile == "") { + return nil, fmt.Errorf("either both or none of clientCertFile and clientKeyFile must be set") + } + if config.ClientCertFile != "" && config.ClientKeyFile != "" && config.CAFile == "" { + return nil, fmt.Errorf("setting clientCertFile and clientKeyFile requires caFile") + } + + if err := validateTopics(config.Topics); err != nil { + return nil, err + } + + options := &MQTTOptions{ + BrokerHost: config.BrokerHost, + Username: config.Username, + Password: config.Password, + CAFile: config.CAFile, + ClientCertFile: config.ClientCertFile, + ClientKeyFile: config.ClientKeyFile, + KeepAlive: 60, + PubQoS: 1, + SubQoS: 
1, + DialTimeout: 60 * time.Second, + Topics: *config.Topics, + } + + if config.KeepAlive != nil { + options.KeepAlive = *config.KeepAlive + } + + if config.DialTimeout != nil { + options.DialTimeout = *config.DialTimeout + } + + if config.PubQoS != nil { + options.PubQoS = *config.PubQoS + } + + if config.SubQoS != nil { + options.SubQoS = *config.SubQoS + } + + return options, nil +} + +func (o *MQTTOptions) GetNetConn() (net.Conn, error) { + if len(o.CAFile) != 0 { + certPool, err := x509.SystemCertPool() + if err != nil { + return nil, err + } + + caPEM, err := os.ReadFile(o.CAFile) + if err != nil { + return nil, err + } + + if ok := certPool.AppendCertsFromPEM(caPEM); !ok { + return nil, fmt.Errorf("invalid CA %s", o.CAFile) + } + + clientCerts, err := tls.LoadX509KeyPair(o.ClientCertFile, o.ClientKeyFile) + if err != nil { + return nil, err + } + + conn, err := tls.DialWithDialer(&net.Dialer{Timeout: o.DialTimeout}, "tcp", o.BrokerHost, &tls.Config{ + RootCAs: certPool, + Certificates: []tls.Certificate{clientCerts}, + }) + if err != nil { + return nil, fmt.Errorf("failed to connect to MQTT broker %s, %v", o.BrokerHost, err) + } + + // ensure parallel writes are thread-Safe + return packets.NewThreadSafeConn(conn), nil + } + + conn, err := net.DialTimeout("tcp", o.BrokerHost, o.DialTimeout) + if err != nil { + return nil, fmt.Errorf("failed to connect to MQTT broker %s, %v", o.BrokerHost, err) + } + + // ensure parallel writes are thread-Safe + return packets.NewThreadSafeConn(conn), nil +} + +func (o *MQTTOptions) GetMQTTConnectOption(clientID string) *paho.Connect { + connect := &paho.Connect{ + ClientID: clientID, + KeepAlive: o.KeepAlive, + CleanStart: true, + } + + if len(o.Username) != 0 { + connect.Username = o.Username + connect.UsernameFlag = true + } + + if len(o.Password) != 0 { + connect.Password = []byte(o.Password) + connect.PasswordFlag = true + } + + return connect +} + +func (o *MQTTOptions) GetCloudEventsProtocol( + ctx context.Context, + 
clientID string, + errorHandler func(error), + clientOpts ...cloudeventsmqtt.Option, +) (options.CloudEventsProtocol, error) { + netConn, err := o.GetNetConn() + if err != nil { + return nil, err + } + + config := &paho.ClientConfig{ + ClientID: clientID, + Conn: netConn, + OnClientError: errorHandler, + } + + opts := []cloudeventsmqtt.Option{cloudeventsmqtt.WithConnect(o.GetMQTTConnectOption(clientID))} + opts = append(opts, clientOpts...) + return cloudeventsmqtt.New(ctx, config, opts...) +} + +func validateTopics(topics *types.Topics) error { + if topics == nil { + return fmt.Errorf("the topics must be set") + } + + var errs []error + if !regexp.MustCompile(types.SourceEventsTopicPattern).MatchString(topics.SourceEvents) { + errs = append(errs, fmt.Errorf("invalid source events topic %q, it should match `%s`", + topics.SourceEvents, types.SourceEventsTopicPattern)) + } + + if !regexp.MustCompile(types.AgentEventsTopicPattern).MatchString(topics.AgentEvents) { + errs = append(errs, fmt.Errorf("invalid agent events topic %q, it should match `%s`", + topics.AgentEvents, types.AgentEventsTopicPattern)) + } + + if len(topics.SourceBroadcast) != 0 { + if !regexp.MustCompile(types.SourceBroadcastTopicPattern).MatchString(topics.SourceBroadcast) { + errs = append(errs, fmt.Errorf("invalid source broadcast topic %q, it should match `%s`", + topics.SourceBroadcast, types.SourceBroadcastTopicPattern)) + } + } + + if len(topics.AgentBroadcast) != 0 { + if !regexp.MustCompile(types.AgentBroadcastTopicPattern).MatchString(topics.AgentBroadcast) { + errs = append(errs, fmt.Errorf("invalid agent broadcast topic %q, it should match `%s`", + topics.AgentBroadcast, types.AgentBroadcastTopicPattern)) + } + } + + return errors.NewAggregate(errs) +} + +func getSourceFromEventsTopic(topic string) (string, error) { + if !regexp.MustCompile(types.EventsTopicPattern).MatchString(topic) { + return "", fmt.Errorf("failed to get source from topic: %q", topic) + } + + subTopics := 
strings.Split(topic, "/") + // get source form share topic, e.g. $share/group/sources/+/consumers/+/agentevents + if strings.HasPrefix(topic, "$share") { + return subTopics[3], nil + } + + // get source form topic, e.g. sources/+/consumers/+/agentevents + return subTopics[1], nil +} + +func replaceLast(str, old, new string) string { + last := strings.LastIndex(str, old) + if last == -1 { + return str + } + return str[:last] + new + str[last+len(old):] +} + +func getSourcePubTopic(ctx context.Context) (*PubTopic, error) { + ctxTopic := ctx.Value(MQTT_SOURCE_PUB_TOPIC_KEY) + if ctxTopic == nil { + return nil, nil + } + + topic, ok := ctxTopic.(PubTopic) + if !ok { + return nil, fmt.Errorf("source pub topic should be a string") + } + + if regexp.MustCompile(types.SourceEventsTopicPattern).MatchString(string(topic)) { + return &topic, nil + } + + if regexp.MustCompile(types.SourceBroadcastTopicPattern).MatchString(string(topic)) { + return &topic, nil + } + + return nil, fmt.Errorf("invalid source pub topic") +} + +func getAgentPubTopic(ctx context.Context) (*PubTopic, error) { + ctxTopic := ctx.Value(MQTT_AGENT_PUB_TOPIC_KEY) + if ctxTopic == nil { + return nil, nil + } + + topic, ok := ctxTopic.(PubTopic) + if !ok { + return nil, fmt.Errorf("agent pub topic should be a string") + } + + if regexp.MustCompile(types.AgentEventsTopicPattern).MatchString(string(topic)) { + return &topic, nil + } + + if regexp.MustCompile(types.AgentBroadcastTopicPattern).MatchString(string(topic)) { + return &topic, nil + } + + return nil, fmt.Errorf("invalid agent pub topic") +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/sourceoptions.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/sourceoptions.go new file mode 100644 index 000000000..9c245809a --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt/sourceoptions.go @@ -0,0 +1,113 @@ +package mqtt + +import ( + 
"context" + "fmt" + "strings" + + cloudeventsmqtt "github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2" + cloudevents "github.com/cloudevents/sdk-go/v2" + cloudeventscontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/eclipse/paho.golang/paho" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" +) + +type mqttSourceOptions struct { + MQTTOptions + errorChan chan error + sourceID string + clientID string +} + +func NewSourceOptions(mqttOptions *MQTTOptions, clientID, sourceID string) *options.CloudEventsSourceOptions { + mqttSourceOptions := &mqttSourceOptions{ + MQTTOptions: *mqttOptions, + errorChan: make(chan error), + sourceID: sourceID, + clientID: clientID, + } + + return &options.CloudEventsSourceOptions{ + CloudEventsOptions: mqttSourceOptions, + SourceID: mqttSourceOptions.sourceID, + } +} + +func (o *mqttSourceOptions) WithContext(ctx context.Context, evtCtx cloudevents.EventContext) (context.Context, error) { + topic, err := getSourcePubTopic(ctx) + if err != nil { + return nil, err + } + + if topic != nil { + return cloudeventscontext.WithTopic(ctx, string(*topic)), nil + } + + eventType, err := types.ParseCloudEventsType(evtCtx.GetType()) + if err != nil { + return nil, fmt.Errorf("unsupported event type %s, %v", eventType, err) + } + + clusterName, err := evtCtx.GetExtension(types.ExtensionClusterName) + if err != nil { + return nil, err + } + + if eventType.Action == types.ResyncRequestAction && clusterName == types.ClusterAll { + // source request to get resources status from all agents + if len(o.Topics.SourceBroadcast) == 0 { + return nil, fmt.Errorf("the source broadcast topic not set") + } + + resyncTopic := strings.Replace(o.Topics.SourceBroadcast, "+", o.sourceID, 1) + return cloudeventscontext.WithTopic(ctx, resyncTopic), nil + } + + // source publishes spec events or status resync events + eventsTopic := strings.Replace(o.Topics.SourceEvents, 
"+", fmt.Sprintf("%s", clusterName), 1) + return cloudeventscontext.WithTopic(ctx, eventsTopic), nil +} + +func (o *mqttSourceOptions) Protocol(ctx context.Context) (options.CloudEventsProtocol, error) { + topicSource, err := getSourceFromEventsTopic(o.Topics.AgentEvents) + if err != nil { + return nil, err + } + + if topicSource != o.sourceID { + return nil, fmt.Errorf("the topic source %q does not match with the client sourceID %q", + o.Topics.AgentEvents, o.sourceID) + } + + subscribe := &paho.Subscribe{ + Subscriptions: map[string]paho.SubscribeOptions{ + // receiving the agent events + o.Topics.AgentEvents: {QoS: byte(o.SubQoS)}, + }, + } + + if len(o.Topics.AgentBroadcast) != 0 { + // receiving spec resync events from all agents + subscribe.Subscriptions[o.Topics.AgentBroadcast] = paho.SubscribeOptions{QoS: byte(o.SubQoS)} + } + + receiver, err := o.GetCloudEventsProtocol( + ctx, + o.clientID, + func(err error) { + o.errorChan <- err + }, + cloudeventsmqtt.WithPublish(&paho.Publish{QoS: byte(o.PubQoS)}), + cloudeventsmqtt.WithSubscribe(subscribe), + ) + if err != nil { + return nil, err + } + return receiver, nil +} + +func (o *mqttSourceOptions) ErrorChan() <-chan error { + return o.errorChan +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/options.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/options.go new file mode 100644 index 000000000..9bd231c89 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/options.go @@ -0,0 +1,75 @@ +package options + +import ( + "context" + + cloudevents "github.com/cloudevents/sdk-go/v2" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// CloudEventsOptions provides cloudevents clients to send/receive cloudevents based on different event protocol. +// +// Available implementations: +// - MQTT +type CloudEventsOptions interface { + // WithContext returns back a new context with the given cloudevent context. 
The new context will be used when
+ // sending a cloudevent. The new context is protocol-dependent, for example, for MQTT, the new context should contain
+ // the MQTT topic, for Kafka, the context should contain the message key, etc.
+ WithContext(ctx context.Context, evtContext cloudevents.EventContext) (context.Context, error)
+
+ // Protocol returns a specific protocol to initialize the cloudevents client
+ Protocol(ctx context.Context) (CloudEventsProtocol, error)
+
+ // ErrorChan returns a chan which will receive the cloudevents connection error. The source/agent client will try to
+ // reconnect when this error occurs.
+ ErrorChan() <-chan error
+}
+
+// CloudEventsProtocol is a set of interfaces that a specific binding needs to implement
+// Reference: https://cloudevents.github.io/sdk-go/protocol_implementations.html#protocol-interfaces
+type CloudEventsProtocol interface {
+ protocol.Sender
+ protocol.Receiver
+ protocol.Closer
+}
+
+// EventRateLimit for limiting the event sending rate.
+type EventRateLimit struct {
+ // QPS indicates the maximum QPS to send the event.
+ // If it's less than or equal to zero, the DefaultQPS (50) will be used.
+ QPS float32
+
+ // Maximum burst for throttle.
+ // If it's less than or equal to zero, the DefaultBurst (100) will be used.
+ Burst int
+}
+
+// CloudEventsSourceOptions provides the required options to build a source CloudEventsClient
+type CloudEventsSourceOptions struct {
+ // CloudEventsOptions provides cloudevents clients to send/receive cloudevents based on different event protocol.
+ CloudEventsOptions CloudEventsOptions
+
+ // SourceID is a unique identifier for a source, for example, it can generate a source ID by hashing the hub cluster
+ // URL and appending the controller name. Similarly, a RESTful service can select a unique name or generate a unique
+ // ID in the associated database for its source identification.
+ SourceID string
+
+ // EventRateLimit limits the event sending rate. 
+ EventRateLimit EventRateLimit +} + +// CloudEventsAgentOptions provides the required options to build an agent CloudEventsClient +type CloudEventsAgentOptions struct { + // CloudEventsOptions provides cloudevents clients to send/receive cloudevents based on different event protocol. + CloudEventsOptions CloudEventsOptions + + // AgentID is a unique identifier for an agent, for example, it can consist of a managed cluster name and an agent + // name. + AgentID string + + // ClusterName is the name of a managed cluster on which the agent runs. + ClusterName string + + // EventRateLimit limits the event sending rate. + EventRateLimit EventRateLimit +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/payload/payload.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/payload/payload.go new file mode 100644 index 000000000..cacfcd950 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/payload/payload.go @@ -0,0 +1,48 @@ +package payload + +import ( + "encoding/json" + "fmt" + + cloudevents "github.com/cloudevents/sdk-go/v2" +) + +type ResourceVersion struct { + ResourceID string `json:"resourceID"` + ResourceVersion int64 `json:"resourceVersion"` +} + +type ResourceStatusHash struct { + ResourceID string `json:"resourceID"` + StatusHash string `json:"statusHash"` +} + +// ResourceVersionList represents the resource versions of the resources maintained by the agent. +// The item of this list includes the resource ID and resource version. +type ResourceVersionList struct { + Versions []ResourceVersion `json:"resourceVersions"` +} + +// ResourceStatusHashList represents the status hash of the resources maintained by the source. +// The item of this list includes the resource ID and resource status hash. 
+type ResourceStatusHashList struct { + Hashes []ResourceStatusHash `json:"statusHashes"` +} + +func DecodeSpecResyncRequest(evt cloudevents.Event) (*ResourceVersionList, error) { + versions := &ResourceVersionList{} + data := evt.Data() + if err := json.Unmarshal(data, versions); err != nil { + return nil, fmt.Errorf("failed to unmarshal spec resync request payload %s, %v", string(data), err) + } + return versions, nil +} + +func DecodeStatusResyncRequest(evt cloudevents.Event) (*ResourceStatusHashList, error) { + hashes := &ResourceStatusHashList{} + data := evt.Data() + if err := json.Unmarshal(data, hashes); err != nil { + return nil, fmt.Errorf("failed to unmarshal status resync request payload %s, %v", string(data), err) + } + return hashes, nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/ratelimiter.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/ratelimiter.go new file mode 100644 index 000000000..ac1e2c188 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/ratelimiter.go @@ -0,0 +1,34 @@ +package generic + +import ( + "time" + + "k8s.io/client-go/util/flowcontrol" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" +) + +// longThrottleLatency defines threshold for logging requests. All requests being +// throttled (via the provided rateLimiter) for more than longThrottleLatency will +// be logged. 
+const longThrottleLatency = 1 * time.Second + +const ( + // TODO we may adjust these after performance test + DefaultQPS float32 = 50.0 + DefaultBurst int = 100 +) + +func NewRateLimiter(limit options.EventRateLimit) flowcontrol.RateLimiter { + qps := limit.QPS + if qps <= 0.0 { + qps = DefaultQPS + } + + burst := limit.Burst + if burst <= 0 { + burst = DefaultBurst + } + + return flowcontrol.NewTokenBucketRateLimiter(qps, burst) +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/sourceclient.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/sourceclient.go new file mode 100644 index 000000000..58da81556 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/sourceclient.go @@ -0,0 +1,314 @@ +package generic + +import ( + "context" + "fmt" + "strconv" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" + + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/payload" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" +) + +// CloudEventSourceClient is a client for a source to resync/send/receive its resources with cloud events. +// +// A source is a component that runs on a server, it can be a controller on the hub cluster or a RESTful service +// handling resource requests. +type CloudEventSourceClient[T ResourceObject] struct { + *baseClient + lister Lister[T] + codecs map[types.CloudEventsDataType]Codec[T] + statusHashGetter StatusHashGetter[T] + sourceID string +} + +// NewCloudEventSourceClient returns an instance for CloudEventSourceClient. The following arguments are required to +// create a client +// - sourceOptions provides the sourceID and the cloudevents clients that are based on different event protocols for +// sending/receiving the cloudevents. +// - lister gets the resources from a cache/store of a source. 
+// - statusHashGetter calculates the resource status hash.
+// - codecs is a list of codecs for encoding/decoding a resource object/cloudevent to/from a cloudevent/resource object.
+func NewCloudEventSourceClient[T ResourceObject](
+ ctx context.Context,
+ sourceOptions *options.CloudEventsSourceOptions,
+ lister Lister[T],
+ statusHashGetter StatusHashGetter[T],
+ codecs ...Codec[T],
+) (*CloudEventSourceClient[T], error) {
+ baseClient := &baseClient{
+ cloudEventsOptions: sourceOptions.CloudEventsOptions,
+ cloudEventsRateLimiter: NewRateLimiter(sourceOptions.EventRateLimit),
+ reconnectedChan: make(chan struct{}),
+ }
+
+ if err := baseClient.connect(ctx); err != nil {
+ return nil, err
+ }
+
+ evtCodes := make(map[types.CloudEventsDataType]Codec[T])
+ for _, codec := range codecs {
+ evtCodes[codec.EventDataType()] = codec
+ }
+
+ return &CloudEventSourceClient[T]{
+ baseClient: baseClient,
+ lister: lister,
+ codecs: evtCodes,
+ statusHashGetter: statusHashGetter,
+ sourceID: sourceOptions.SourceID,
+ }, nil
+}
+
+func (c *CloudEventSourceClient[T]) ReconnectedChan() <-chan struct{} {
+ return c.reconnectedChan
+}
+
+// Resync the resources status by sending a status resync request from the current source to a specified cluster. 
+func (c *CloudEventSourceClient[T]) Resync(ctx context.Context, clusterName string) error { + // list the resource objects that are maintained by the current source with a specified cluster + objs, err := c.lister.List(types.ListOptions{Source: c.sourceID, ClusterName: clusterName}) + if err != nil { + return err + } + + hashes := &payload.ResourceStatusHashList{Hashes: make([]payload.ResourceStatusHash, len(objs))} + for i, obj := range objs { + statusHash, err := c.statusHashGetter(obj) + if err != nil { + return err + } + + hashes.Hashes[i] = payload.ResourceStatusHash{ + ResourceID: string(obj.GetUID()), + StatusHash: statusHash, + } + } + + // only resync the resources whose event data type is registered + for eventDataType := range c.codecs { + eventType := types.CloudEventsType{ + CloudEventsDataType: eventDataType, + SubResource: types.SubResourceStatus, + Action: types.ResyncRequestAction, + } + + evt := types.NewEventBuilder(c.sourceID, eventType).WithClusterName(clusterName).NewEvent() + if err := evt.SetData(cloudevents.ApplicationJSON, hashes); err != nil { + return fmt.Errorf("failed to set data to cloud event: %v", err) + } + + if err := c.publish(ctx, evt); err != nil { + return err + } + } + + return nil +} + +// Publish a resource spec from a source to an agent. +func (c *CloudEventSourceClient[T]) Publish(ctx context.Context, eventType types.CloudEventsType, obj T) error { + if eventType.SubResource != types.SubResourceSpec { + return fmt.Errorf("unsupported event eventType %s", eventType) + } + + codec, ok := c.codecs[eventType.CloudEventsDataType] + if !ok { + return fmt.Errorf("failed to find the codec for event %s", eventType.CloudEventsDataType) + } + + evt, err := codec.Encode(c.sourceID, eventType, obj) + if err != nil { + return err + } + + if err := c.publish(ctx, *evt); err != nil { + return err + } + + return nil +} + +// Subscribe the events that are from the agent spec resync request or agent resource status request. 
+// For spec resync request, source publish the current resources spec back as response. +// For resource status request, source receives resource status and handles the status with resource handlers. +func (c *CloudEventSourceClient[T]) Subscribe(ctx context.Context, handlers ...ResourceHandler[T]) { + c.subscribe(ctx, func(ctx context.Context, evt cloudevents.Event) { + c.receive(ctx, evt, handlers...) + }) +} + +func (c *CloudEventSourceClient[T]) receive(ctx context.Context, evt cloudevents.Event, handlers ...ResourceHandler[T]) { + klog.V(4).Infof("Received event:\n%s", evt) + + eventType, err := types.ParseCloudEventsType(evt.Type()) + if err != nil { + klog.Errorf("failed to parse cloud event type, %v", err) + return + } + + if eventType.Action == types.ResyncRequestAction { + if eventType.SubResource != types.SubResourceSpec { + klog.Warningf("unsupported event type %s, ignore", eventType) + return + } + + if err := c.respondResyncSpecRequest(ctx, eventType.CloudEventsDataType, evt); err != nil { + klog.Errorf("failed to resync resources spec, %v", err) + } + + return + } + + codec, ok := c.codecs[eventType.CloudEventsDataType] + if !ok { + klog.Warningf("failed to find the codec for event %s, ignore", eventType.CloudEventsDataType) + return + } + + if eventType.SubResource != types.SubResourceStatus { + klog.Warningf("unsupported event type %s, ignore", eventType) + return + } + + clusterName, err := evt.Context.GetExtension(types.ExtensionClusterName) + if err != nil { + klog.Errorf("failed to find cluster name, %v", err) + return + } + + obj, err := codec.Decode(&evt) + if err != nil { + klog.Errorf("failed to decode status, %v", err) + return + } + + action, err := c.statusAction(fmt.Sprintf("%s", clusterName), obj) + if err != nil { + klog.Errorf("failed to generate status event %s, %v", evt, err) + return + } + + if len(action) == 0 { + // no action is required, ignore + return + } + + for _, handler := range handlers { + if err := handler(action, 
obj); err != nil { + klog.Errorf("failed to handle status event %s, %v", evt, err) + } + } +} + +// Upon receiving the spec resync event, the source responds by sending resource status events to the broker as follows: +// - If the request event message is empty, the source returns all resources associated with the work agent. +// - If the request event message contains resource IDs and versions, the source retrieves the resource with the +// specified ID and compares the versions. +// - If the requested resource version matches the source's current maintained resource version, the source does not +// resend the resource. +// - If the requested resource version is older than the source's current maintained resource version, the source +// sends the resource. +func (c *CloudEventSourceClient[T]) respondResyncSpecRequest( + ctx context.Context, evtDataType types.CloudEventsDataType, evt cloudevents.Event) error { + resourceVersions, err := payload.DecodeSpecResyncRequest(evt) + if err != nil { + return err + } + + eventType := types.CloudEventsType{ + CloudEventsDataType: evtDataType, + SubResource: types.SubResourceSpec, + Action: types.ResyncResponseAction, + } + + clusterName, err := evt.Context.GetExtension(types.ExtensionClusterName) + if err != nil { + return err + } + + objs, err := c.lister.List(types.ListOptions{ClusterName: fmt.Sprintf("%s", clusterName), Source: c.sourceID}) + if err != nil { + return err + } + + for _, obj := range objs { + lastResourceVersion := findResourceVersion(string(obj.GetUID()), resourceVersions.Versions) + currentResourceVersion, err := strconv.ParseInt(obj.GetResourceVersion(), 10, 64) + if err != nil { + continue + } + + if currentResourceVersion > lastResourceVersion { + if err := c.Publish(ctx, eventType, obj); err != nil { + return err + } + } + } + + // the resources do not exist on the source, but exist on the agent, delete them + for _, rv := range resourceVersions.Versions { + _, exists := getObj(rv.ResourceID, objs) + 
if exists { + continue + } + + // send a delete event for the current resource + evt := types.NewEventBuilder(c.sourceID, eventType). + WithResourceID(rv.ResourceID). + WithResourceVersion(rv.ResourceVersion). + WithClusterName(fmt.Sprintf("%s", clusterName)). + WithDeletionTimestamp(metav1.Now().Time). + NewEvent() + if err := c.publish(ctx, evt); err != nil { + return err + } + } + + return nil +} + +func (c *CloudEventSourceClient[T]) statusAction(clusterName string, obj T) (evt types.ResourceAction, err error) { + objs, err := c.lister.List(types.ListOptions{ClusterName: clusterName, Source: c.sourceID}) + if err != nil { + return evt, err + } + + lastObj, exists := getObj(string(obj.GetUID()), objs) + if !exists { + return evt, nil + } + + lastStatusHash, err := c.statusHashGetter(lastObj) + if err != nil { + klog.Warningf("failed to hash object %s status, %v", lastObj.GetUID(), err) + return evt, err + } + + currentStatusHash, err := c.statusHashGetter(obj) + if err != nil { + klog.Warningf("failed to hash object %s status, %v", obj.GetUID(), err) + return evt, nil + } + + if lastStatusHash == currentStatusHash { + return evt, nil + } + + return types.StatusModified, nil +} + +func findResourceVersion(id string, versions []payload.ResourceVersion) int64 { + for _, version := range versions { + if id == version.ResourceID { + return version.ResourceVersion + } + } + + return 0 +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types/types.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types/types.go new file mode 100644 index 000000000..b39361162 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types/types.go @@ -0,0 +1,283 @@ +package types + +import ( + "fmt" + "strings" + "time" + + cloudevents "github.com/cloudevents/sdk-go/v2" + "github.com/google/uuid" +) + +const ( + // ClusterAll is the default argument to specify on a context when you want to list or filter 
resources across all
+ // managed clusters.
+ ClusterAll = ""
+
+ // SourceAll is the default argument to specify on a context when you want to list or filter resources across all
+ // sources.
+ SourceAll = ""
+)
+
+// EventSubResource describes the subresource of a cloud event. Only `spec` and `status` are supported.
+type EventSubResource string
+
+const (
+ // SubResourceSpec represents the cloud event data is from the resource spec.
+ SubResourceSpec EventSubResource = "spec"
+
+ // SubResourceStatus represents the cloud event data is from the resource status.
+ SubResourceStatus EventSubResource = "status"
+)
+
+// EventAction describes the expected action of a cloud event.
+type EventAction string
+
+const (
+ // ResyncRequestAction represents the cloud event is for the resync request.
+ ResyncRequestAction EventAction = "resync_request"
+
+ // ResyncResponseAction represents the cloud event is for the resync response.
+ ResyncResponseAction EventAction = "resync_response"
+)
+
+const (
+ // ExtensionResourceID is the cloud event extension key of the resource ID.
+ ExtensionResourceID = "resourceid"
+
+ // ExtensionResourceVersion is the cloud event extension key of the resource version.
+ ExtensionResourceVersion = "resourceversion"
+
+ // ExtensionStatusUpdateSequenceID is the cloud event extension key of the status update event sequence ID.
+ // The status update event sequence id represents the order in which status update events occur on a single agent.
+ ExtensionStatusUpdateSequenceID = "sequenceid"
+
+ // ExtensionDeletionTimestamp is the cloud event extension key of the deletion timestamp.
+ ExtensionDeletionTimestamp = "deletiontimestamp"
+
+ // ExtensionClusterName is the cloud event extension key of the cluster name.
+ ExtensionClusterName = "clustername"
+
+ // ExtensionOriginalSource is the cloud event extension key of the original source. 
+ ExtensionOriginalSource = "originalsource"
+)
+
+// ResourceAction represents an action on a resource object on the source or agent.
+type ResourceAction string
+
+const (
+ // Added represents a resource is added on the source part.
+ Added ResourceAction = "ADDED"
+
+ // Modified represents a resource is modified on the source part.
+ Modified ResourceAction = "MODIFIED"
+
+ // StatusModified represents the status of a resource is modified on the agent part.
+ StatusModified ResourceAction = "STATUSMODIFIED"
+
+ // Deleted represents a resource is deleted from the source part.
+ Deleted ResourceAction = "DELETED"
+)
+
+const (
+ EventsTopicPattern = `^(\$share/[a-z0-9-]+/)?([a-z]+)/([a-z0-9-]+|\+)/([a-z]+)/([a-z0-9-]+|\+)/(sourceevents|agentevents)$`
+ SourceEventsTopicPattern = `^(\$share/[a-z0-9-]+/)?([a-z]+)/([a-z0-9-]+|\+)/([a-z]+)/([a-z0-9-]+|\+)/sourceevents$`
+ AgentEventsTopicPattern = `^(\$share/[a-z0-9-]+/)?([a-z]+)/([a-z0-9-]+|\+)/([a-z]+)/([a-z0-9-]+|\+)/agentevents$`
+ SourceBroadcastTopicPattern = `^(\$share/[a-z0-9-]+/)?([a-z]+)/([a-z0-9-]+|\+)/sourcebroadcast$`
+ AgentBroadcastTopicPattern = `^(\$share/[a-z0-9-]+/)?([a-z]+)/([a-z0-9-]+|\+)/agentbroadcast$`
+)
+
+// Topics represents required messaging system topics for a source or agent.
+type Topics struct {
+ // SourceEvents topic is a topic for sources to publish their resource create/update/delete events or status resync events
+ // - A source uses this topic to publish its resource create/update/delete request or status resync request with
+ // its sourceID to a specified agent
+ // - An agent subscribes to this topic with its cluster name to respond to sources resource create/update/delete
+ // request or status resync request
+ // The topic format is `^(\$share/[a-z0-9-]+/)?([a-z]+)/([a-z0-9-]+|\+)/([a-z]+)/([a-z0-9-]+|\+)/sourceevents$`, e.g. 
+ // sources/+/clusters/+/sourceevents, sources/source1/clusters/+/sourceevents, sources/source1/clusters/cluster1/sourceevents
+ // or $share/source-group/sources/+/clusters/+/sourceevents
+ SourceEvents string `json:"sourceEvents" yaml:"sourceEvents"`
+
+ // AgentEvents topic is a topic for agents to publish their resource status update events or spec resync events
+ // - An agent uses this topic to publish the resource status update request or spec resync request with its
+ // cluster name to a specified source.
+ // - A source subscribes to this topic with its sourceID to respond to agents resource status update request or spec
+ // resync request
+ // The topic format is `^(\$share/[a-z0-9-]+/)?([a-z]+)/([a-z0-9-]+|\+)/([a-z]+)/([a-z0-9-]+|\+)/agentevents$`, e.g.
+ // sources/+/clusters/+/agentevents, sources/source1/clusters/+/agentevents, sources/source1/clusters/cluster1/agentevents
+ // or $share/agent-group/+/clusters/+/agentevents
+ AgentEvents string `json:"agentEvents" yaml:"agentEvents"`
+
+ // SourceBroadcast is an optional topic, it is for a source to publish its events to all agents, currently, we use
+ // this topic to resync resource status from all agents for a source that does not know the exact agents, e.g.
+ // - A source uses this topic to publish its resource status resync request with its sourceID to all the agents
+ // - Each agent subscribes to this topic to respond to sources resource status resync request
+ // The topic format is `^(\$share/[a-z0-9-]+/)?([a-z]+)/([a-z0-9-]+|\+)/sourcebroadcast$`, e.g.
+ // sources/+/sourcebroadcast, sources/source1/sourcebroadcast or $share/source-group/sources/+/sourcebroadcast
+ SourceBroadcast string `json:"sourceBroadcast,omitempty" yaml:"sourceBroadcast,omitempty"`
+
+ // AgentBroadcast is an optional topic, it is for an agent to publish its events to all sources, currently, we use
+ // this topic to resync resources from all sources for an agent that does not know the exact sources, e.g. 
+ // - An agent uses this topic to publish the spec resync request with its cluster name to all the sources.
+ // - Each source subscribes to this topic to respond to agents spec resync request
+ // The topic format is `^(\$share/[a-z0-9-]+/)?([a-z]+)/([a-z0-9-]+|\+)/agentbroadcast$`, e.g.
+ // clusters/+/agentbroadcast, clusters/cluster1/agentbroadcast or $share/agent-group/clusters/+/agentbroadcast
+ AgentBroadcast string `json:"agentBroadcast,omitempty" yaml:"agentBroadcast,omitempty"`
+}
+
+// ListOptions is the query options for listing the resource objects from the source/agent.
+type ListOptions struct {
+ // Source uses the cluster name to restrict the list of returned objects by their cluster name.
+ // Defaults to all clusters.
+ ClusterName string
+
+ // Agent uses the source ID to restrict the list of returned objects by their source ID.
+ // Defaults to all sources.
+ Source string
+}
+
+// CloudEventsDataType uniquely identifies the type of cloud event data.
+type CloudEventsDataType struct {
+ Group string
+ Version string
+ Resource string
+}
+
+func (t CloudEventsDataType) String() string {
+ return fmt.Sprintf("%s.%s.%s", t.Group, t.Version, t.Resource)
+}
+
+// CloudEventsType represents the type of cloud events, which describes the type of cloud event data.
+type CloudEventsType struct {
+ // CloudEventsDataType uniquely identifies the type of cloud event data.
+ CloudEventsDataType
+
+ // SubResource represents the cloud event data is from the resource spec or status.
+ SubResource EventSubResource
+
+ // Action represents the expected action for this cloud event.
+ Action EventAction
+}
+
+func (t CloudEventsType) String() string {
+ return fmt.Sprintf("%s.%s.%s.%s.%s", t.Group, t.Version, t.Resource, t.SubResource, t.Action)
+}
+
+// ParseCloudEventsDataType parses the cloud event data type to a struct object.
+// The type format is `<group>.<version>.<resource>`. 
+func ParseCloudEventsDataType(cloudEventsDataType string) (*CloudEventsDataType, error) {
+ types := strings.Split(cloudEventsDataType, ".")
+ length := len(types)
+ if length < 3 {
+ return nil, fmt.Errorf("unsupported cloudevents data type format")
+ }
+ return &CloudEventsDataType{
+ Group: strings.Join(types[0:length-2], "."),
+ Version: types[length-2],
+ Resource: types[length-1],
+ }, nil
+}
+
+// ParseCloudEventsType parses the cloud event type to a struct object.
+// The type format is `<group>.<version>.<resource>.<subresource>.<action>`.
+// The `<subresource>` must be one of "spec" and "status".
+func ParseCloudEventsType(cloudEventsType string) (*CloudEventsType, error) {
+ types := strings.Split(cloudEventsType, ".")
+ length := len(types)
+ if length < 5 {
+ return nil, fmt.Errorf("unsupported cloudevents type format")
+ }
+
+ subResource := EventSubResource(types[length-2])
+ if subResource != SubResourceSpec && subResource != SubResourceStatus {
+ return nil, fmt.Errorf("unsupported subresource %s", subResource)
+ }
+
+ return &CloudEventsType{
+ CloudEventsDataType: CloudEventsDataType{
+ Group: strings.Join(types[0:length-4], "."),
+ Version: types[length-4],
+ Resource: types[length-3],
+ },
+ SubResource: subResource,
+ Action: EventAction(types[length-1]),
+ }, nil
+}
+
+type EventBuilder struct {
+ source string
+ clusterName string
+ originalSource string
+ resourceID string
+ sequenceID string
+ resourceVersion *int64
+ eventType CloudEventsType
+ deletionTimestamp time.Time
+}
+
+func NewEventBuilder(source string, eventType CloudEventsType) *EventBuilder {
+ return &EventBuilder{
+ source: source,
+ eventType: eventType,
+ }
+}
+
+func (b *EventBuilder) WithResourceID(resourceID string) *EventBuilder {
+ b.resourceID = resourceID
+ return b
+}
+
+func (b *EventBuilder) WithResourceVersion(resourceVersion int64) *EventBuilder {
+ b.resourceVersion = &resourceVersion
+ return b
+}
+
+func (b *EventBuilder) WithStatusUpdateSequenceID(sequenceID string) *EventBuilder {
+ b.sequenceID = sequenceID
+ 
return b +} + +func (b *EventBuilder) WithClusterName(clusterName string) *EventBuilder { + b.clusterName = clusterName + return b +} + +func (b *EventBuilder) WithOriginalSource(originalSource string) *EventBuilder { + b.originalSource = originalSource + return b +} + +func (b *EventBuilder) WithDeletionTimestamp(timestamp time.Time) *EventBuilder { + b.deletionTimestamp = timestamp + return b +} + +func (b *EventBuilder) NewEvent() cloudevents.Event { + evt := cloudevents.NewEvent() + evt.SetID(uuid.New().String()) + evt.SetType(b.eventType.String()) + evt.SetTime(time.Now()) + evt.SetSource(b.source) + + evt.SetExtension(ExtensionClusterName, b.clusterName) + evt.SetExtension(ExtensionOriginalSource, b.originalSource) + + if len(b.resourceID) != 0 { + evt.SetExtension(ExtensionResourceID, b.resourceID) + } + + if b.resourceVersion != nil { + evt.SetExtension(ExtensionResourceVersion, *b.resourceVersion) + } + + if len(b.sequenceID) != 0 { + evt.SetExtension(ExtensionStatusUpdateSequenceID, b.sequenceID) + } + + if !b.deletionTimestamp.IsZero() { + evt.SetExtension(ExtensionDeletionTimestamp, b.deletionTimestamp) + } + + return evt +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client/manifestwork.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client/manifestwork.go new file mode 100644 index 000000000..2b0624241 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client/manifestwork.go @@ -0,0 +1,160 @@ +package client + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubetypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog/v2" + + workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" + workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" + workv1 
"open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher" +) + +// ManifestWorkAgentClient implements the ManifestWorkInterface. It sends the manifestworks status back to source by +// CloudEventAgentClient. +type ManifestWorkAgentClient struct { + cloudEventsClient *generic.CloudEventAgentClient[*workv1.ManifestWork] + watcher *watcher.ManifestWorkWatcher + lister workv1lister.ManifestWorkNamespaceLister +} + +var _ workv1client.ManifestWorkInterface = &ManifestWorkAgentClient{} + +func NewManifestWorkAgentClient(cloudEventsClient *generic.CloudEventAgentClient[*workv1.ManifestWork], watcher *watcher.ManifestWorkWatcher) *ManifestWorkAgentClient { + return &ManifestWorkAgentClient{ + cloudEventsClient: cloudEventsClient, + watcher: watcher, + } +} + +func (c *ManifestWorkAgentClient) SetLister(lister workv1lister.ManifestWorkNamespaceLister) { + c.lister = lister +} + +func (c *ManifestWorkAgentClient) Create(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.CreateOptions) (*workv1.ManifestWork, error) { + return nil, errors.NewMethodNotSupported(common.ManifestWorkGR, "create") +} + +func (c *ManifestWorkAgentClient) Update(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.UpdateOptions) (*workv1.ManifestWork, error) { + return nil, errors.NewMethodNotSupported(common.ManifestWorkGR, "update") +} + +func (c *ManifestWorkAgentClient) UpdateStatus(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.UpdateOptions) (*workv1.ManifestWork, error) { + return nil, errors.NewMethodNotSupported(common.ManifestWorkGR, "updatestatus") +} + +func (c *ManifestWorkAgentClient) Delete(ctx context.Context, name string, opts 
metav1.DeleteOptions) error { + return errors.NewMethodNotSupported(common.ManifestWorkGR, "delete") +} + +func (c *ManifestWorkAgentClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + return errors.NewMethodNotSupported(common.ManifestWorkGR, "deletecollection") +} + +func (c *ManifestWorkAgentClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*workv1.ManifestWork, error) { + klog.V(4).Infof("getting manifestwork %s", name) + return c.lister.Get(name) +} + +func (c *ManifestWorkAgentClient) List(ctx context.Context, opts metav1.ListOptions) (*workv1.ManifestWorkList, error) { + klog.V(4).Infof("sync manifestworks") + // send resync request to fetch manifestworks from source when the ManifestWorkInformer starts + if err := c.cloudEventsClient.Resync(ctx, types.SourceAll); err != nil { + return nil, err + } + + return &workv1.ManifestWorkList{}, nil +} + +func (c *ManifestWorkAgentClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.watcher, nil +} + +func (c *ManifestWorkAgentClient) Patch(ctx context.Context, name string, pt kubetypes.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *workv1.ManifestWork, err error) { + klog.V(4).Infof("patching manifestwork %s", name) + + lastWork, err := c.lister.Get(name) + if err != nil { + return nil, err + } + + patchedWork, err := utils.Patch(pt, lastWork, data) + if err != nil { + return nil, err + } + + eventDataType, err := types.ParseCloudEventsDataType(patchedWork.Annotations[common.CloudEventsDataTypeAnnotationKey]) + if err != nil { + return nil, err + } + + eventType := types.CloudEventsType{ + CloudEventsDataType: *eventDataType, + SubResource: types.SubResourceStatus, + } + + newWork := patchedWork.DeepCopy() + + statusUpdated, err := isStatusUpdate(subresources) + if err != nil { + return nil, err + } + + if statusUpdated { + eventType.Action = 
common.UpdateRequestAction + if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { + return nil, err + } + + // refresh the work status in the ManifestWorkInformer local cache with patched work. + c.watcher.Receive(watch.Event{Type: watch.Modified, Object: newWork}) + return newWork, nil + } + + // the finalizers of a deleting manifestwork are removed, marking the manifestwork status to deleted and sending + // it back to source + if !newWork.DeletionTimestamp.IsZero() && len(newWork.Finalizers) == 0 { + meta.SetStatusCondition(&newWork.Status.Conditions, metav1.Condition{ + Type: common.ManifestsDeleted, + Status: metav1.ConditionTrue, + Reason: "ManifestsDeleted", + Message: fmt.Sprintf("The manifests are deleted from the cluster %s", newWork.Namespace), + }) + + eventType.Action = common.DeleteRequestAction + if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { + return nil, err + } + + // delete the manifestwork from the ManifestWorkInformer local cache. + c.watcher.Receive(watch.Event{Type: watch.Deleted, Object: newWork}) + return newWork, nil + } + + // refresh the work in the ManifestWorkInformer local cache with patched work. 
+ c.watcher.Receive(watch.Event{Type: watch.Modified, Object: newWork}) + return newWork, nil +} + +func isStatusUpdate(subresources []string) (bool, error) { + if len(subresources) == 0 { + return false, nil + } + + if len(subresources) == 1 && subresources[0] == "status" { + return true, nil + } + + return false, fmt.Errorf("unsupported subresources %v", subresources) +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifest.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifest.go new file mode 100644 index 000000000..bb0f4f9ad --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifest.go @@ -0,0 +1,193 @@ +package codec + +import ( + "fmt" + "strconv" + + "github.com/bwmarrin/snowflake" + cloudevents "github.com/cloudevents/sdk-go/v2" + cloudeventstypes "github.com/cloudevents/sdk-go/v2/types" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + kubetypes "k8s.io/apimachinery/pkg/types" + + "open-cluster-management.io/api/utils/work/v1/utils" + "open-cluster-management.io/api/utils/work/v1/workvalidator" + workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload" +) + +var sequenceGenerator *snowflake.Node + +func init() { + // init the snowflake id generator with node id 1 for each single agent. Each single agent has its own consumer id + // to be identified, and we can ensure the order of status update event from the same agent via sequence id. The + // events from different agents are independent, hence the ordering among them needs not to be guaranteed. 
+ // + // The snowflake `NewNode` returns error only when the snowflake node id is less than 1 or greater than 1024, so the + // error can be ignored here. + sequenceGenerator, _ = snowflake.NewNode(1) +} + +// ManifestCodec is a codec to encode/decode a ManifestWork/cloudevent with Manifest for an agent. +type ManifestCodec struct { + restMapper meta.RESTMapper +} + +func NewManifestCodec(restMapper meta.RESTMapper) *ManifestCodec { + return &ManifestCodec{ + restMapper: restMapper, + } +} + +// EventDataType returns the event data type for `io.open-cluster-management.works.v1alpha1.manifests`. +func (c *ManifestCodec) EventDataType() types.CloudEventsDataType { + return payload.ManifestEventDataType +} + +// Encode the status of a ManifestWork to a cloudevent with ManifestStatus. +func (c *ManifestCodec) Encode(source string, eventType types.CloudEventsType, work *workv1.ManifestWork) (*cloudevents.Event, error) { + if eventType.CloudEventsDataType != payload.ManifestEventDataType { + return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) + } + + resourceVersion, err := strconv.ParseInt(work.ResourceVersion, 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse the resourceversion of the work %s, %v", work.UID, err) + } + + originalSource, ok := work.Labels[common.CloudEventsOriginalSourceLabelKey] + if !ok { + return nil, fmt.Errorf("failed to find originalsource from the work %s", work.UID) + } + + if len(work.Spec.Workload.Manifests) != 1 { + return nil, fmt.Errorf("too many manifests in the work %s", work.UID) + } + + evt := types.NewEventBuilder(source, eventType). + WithResourceID(string(work.UID)). + WithStatusUpdateSequenceID(sequenceGenerator.Generate().String()). + WithResourceVersion(resourceVersion). + WithClusterName(work.Namespace). + WithOriginalSource(originalSource). 
+ NewEvent() + + statusPayload := &payload.ManifestStatus{ + Conditions: work.Status.Conditions, + } + + if len(work.Status.ResourceStatus.Manifests) != 0 { + statusPayload.Status = &work.Status.ResourceStatus.Manifests[0] + } + + if err := evt.SetData(cloudevents.ApplicationJSON, statusPayload); err != nil { + return nil, fmt.Errorf("failed to encode manifestwork status to a cloudevent: %v", err) + } + + return &evt, nil +} + +// Decode a cloudevent whose data is Manifest to a ManifestWork. +func (c *ManifestCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWork, error) { + eventType, err := types.ParseCloudEventsType(evt.Type()) + if err != nil { + return nil, fmt.Errorf("failed to parse cloud event type %s, %v", evt.Type(), err) + } + + if eventType.CloudEventsDataType != payload.ManifestEventDataType { + return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) + } + + evtExtensions := evt.Context.GetExtensions() + + resourceID, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceID]) + if err != nil { + return nil, fmt.Errorf("failed to get resourceid extension: %v", err) + } + + resourceVersion, err := cloudeventstypes.ToInteger(evtExtensions[types.ExtensionResourceVersion]) + if err != nil { + return nil, fmt.Errorf("failed to get resourceversion extension: %v", err) + } + + clusterName, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionClusterName]) + if err != nil { + return nil, fmt.Errorf("failed to get clustername extension: %v", err) + } + + work := &workv1.ManifestWork{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + UID: kubetypes.UID(resourceID), + ResourceVersion: fmt.Sprintf("%d", resourceVersion), + Name: resourceID, + Namespace: clusterName, + Labels: map[string]string{ + common.CloudEventsOriginalSourceLabelKey: evt.Source(), + }, + Annotations: map[string]string{ + common.CloudEventsDataTypeAnnotationKey: eventType.CloudEventsDataType.String(), + }, + }, + 
} + + if _, ok := evtExtensions[types.ExtensionDeletionTimestamp]; ok { + deletionTimestamp, err := cloudeventstypes.ToTime(evtExtensions[types.ExtensionDeletionTimestamp]) + if err != nil { + return nil, fmt.Errorf("failed to get deletiontimestamp, %v", err) + } + + work.DeletionTimestamp = &metav1.Time{Time: deletionTimestamp} + return work, nil + } + + manifestPayload := &payload.Manifest{} + if err := evt.DataAs(manifestPayload); err != nil { + return nil, fmt.Errorf("failed to unmarshal event data %s, %v", string(evt.Data()), err) + } + + unstructuredObj := manifestPayload.Manifest + rawJson, err := unstructuredObj.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("failed to get manifest GVR from event %s, %v", string(evt.Data()), err) + } + + work.Spec = workv1.ManifestWorkSpec{ + Workload: workv1.ManifestsTemplate{ + Manifests: []workv1.Manifest{{RawExtension: runtime.RawExtension{Raw: rawJson}}}, + }, + DeleteOption: manifestPayload.DeleteOption, + } + + if manifestPayload.ConfigOption != nil { + _, gvr, err := utils.BuildResourceMeta(0, &unstructuredObj, c.restMapper) + if err != nil { + return nil, fmt.Errorf("failed to get manifest GVR from event %s, %v", string(evt.Data()), err) + } + + work.Spec.ManifestConfigs = []workv1.ManifestConfigOption{ + { + ResourceIdentifier: workv1.ResourceIdentifier{ + Group: gvr.Group, + Resource: gvr.Resource, + Name: unstructuredObj.GetName(), + Namespace: unstructuredObj.GetNamespace(), + }, + FeedbackRules: manifestPayload.ConfigOption.FeedbackRules, + UpdateStrategy: manifestPayload.ConfigOption.UpdateStrategy, + }, + } + } + + // validate the manifest + if err := workvalidator.ManifestValidator.ValidateManifests(work.Spec.Workload.Manifests); err != nil { + return nil, fmt.Errorf("manifest is invalid, %v", err) + } + + return work, nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifestbundle.go 
b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifestbundle.go new file mode 100644 index 000000000..fc17ffaa4 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/codec/manifestbundle.go @@ -0,0 +1,141 @@ +package codec + +import ( + "fmt" + "strconv" + + cloudevents "github.com/cloudevents/sdk-go/v2" + cloudeventstypes "github.com/cloudevents/sdk-go/v2/types" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubetypes "k8s.io/apimachinery/pkg/types" + + workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/apis/work/v1/validator" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload" +) + +// ManifestBundleCodec is a codec to encode/decode a ManifestWork/cloudevent with ManifestBundle for an agent. +type ManifestBundleCodec struct{} + +func NewManifestBundleCodec() *ManifestBundleCodec { + return &ManifestBundleCodec{} +} + +// EventDataType always returns the event data type `io.open-cluster-management.works.v1alpha1.manifestbundles`. +func (c *ManifestBundleCodec) EventDataType() types.CloudEventsDataType { + return payload.ManifestBundleEventDataType +} + +// Encode the status of a ManifestWork to a cloudevent with ManifestBundleStatus. 
+func (c *ManifestBundleCodec) Encode(source string, eventType types.CloudEventsType, work *workv1.ManifestWork) (*cloudevents.Event, error) { + if eventType.CloudEventsDataType != payload.ManifestBundleEventDataType { + return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) + } + + resourceVersion, err := strconv.ParseInt(work.ResourceVersion, 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse the resourceversion of the work %s, %v", work.UID, err) + } + + originalSource, ok := work.Labels[common.CloudEventsOriginalSourceLabelKey] + if !ok { + return nil, fmt.Errorf("failed to find originalsource from the work %s", work.UID) + } + + evt := types.NewEventBuilder(source, eventType). + WithResourceID(string(work.UID)). + WithStatusUpdateSequenceID(sequenceGenerator.Generate().String()). + WithResourceVersion(resourceVersion). + WithClusterName(work.Namespace). + WithOriginalSource(originalSource). + NewEvent() + + manifestBundleStatus := &payload.ManifestBundleStatus{ + Conditions: work.Status.Conditions, + ResourceStatus: work.Status.ResourceStatus.Manifests, + } + + if err := evt.SetData(cloudevents.ApplicationJSON, manifestBundleStatus); err != nil { + return nil, fmt.Errorf("failed to encode manifestwork status to a cloudevent: %v", err) + } + + return &evt, nil +} + +// Decode a cloudevent whose data is ManifestBundle to a ManifestWork. 
+func (c *ManifestBundleCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWork, error) { + eventType, err := types.ParseCloudEventsType(evt.Type()) + if err != nil { + return nil, fmt.Errorf("failed to parse cloud event type %s, %v", evt.Type(), err) + } + + if eventType.CloudEventsDataType != payload.ManifestBundleEventDataType { + return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) + } + + evtExtensions := evt.Context.GetExtensions() + + resourceID, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceID]) + if err != nil { + return nil, fmt.Errorf("failed to get resourceid extension: %v", err) + } + + resourceVersion, err := cloudeventstypes.ToInteger(evtExtensions[types.ExtensionResourceVersion]) + if err != nil { + return nil, fmt.Errorf("failed to get resourceversion extension: %v", err) + } + + clusterName, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionClusterName]) + if err != nil { + return nil, fmt.Errorf("failed to get clustername extension: %v", err) + } + + work := &workv1.ManifestWork{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + UID: kubetypes.UID(resourceID), + ResourceVersion: fmt.Sprintf("%d", resourceVersion), + Name: resourceID, + Namespace: clusterName, + Labels: map[string]string{ + common.CloudEventsOriginalSourceLabelKey: evt.Source(), + }, + Annotations: map[string]string{ + common.CloudEventsDataTypeAnnotationKey: eventType.CloudEventsDataType.String(), + }, + }, + } + + if _, ok := evtExtensions[types.ExtensionDeletionTimestamp]; ok { + deletionTimestamp, err := cloudeventstypes.ToTime(evtExtensions[types.ExtensionDeletionTimestamp]) + if err != nil { + return nil, fmt.Errorf("failed to get deletiontimestamp, %v", err) + } + + work.DeletionTimestamp = &metav1.Time{Time: deletionTimestamp} + return work, nil + } + + manifests := &payload.ManifestBundle{} + if err := evt.DataAs(manifests); err != nil { + return nil, fmt.Errorf("failed to 
unmarshal event data %s, %v", string(evt.Data()), err) + } + + work.Spec = workv1.ManifestWorkSpec{ + Workload: workv1.ManifestsTemplate{ + Manifests: manifests.Manifests, + }, + DeleteOption: manifests.DeleteOption, + ManifestConfigs: manifests.ManifestConfigs, + } + + // validate the manifests + if err := validator.ManifestValidator.ValidateManifests(work.Spec.Workload.Manifests); err != nil { + return nil, fmt.Errorf("manifests are invalid, %v", err) + } + + return work, nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/handler/resourcehandler.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/handler/resourcehandler.go new file mode 100644 index 000000000..983c7634c --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/handler/resourcehandler.go @@ -0,0 +1,59 @@ +package handler + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/watch" + workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" + + workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher" +) + +// NewManifestWorkAgentHandler returns a ResourceHandler for a ManifestWork on managed cluster. It sends the kube events +// with ManifestWorkWatcher after CloudEventAgentClient received the ManifestWork specs from source, then the +// ManifestWorkInformer handles the kube events in its local cache. 
+func NewManifestWorkAgentHandler(lister workv1lister.ManifestWorkNamespaceLister, watcher *watcher.ManifestWorkWatcher) generic.ResourceHandler[*workv1.ManifestWork] { + return func(action types.ResourceAction, work *workv1.ManifestWork) error { + switch action { + case types.Added: + watcher.Receive(watch.Event{Type: watch.Added, Object: work}) + case types.Modified: + lastWork, err := lister.Get(work.Name) + if err != nil { + return err + } + + updatedWork := work.DeepCopy() + + // restore the fields that are maintained by local agent + updatedWork.Labels = lastWork.Labels + updatedWork.Annotations = lastWork.Annotations + updatedWork.Finalizers = lastWork.Finalizers + updatedWork.Status = lastWork.Status + + watcher.Receive(watch.Event{Type: watch.Modified, Object: updatedWork}) + case types.Deleted: + // the manifestwork is deleting on the source, we just update its deletion timestamp. + lastWork, err := lister.Get(work.Name) + if errors.IsNotFound(err) { + return nil + } + + if err != nil { + return err + } + + updatedWork := lastWork.DeepCopy() + updatedWork.DeletionTimestamp = work.DeletionTimestamp + watcher.Receive(watch.Event{Type: watch.Modified, Object: updatedWork}) + default: + return fmt.Errorf("unsupported resource action %s", action) + } + + return nil + } +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/clientbuilder.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/clientbuilder.go new file mode 100644 index 000000000..c75dd34c4 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/clientbuilder.go @@ -0,0 +1,270 @@ +package work + +import ( + "context" + "fmt" + "time" + + "k8s.io/client-go/rest" + "k8s.io/klog/v2" + + workclientset "open-cluster-management.io/api/client/work/clientset/versioned" + workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" + workinformers 
"open-cluster-management.io/api/client/work/informers/externalversions" + workv1informers "open-cluster-management.io/api/client/work/informers/externalversions/work/v1" + workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + agentclient "open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/client" + agenthandler "open-cluster-management.io/sdk-go/pkg/cloudevents/work/agent/handler" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/internal" + sourceclient "open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client" + sourcehandler "open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/handler" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher" +) + +const defaultInformerResyncTime = 10 * time.Minute + +// ClientHolder holds a manifestwork client that implements the ManifestWorkInterface based on different configuration +// and a ManifestWorkInformer that is built with the manifestWork client. +// +// ClientHolder also implements the ManifestWorksGetter interface. 
+type ClientHolder struct { + workClientSet workclientset.Interface + manifestWorkInformer workv1informers.ManifestWorkInformer +} + +var _ workv1client.ManifestWorksGetter = &ClientHolder{} + +// WorkInterface returns a workclientset Interface +func (h *ClientHolder) WorkInterface() workclientset.Interface { + return h.workClientSet +} + +// ManifestWorks returns a ManifestWorkInterface +func (h *ClientHolder) ManifestWorks(namespace string) workv1client.ManifestWorkInterface { + return h.workClientSet.WorkV1().ManifestWorks(namespace) +} + +// ManifestWorkInformer returns a ManifestWorkInformer +func (h *ClientHolder) ManifestWorkInformer() workv1informers.ManifestWorkInformer { + return h.manifestWorkInformer +} + +// ClientHolderBuilder builds the ClientHolder with different configuration. +type ClientHolderBuilder struct { + config any + codecs []generic.Codec[*workv1.ManifestWork] + informerOptions []workinformers.SharedInformerOption + informerResyncTime time.Duration + sourceID string + clusterName string + clientID string +} + +// NewClientHolderBuilder returns a ClientHolderBuilder with a given configuration. +// +// Available configurations: +// - Kubeconfig (*rest.Config): builds a manifestwork client with kubeconfig +// - MQTTOptions (*mqtt.MQTTOptions): builds a manifestwork client based on cloudevents with MQTT +// - GRPCOptions (*grpc.GRPCOptions): builds a manifestwork client based on cloudevents with GRPC +// +// TODO using a specified config instead of any +func NewClientHolderBuilder(config any) *ClientHolderBuilder { + return &ClientHolderBuilder{ + config: config, + informerResyncTime: defaultInformerResyncTime, + } +} + +// WithClientID set the client ID for source/agent cloudevents client. +func (b *ClientHolderBuilder) WithClientID(clientID string) *ClientHolderBuilder { + b.clientID = clientID + return b +} + +// WithSourceID set the source ID when building a manifestwork client for a source. 
+func (b *ClientHolderBuilder) WithSourceID(sourceID string) *ClientHolderBuilder { + b.sourceID = sourceID + return b +} + +// WithClusterName set the managed cluster name when building a manifestwork client for an agent. +func (b *ClientHolderBuilder) WithClusterName(clusterName string) *ClientHolderBuilder { + b.clusterName = clusterName + return b +} + +// WithCodecs add codecs when building a manifestwork client based on cloudevents. +func (b *ClientHolderBuilder) WithCodecs(codecs ...generic.Codec[*workv1.ManifestWork]) *ClientHolderBuilder { + b.codecs = codecs + return b +} + +// WithInformerConfig set the ManifestWorkInformer configs. If the resync time is not set, the default time (10 minutes) +// will be used when building the ManifestWorkInformer. +func (b *ClientHolderBuilder) WithInformerConfig( + resyncTime time.Duration, options ...workinformers.SharedInformerOption) *ClientHolderBuilder { + b.informerResyncTime = resyncTime + b.informerOptions = options + return b +} + +// NewSourceClientHolder returns a ClientHolder for source +func (b *ClientHolderBuilder) NewSourceClientHolder(ctx context.Context) (*ClientHolder, error) { + switch config := b.config.(type) { + case *rest.Config: + return b.newKubeClients(config) + case *mqtt.MQTTOptions: + return b.newSourceClients(ctx, mqtt.NewSourceOptions(config, b.clientID, b.sourceID)) + case *grpc.GRPCOptions: + return b.newSourceClients(ctx, grpc.NewSourceOptions(config, b.sourceID)) + default: + return nil, fmt.Errorf("unsupported client configuration type %T", config) + } +} + +// NewAgentClientHolder returns a ClientHolder for agent +func (b *ClientHolderBuilder) NewAgentClientHolder(ctx context.Context) (*ClientHolder, error) { + switch config := b.config.(type) { + case *rest.Config: + return b.newKubeClients(config) + case *mqtt.MQTTOptions: + return b.newAgentClients(ctx, mqtt.NewAgentOptions(config, b.clusterName, b.clientID)) + case *grpc.GRPCOptions: + return b.newAgentClients(ctx, 
grpc.NewAgentOptions(config, b.clusterName, b.clientID)) + default: + return nil, fmt.Errorf("unsupported client configuration type %T", config) + } +} + +func (b *ClientHolderBuilder) newAgentClients(ctx context.Context, agentOptions *options.CloudEventsAgentOptions) (*ClientHolder, error) { + if len(b.clientID) == 0 { + return nil, fmt.Errorf("client id is required") + } + + if len(b.clusterName) == 0 { + return nil, fmt.Errorf("cluster name is required") + } + + workLister := &ManifestWorkLister{} + watcher := watcher.NewManifestWorkWatcher() + cloudEventsClient, err := generic.NewCloudEventAgentClient[*workv1.ManifestWork]( + ctx, + agentOptions, + workLister, + ManifestWorkStatusHash, + b.codecs..., + ) + if err != nil { + return nil, err + } + + manifestWorkClient := agentclient.NewManifestWorkAgentClient(cloudEventsClient, watcher) + workClient := &internal.WorkV1ClientWrapper{ManifestWorkClient: manifestWorkClient} + workClientSet := &internal.WorkClientSetWrapper{WorkV1ClientWrapper: workClient} + factory := workinformers.NewSharedInformerFactoryWithOptions(workClientSet, b.informerResyncTime, b.informerOptions...) + informers := factory.Work().V1().ManifestWorks() + manifestWorkLister := informers.Lister() + namespacedLister := manifestWorkLister.ManifestWorks(b.clusterName) + + // Set informer lister back to work lister and client. + workLister.Lister = manifestWorkLister + // TODO the work client and informer share a same store in the current implementation, ideally, the store should be + // only written from the server. we may need to revisit the implementation in the future. 
+ manifestWorkClient.SetLister(namespacedLister) + + cloudEventsClient.Subscribe(ctx, agenthandler.NewManifestWorkAgentHandler(namespacedLister, watcher)) + + go func() { + for { + select { + case <-ctx.Done(): + return + case <-cloudEventsClient.ReconnectedChan(): + // when receiving a client reconnected signal, we resync all sources for this agent + // TODO after supporting multiple sources, we should only resync agent known sources + if err := cloudEventsClient.Resync(ctx, types.SourceAll); err != nil { + klog.Errorf("failed to send resync request, %v", err) + } + } + } + }() + + return &ClientHolder{ + workClientSet: workClientSet, + manifestWorkInformer: informers, + }, nil +} + +func (b *ClientHolderBuilder) newSourceClients(ctx context.Context, sourceOptions *options.CloudEventsSourceOptions) (*ClientHolder, error) { + if len(b.clientID) == 0 { + return nil, fmt.Errorf("client id is required") + } + + if len(b.sourceID) == 0 { + return nil, fmt.Errorf("source id is required") + } + + workLister := &ManifestWorkLister{} + watcher := watcher.NewManifestWorkWatcher() + cloudEventsClient, err := generic.NewCloudEventSourceClient[*workv1.ManifestWork]( + ctx, + sourceOptions, + workLister, + ManifestWorkStatusHash, + b.codecs..., + ) + if err != nil { + return nil, err + } + + manifestWorkClient := sourceclient.NewManifestWorkSourceClient(b.sourceID, cloudEventsClient, watcher) + workClient := &internal.WorkV1ClientWrapper{ManifestWorkClient: manifestWorkClient} + workClientSet := &internal.WorkClientSetWrapper{WorkV1ClientWrapper: workClient} + factory := workinformers.NewSharedInformerFactoryWithOptions(workClientSet, b.informerResyncTime, b.informerOptions...) + informers := factory.Work().V1().ManifestWorks() + manifestWorkLister := informers.Lister() + // Set informer lister back to work lister and client. 
+ workLister.Lister = manifestWorkLister + manifestWorkClient.SetLister(manifestWorkLister) + + sourceHandler := sourcehandler.NewManifestWorkSourceHandler(manifestWorkLister, watcher) + cloudEventsClient.Subscribe(ctx, sourceHandler.HandlerFunc()) + + go sourceHandler.Run(ctx.Done()) + go func() { + for { + select { + case <-ctx.Done(): + return + case <-cloudEventsClient.ReconnectedChan(): + // when receiving a client reconnected signal, we resync all clusters for this source + if err := cloudEventsClient.Resync(ctx, types.ClusterAll); err != nil { + klog.Errorf("failed to send resync request, %v", err) + } + } + } + }() + + return &ClientHolder{ + workClientSet: workClientSet, + manifestWorkInformer: informers, + }, nil +} + +func (b *ClientHolderBuilder) newKubeClients(config *rest.Config) (*ClientHolder, error) { + kubeWorkClientSet, err := workclientset.NewForConfig(config) + if err != nil { + return nil, err + } + + factory := workinformers.NewSharedInformerFactoryWithOptions(kubeWorkClientSet, b.informerResyncTime, b.informerOptions...) + return &ClientHolder{ + workClientSet: kubeWorkClientSet, + manifestWorkInformer: factory.Work().V1().ManifestWorks(), + }, nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/common/common.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/common/common.go new file mode 100644 index 000000000..53134eb89 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/common/common.go @@ -0,0 +1,29 @@ +package common + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + + workv1 "open-cluster-management.io/api/work/v1" +) + +const ( + // CloudEventsDataTypeAnnotationKey is the key of the cloudevents data type annotation. + CloudEventsDataTypeAnnotationKey = "cloudevents.open-cluster-management.io/datatype" + + // CloudEventsGenerationAnnotationKey is the key of the manifestwork generation annotation. 
+ CloudEventsGenerationAnnotationKey = "cloudevents.open-cluster-management.io/generation" +) + +// CloudEventsOriginalSourceLabelKey is the key of the cloudevents original source label. +const CloudEventsOriginalSourceLabelKey = "cloudevents.open-cluster-management.io/originalsource" + +// ManifestsDeleted indicates that the manifests are deleted. +const ManifestsDeleted = "Deleted" + +const ( + CreateRequestAction = "create_request" + UpdateRequestAction = "update_request" + DeleteRequestAction = "delete_request" +) + +var ManifestWorkGR = schema.GroupResource{Group: workv1.GroupName, Resource: "manifestworks"} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/configloader.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/configloader.go new file mode 100644 index 000000000..724143c33 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/configloader.go @@ -0,0 +1,81 @@ +package work + +import ( + "fmt" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/grpc" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options/mqtt" +) + +const ( + ConfigTypeKube = "kube" + ConfigTypeMQTT = "mqtt" + ConfigTypeGRPC = "grpc" +) + +// ConfigLoader loads a configuration object with a configuration file. +type ConfigLoader struct { + configType string + configPath string + + kubeConfig *rest.Config +} + +// NewConfigLoader returns a ConfigLoader with the given configuration type and configuration file path. +// +// Available configuration types: +// - kube +// - mqtt +// - grpc +func NewConfigLoader(configType, configPath string) *ConfigLoader { + return &ConfigLoader{ + configType: configType, + configPath: configPath, + } +} + +// WithKubeConfig sets a kube config, this config will be used when configuration type is kube and the kube +// configuration file path is not set. 
+func (l *ConfigLoader) WithKubeConfig(kubeConfig *rest.Config) *ConfigLoader { + l.kubeConfig = kubeConfig + return l +} + +// TODO using a specified config instead of any +func (l *ConfigLoader) LoadConfig() (string, any, error) { + switch l.configType { + case ConfigTypeKube: + if l.configPath == "" { + if l.kubeConfig == nil { + return "", nil, fmt.Errorf("neither the kube config path nor kube config object was specified") + } + + return l.kubeConfig.Host, l.kubeConfig, nil + } + + kubeConfig, err := clientcmd.BuildConfigFromFlags("", l.configPath) + if err != nil { + return "", nil, err + } + + return kubeConfig.Host, kubeConfig, nil + case ConfigTypeMQTT: + mqttOptions, err := mqtt.BuildMQTTOptionsFromFlags(l.configPath) + if err != nil { + return "", nil, err + } + + return mqttOptions.BrokerHost, mqttOptions, nil + case ConfigTypeGRPC: + grpcOptions, err := grpc.BuildGRPCOptionsFromFlags(l.configPath) + if err != nil { + return "", nil, err + } + + return grpcOptions.URL, grpcOptions, nil + } + + return "", nil, fmt.Errorf("unsupported config type %s", l.configType) +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/internal/clientset.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/internal/clientset.go new file mode 100644 index 000000000..768a6339b --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/internal/clientset.go @@ -0,0 +1,54 @@ +package internal + +import ( + discovery "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + + workclientset "open-cluster-management.io/api/client/work/clientset/versioned" + workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" + workv1alpha1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1alpha1" + sourceclient "open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client" +) + +// WorkClientSetWrapper wraps a work client that has a manifestwork client to a 
work clientset interface, this wrapper
+// will help us to build manifestwork informer factory easily.
+type WorkClientSetWrapper struct {
+	WorkV1ClientWrapper *WorkV1ClientWrapper
+}
+
+var _ workclientset.Interface = &WorkClientSetWrapper{}
+
+func (c *WorkClientSetWrapper) WorkV1() workv1client.WorkV1Interface {
+	return c.WorkV1ClientWrapper
+}
+
+func (c *WorkClientSetWrapper) WorkV1alpha1() workv1alpha1client.WorkV1alpha1Interface {
+	return nil
+}
+
+func (c *WorkClientSetWrapper) Discovery() discovery.DiscoveryInterface {
+	return nil
+}
+
+// WorkV1ClientWrapper wraps a manifestwork client to a WorkV1Interface
+type WorkV1ClientWrapper struct {
+	ManifestWorkClient workv1client.ManifestWorkInterface
+}
+
+var _ workv1client.WorkV1Interface = &WorkV1ClientWrapper{}
+
+func (c *WorkV1ClientWrapper) ManifestWorks(namespace string) workv1client.ManifestWorkInterface {
+	if sourceManifestWorkClient, ok := c.ManifestWorkClient.(*sourceclient.ManifestWorkSourceClient); ok {
+		sourceManifestWorkClient.SetNamespace(namespace)
+		return sourceManifestWorkClient
+	}
+	return c.ManifestWorkClient
+}
+
+func (c *WorkV1ClientWrapper) AppliedManifestWorks() workv1client.AppliedManifestWorkInterface {
+	return nil
+}
+
+func (c *WorkV1ClientWrapper) RESTClient() rest.Interface {
+	return nil
+}
diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/lister.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/lister.go
new file mode 100644
index 000000000..0911ee381
--- /dev/null
+++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/lister.go
@@ -0,0 +1,31 @@
+package work
+
+import (
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/selection"
+
+	workv1lister "open-cluster-management.io/api/client/work/listers/work/v1"
+	workv1 "open-cluster-management.io/api/work/v1"
+	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types"
+	"open-cluster-management.io/sdk-go/pkg/cloudevents/work/common"
+)
+
+// 
ManifestWorkLister list the ManifestWorks from a ManifestWorkInformer's local cache. +type ManifestWorkLister struct { + Lister workv1lister.ManifestWorkLister +} + +// List returns the ManifestWorks from a ManifestWorkInformer's local cache. +func (l *ManifestWorkLister) List(options types.ListOptions) ([]*workv1.ManifestWork, error) { + selector := labels.Everything() + if options.Source != types.SourceAll { + req, err := labels.NewRequirement(common.CloudEventsOriginalSourceLabelKey, selection.Equals, []string{options.Source}) + if err != nil { + return nil, err + } + + selector = labels.NewSelector().Add(*req) + } + + return l.Lister.ManifestWorks(options.ClusterName).List(selector) +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload/mainfiest.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload/mainfiest.go new file mode 100644 index 000000000..519729a05 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload/mainfiest.go @@ -0,0 +1,54 @@ +package payload + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" +) + +var ManifestEventDataType = types.CloudEventsDataType{ + Group: "io.open-cluster-management.works", + Version: "v1alpha1", + Resource: "manifests", +} + +// Manifest represents the data in a cloudevent, it contains a single manifest. +type Manifest struct { + // Manifest represents a resource to be deployed on managed cluster. + Manifest unstructured.Unstructured `json:"manifest"` + + // DeleteOption represents deletion strategy when this manifest is deleted. + DeleteOption *workv1.DeleteOption `json:"deleteOption,omitempty"` + + // ConfigOption represents the configuration of this manifest. 
+ ConfigOption *ManifestConfigOption `json:"configOption,omitempty"` +} + +// ManifestStatus represents the data in a cloudevent, it contains the status of a SingleManifest on a managed +// cluster. +type ManifestStatus struct { + // Conditions contains the different condition statuses for a SingleManifest on a managed cluster. + // Valid condition types are: + // 1. Applied represents the manifest of a SingleManifest is applied successfully on a managed cluster. + // 2. Progressing represents the manifest of a SingleManifest is being applied on a managed cluster. + // 3. Available represents the manifest of a SingleManifest exists on the managed cluster. + // 4. Degraded represents the current state of manifest of a SingleManifest does not match the desired state for a + // certain period. + // 5. Deleted represents the manifests of a SingleManifest is deleted from a managed cluster. + Conditions []metav1.Condition `json:"conditions"` + + // Status represents the conditions of this manifest on a managed cluster. + Status *workv1.ManifestCondition `json:"status,omitempty"` +} + +type ManifestConfigOption struct { + // FeedbackRules defines what resource status field should be returned. + // If it is not set or empty, no feedback rules will be honored. + FeedbackRules []workv1.FeedbackRule `json:"feedbackRules,omitempty"` + + // UpdateStrategy defines the strategy to update this manifest. + // UpdateStrategy is Update if it is not set. 
+	UpdateStrategy *workv1.UpdateStrategy `json:"updateStrategy,omitempty"`
+}
diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload/manifestbundle.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload/manifestbundle.go
new file mode 100644
index 000000000..f7ca9fa98
--- /dev/null
+++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload/manifestbundle.go
@@ -0,0 +1,43 @@
+package payload
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	workv1 "open-cluster-management.io/api/work/v1"
+	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types"
+)
+
+var ManifestBundleEventDataType = types.CloudEventsDataType{
+	Group:    "io.open-cluster-management.works",
+	Version:  "v1alpha1",
+	Resource: "manifestbundles",
+}
+
+// ManifestBundle represents the data in a cloudevent, it contains a bundle of manifests.
+type ManifestBundle struct {
+	// Manifests represents a list of Kubernetes resources to be deployed on a managed cluster.
+	Manifests []workv1.Manifest `json:"manifests"`
+
+	// DeleteOption represents deletion strategy when the manifests are deleted.
+	DeleteOption *workv1.DeleteOption `json:"deleteOption,omitempty"`
+
+	// ManifestConfigs represents the configurations of manifests.
+	ManifestConfigs []workv1.ManifestConfigOption `json:"manifestConfigs,omitempty"`
+}
+
+// ManifestBundleStatus represents the data in a cloudevent, it contains the status of a ManifestBundle on a managed
+// cluster.
+type ManifestBundleStatus struct {
+	// Conditions contains the different condition statuses for a ManifestBundle on managed cluster.
+	// Valid condition types are:
+	// 1. Applied represents the manifests in a ManifestBundle are applied successfully on a managed cluster.
+	// 2. Progressing represents the manifests in a ManifestBundle are being applied on a managed cluster.
+	// 3. Available represents the manifests in a ManifestBundle exist on a managed cluster.
+	// 4. 
Degraded represents the current state of manifests in a ManifestBundle do not match the desired state for a + // certain period. + // 5. Deleted represents the manifests in a ManifestBundle are deleted from a managed cluster. + Conditions []metav1.Condition `json:"conditions"` + + // ManifestResourceStatus represents the status of each resource in manifest work deployed on managed cluster. + ResourceStatus []workv1.ManifestCondition `json:"resourceStatus,omitempty"` +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client/manifestwork.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client/manifestwork.go new file mode 100644 index 000000000..ed6eb8a46 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/client/manifestwork.go @@ -0,0 +1,216 @@ +package client + +import ( + "context" + "fmt" + "strconv" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubetypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog/v2" + + workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" + workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" + workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher" +) + +// ManifestWorkSourceClient implements the ManifestWorkInterface. 
+type ManifestWorkSourceClient struct { + cloudEventsClient *generic.CloudEventSourceClient[*workv1.ManifestWork] + watcher *watcher.ManifestWorkWatcher + lister workv1lister.ManifestWorkLister + namespace string + sourceID string +} + +var _ workv1client.ManifestWorkInterface = &ManifestWorkSourceClient{} + +func NewManifestWorkSourceClient(sourceID string, + cloudEventsClient *generic.CloudEventSourceClient[*workv1.ManifestWork], + watcher *watcher.ManifestWorkWatcher) *ManifestWorkSourceClient { + return &ManifestWorkSourceClient{ + cloudEventsClient: cloudEventsClient, + watcher: watcher, + sourceID: sourceID, + } +} + +func (c *ManifestWorkSourceClient) SetLister(lister workv1lister.ManifestWorkLister) { + c.lister = lister +} + +func (mw *ManifestWorkSourceClient) SetNamespace(namespace string) { + mw.namespace = namespace +} + +func (c *ManifestWorkSourceClient) Create(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.CreateOptions) (*workv1.ManifestWork, error) { + _, err := c.lister.ManifestWorks(c.namespace).Get(manifestWork.Name) + if err == nil { + return nil, errors.NewAlreadyExists(common.ManifestWorkGR, manifestWork.Name) + } + + if !errors.IsNotFound(err) { + return nil, err + } + + // TODO if we support multiple data type in future, we may need to get the data type from + // the cloudevents data type annotation + eventType := types.CloudEventsType{ + CloudEventsDataType: payload.ManifestBundleEventDataType, + SubResource: types.SubResourceSpec, + Action: common.CreateRequestAction, + } + + generation, err := getWorkGeneration(manifestWork) + if err != nil { + return nil, err + } + + newWork := manifestWork.DeepCopy() + newWork.UID = kubetypes.UID(utils.UID(c.sourceID, c.namespace, newWork.Name)) + newWork.Generation = generation + ensureSourceLabel(c.sourceID, newWork) + if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { + return nil, err + } + + // add the new work to the ManifestWorkInformer local 
cache. + c.watcher.Receive(watch.Event{Type: watch.Added, Object: newWork}) + return newWork.DeepCopy(), nil +} + +func (c *ManifestWorkSourceClient) Update(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.UpdateOptions) (*workv1.ManifestWork, error) { + return nil, errors.NewMethodNotSupported(common.ManifestWorkGR, "update") +} + +func (c *ManifestWorkSourceClient) UpdateStatus(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.UpdateOptions) (*workv1.ManifestWork, error) { + return nil, errors.NewMethodNotSupported(common.ManifestWorkGR, "updatestatus") +} + +func (c *ManifestWorkSourceClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + work, err := c.lister.ManifestWorks(c.namespace).Get(name) + if errors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + + // TODO if we support multiple data type in future, we may need to get the data type from + // the cloudevents data type annotation + eventType := types.CloudEventsType{ + CloudEventsDataType: payload.ManifestBundleEventDataType, + SubResource: types.SubResourceSpec, + Action: common.DeleteRequestAction, + } + + deletingWork := work.DeepCopy() + now := metav1.Now() + deletingWork.DeletionTimestamp = &now + + if err := c.cloudEventsClient.Publish(ctx, eventType, deletingWork); err != nil { + return err + } + + // update the deleting work in the ManifestWorkInformer local cache. 
+ c.watcher.Receive(watch.Event{Type: watch.Modified, Object: deletingWork}) + return nil +} + +func (c *ManifestWorkSourceClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + return errors.NewMethodNotSupported(common.ManifestWorkGR, "deletecollection") +} + +func (c *ManifestWorkSourceClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*workv1.ManifestWork, error) { + klog.V(4).Infof("getting manifestwork %s", name) + return c.lister.ManifestWorks(c.namespace).Get(name) +} + +func (c *ManifestWorkSourceClient) List(ctx context.Context, opts metav1.ListOptions) (*workv1.ManifestWorkList, error) { + klog.V(4).Infof("list manifestworks") + // send resync request to fetch manifestwork status from agents when the ManifestWorkInformer starts + if err := c.cloudEventsClient.Resync(ctx, types.ClusterAll); err != nil { + return nil, err + } + + return &workv1.ManifestWorkList{}, nil +} + +func (c *ManifestWorkSourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.watcher, nil +} + +func (c *ManifestWorkSourceClient) Patch(ctx context.Context, name string, pt kubetypes.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *workv1.ManifestWork, err error) { + klog.V(4).Infof("patching manifestwork %s", name) + + if len(subresources) != 0 { + return nil, fmt.Errorf("unsupported to update subresources %v", subresources) + } + + lastWork, err := c.lister.ManifestWorks(c.namespace).Get(name) + if err != nil { + return nil, err + } + + patchedWork, err := utils.Patch(pt, lastWork, data) + if err != nil { + return nil, err + } + + generation, err := getWorkGeneration(patchedWork) + if err != nil { + return nil, err + } + + // TODO if we support multiple data type in future, we may need to get the data type from + // the cloudevents data type annotation + eventType := types.CloudEventsType{ + CloudEventsDataType: 
payload.ManifestBundleEventDataType, + SubResource: types.SubResourceSpec, + Action: common.UpdateRequestAction, + } + + newWork := patchedWork.DeepCopy() + newWork.Generation = generation + if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { + return nil, err + } + + // refresh the work in the ManifestWorkInformer local cache with patched work. + c.watcher.Receive(watch.Event{Type: watch.Modified, Object: newWork}) + return newWork.DeepCopy(), nil +} + +// getWorkGeneration retrieves the work generation from the annotation with the key +// "cloudevents.open-cluster-management.io/generation". +// if no generation is set in the annotation, then 0 is returned, which means the message +// broker guarantees the message order. +func getWorkGeneration(work *workv1.ManifestWork) (int64, error) { + generation, ok := work.Annotations[common.CloudEventsGenerationAnnotationKey] + if !ok { + return 0, nil + } + + generationInt, err := strconv.Atoi(generation) + if err != nil { + return 0, fmt.Errorf("failed to convert generation %s to int: %v", generation, err) + } + + return int64(generationInt), nil +} + +func ensureSourceLabel(sourceID string, work *workv1.ManifestWork) { + if work.Labels == nil { + work.Labels = map[string]string{} + } + + work.Labels[common.CloudEventsOriginalSourceLabelKey] = sourceID +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/codec/manifestbundle.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/codec/manifestbundle.go new file mode 100644 index 000000000..f19e70b11 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/codec/manifestbundle.go @@ -0,0 +1,101 @@ +package codec + +import ( + "fmt" + + cloudevents "github.com/cloudevents/sdk-go/v2" + cloudeventstypes "github.com/cloudevents/sdk-go/v2/types" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubetypes "k8s.io/apimachinery/pkg/types" + + workv1 
"open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload" +) + +// ManifestBundleCodec is a codec to encode/decode a ManifestWork/cloudevent with ManifestBundle for a source. +type ManifestBundleCodec struct{} + +func NewManifestBundleCodec() *ManifestBundleCodec { + return &ManifestBundleCodec{} +} + +// EventDataType always returns the event data type `io.open-cluster-management.works.v1alpha1.manifestbundles`. +func (c *ManifestBundleCodec) EventDataType() types.CloudEventsDataType { + return payload.ManifestBundleEventDataType +} + +// Encode the spec of a ManifestWork to a cloudevent with ManifestBundle. +func (c *ManifestBundleCodec) Encode(source string, eventType types.CloudEventsType, work *workv1.ManifestWork) (*cloudevents.Event, error) { + if eventType.CloudEventsDataType != payload.ManifestBundleEventDataType { + return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) + } + + evt := types.NewEventBuilder(source, eventType). + WithClusterName(work.Namespace). + WithResourceID(string(work.UID)). + WithResourceVersion(work.Generation). + NewEvent() + if !work.DeletionTimestamp.IsZero() { + evt.SetExtension(types.ExtensionDeletionTimestamp, work.DeletionTimestamp.Time) + return &evt, nil + } + + manifests := &payload.ManifestBundle{ + Manifests: work.Spec.Workload.Manifests, + DeleteOption: work.Spec.DeleteOption, + ManifestConfigs: work.Spec.ManifestConfigs, + } + if err := evt.SetData(cloudevents.ApplicationJSON, manifests); err != nil { + return nil, fmt.Errorf("failed to encode manifestwork status to a cloudevent: %v", err) + } + + return &evt, nil +} + +// Decode a cloudevent whose data is ManifestBundle to a ManifestWork. 
+func (c *ManifestBundleCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWork, error) { + eventType, err := types.ParseCloudEventsType(evt.Type()) + if err != nil { + return nil, fmt.Errorf("failed to parse cloud event type %s, %v", evt.Type(), err) + } + + if eventType.CloudEventsDataType != payload.ManifestBundleEventDataType { + return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) + } + + evtExtensions := evt.Context.GetExtensions() + + resourceID, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceID]) + if err != nil { + return nil, fmt.Errorf("failed to get resourceid extension: %v", err) + } + + resourceVersion, err := cloudeventstypes.ToInteger(evtExtensions[types.ExtensionResourceVersion]) + if err != nil { + return nil, fmt.Errorf("failed to get resourceversion extension: %v", err) + } + + work := &workv1.ManifestWork{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + UID: kubetypes.UID(resourceID), + ResourceVersion: fmt.Sprintf("%d", resourceVersion), + }, + } + + manifestStatus := &payload.ManifestBundleStatus{} + if err := evt.DataAs(manifestStatus); err != nil { + return nil, fmt.Errorf("failed to unmarshal event data %s, %v", string(evt.Data()), err) + } + + work.Status = workv1.ManifestWorkStatus{ + Conditions: manifestStatus.Conditions, + ResourceStatus: workv1.ManifestResourceStatus{ + Manifests: manifestStatus.ResourceStatus, + }, + } + + return work, nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/handler/resourcehandler.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/handler/resourcehandler.go new file mode 100644 index 000000000..0ad31f15d --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/handler/resourcehandler.go @@ -0,0 +1,167 @@ +package handler + +import ( + "fmt" + "strconv" + "time" + + "k8s.io/apimachinery/pkg/api/equality" + 
	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/labels"
+	kubetypes "k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/client-go/util/workqueue"
+	"k8s.io/klog/v2"
+	workv1lister "open-cluster-management.io/api/client/work/listers/work/v1"
+
+	workv1 "open-cluster-management.io/api/work/v1"
+	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic"
+	"open-cluster-management.io/sdk-go/pkg/cloudevents/generic/types"
+	"open-cluster-management.io/sdk-go/pkg/cloudevents/work/common"
+	"open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher"
+)
+
+const ManifestWorkFinalizer = "cloudevents.open-cluster-management.io/manifest-work-cleanup"
+
+type ManifestWorkSourceHandler struct {
+	works   workqueue.RateLimitingInterface
+	lister  workv1lister.ManifestWorkLister
+	watcher *watcher.ManifestWorkWatcher
+}
+
+// NewManifestWorkSourceHandler returns a ResourceHandler for a ManifestWork source client. It sends the kube events
+// with ManifestWorkWatcher after CloudEventSourceClient received the ManifestWork status from agent, then the
+// ManifestWorkInformer handles the kube events in its local cache.
+func NewManifestWorkSourceHandler(lister workv1lister.ManifestWorkLister, watcher *watcher.ManifestWorkWatcher) *ManifestWorkSourceHandler { + return &ManifestWorkSourceHandler{ + works: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "manifestwork-source-handler"), + lister: lister, + watcher: watcher, + } +} + +func (h *ManifestWorkSourceHandler) Run(stopCh <-chan struct{}) { + defer h.works.ShutDown() + + // start a goroutine to handle the works from the queue + // the .Until will re-kick the runWorker one second after the runWorker completes + go wait.Until(h.runWorker, time.Second, stopCh) + + // wait until we're told to stop + <-stopCh +} + +func (h *ManifestWorkSourceHandler) HandlerFunc() generic.ResourceHandler[*workv1.ManifestWork] { + return func(action types.ResourceAction, obj *workv1.ManifestWork) error { + switch action { + case types.StatusModified: + h.works.Add(obj) + default: + return fmt.Errorf("unsupported resource action %s", action) + } + return nil + } +} + +func (h *ManifestWorkSourceHandler) runWorker() { + // hot loop until we're told to stop. processNextEvent will automatically wait until there's work available, so + // we don't worry about secondary waits + for h.processNextWork() { + } +} + +// processNextWork deals with one key off the queue. +func (h *ManifestWorkSourceHandler) processNextWork() bool { + // pull the next event item from queue. 
+ // events queue blocks until it can return an item to be processed + key, quit := h.works.Get() + if quit { + // the current queue is shutdown and becomes empty, quit this process + return false + } + defer h.works.Done(key) + + if err := h.handleWork(key.(*workv1.ManifestWork)); err != nil { + // we failed to handle the work, we should requeue the item to work on later + // this method will add a backoff to avoid hotlooping on particular items + h.works.AddRateLimited(key) + return true + } + + // we handle the event successfully, tell the queue to stop tracking history for this event + h.works.Forget(key) + return true +} + +func (h *ManifestWorkSourceHandler) handleWork(work *workv1.ManifestWork) error { + lastWork := h.getWorkByUID(work.UID) + if lastWork == nil { + // the work is not found, this may be the client is restarted and the local cache is not ready, requeue this + // work + return errors.NewNotFound(common.ManifestWorkGR, string(work.UID)) + } + + updatedWork := lastWork.DeepCopy() + if meta.IsStatusConditionTrue(work.Status.Conditions, common.ManifestsDeleted) { + updatedWork.Finalizers = []string{} + h.watcher.Receive(watch.Event{Type: watch.Deleted, Object: updatedWork}) + return nil + } + + resourceVersion, err := strconv.Atoi(work.ResourceVersion) + if err != nil { + klog.Errorf("invalid resource version for work %s/%s, %v", lastWork.Namespace, lastWork.Name, err) + return nil + } + + if int64(resourceVersion) > lastWork.Generation { + klog.Warningf("the work %s/%s resource version %d is great than its generation %d, ignore", + lastWork.Namespace, lastWork.Name, resourceVersion, work.Generation) + return nil + } + + // no status change + if equality.Semantic.DeepEqual(lastWork.Status, work.Status) { + return nil + } + + // the work has been handled by agent, we ensure a finalizer on the work + updatedWork.Finalizers = ensureFinalizers(updatedWork.Finalizers) + updatedWork.Status = work.Status + h.watcher.Receive(watch.Event{Type: 
watch.Modified, Object: updatedWork}) + return nil +} + +func (h *ManifestWorkSourceHandler) getWorkByUID(uid kubetypes.UID) *workv1.ManifestWork { + works, err := h.lister.List(labels.Everything()) + if err != nil { + klog.Errorf("failed to lists works, %v", err) + return nil + } + + for _, work := range works { + if work.UID == uid { + return work + } + } + + return nil +} + +func ensureFinalizers(workFinalizers []string) []string { + has := false + for _, f := range workFinalizers { + if f == ManifestWorkFinalizer { + has = true + break + } + } + + if !has { + workFinalizers = append(workFinalizers, ManifestWorkFinalizer) + } + + return workFinalizers +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/statushash.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/statushash.go new file mode 100644 index 000000000..642f78609 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/statushash.go @@ -0,0 +1,18 @@ +package work + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + + workv1 "open-cluster-management.io/api/work/v1" +) + +// ManifestWorkStatusHash returns the SHA256 checksum of a ManifestWork status. 
+func ManifestWorkStatusHash(work *workv1.ManifestWork) (string, error) { + statusBytes, err := json.Marshal(work.Status) + if err != nil { + return "", fmt.Errorf("failed to marshal work status, %v", err) + } + return fmt.Sprintf("%x", sha256.Sum256(statusBytes)), nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils/utils.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils/utils.go new file mode 100644 index 000000000..51b0b3354 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/utils/utils.go @@ -0,0 +1,55 @@ +package utils + +import ( + "encoding/json" + "fmt" + + jsonpatch "github.com/evanphx/json-patch" + "github.com/google/uuid" + "k8s.io/apimachinery/pkg/types" + workv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" +) + +// Patch applies the patch to a work with the patch type. +func Patch(patchType types.PatchType, work *workv1.ManifestWork, patchData []byte) (*workv1.ManifestWork, error) { + workData, err := json.Marshal(work) + if err != nil { + return nil, err + } + + var patchedData []byte + switch patchType { + case types.JSONPatchType: + var patchObj jsonpatch.Patch + patchObj, err = jsonpatch.DecodePatch(patchData) + if err != nil { + return nil, err + } + patchedData, err = patchObj.Apply(workData) + if err != nil { + return nil, err + } + + case types.MergePatchType: + patchedData, err = jsonpatch.MergePatch(workData, patchData) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unsupported patch type: %s", patchType) + } + + patchedWork := &workv1.ManifestWork{} + if err := json.Unmarshal(patchedData, patchedWork); err != nil { + return nil, err + } + + return patchedWork, nil +} + +// UID returns a v5 UUID based on sourceID, work name and namespace to make sure it is consistent +func UID(sourceID, namespace, name string) string { + id := fmt.Sprintf("%s-%s-%s-%s", sourceID, 
common.ManifestWorkGR.String(), namespace, name)
+	return uuid.NewSHA1(uuid.NameSpaceOID, []byte(id)).String()
+}
diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher/watcher.go b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher/watcher.go
new file mode 100644
index 000000000..d22a7512a
--- /dev/null
+++ b/vendor/open-cluster-management.io/sdk-go/pkg/cloudevents/work/watcher/watcher.go
@@ -0,0 +1,64 @@
+package watcher
+
+import (
+	"sync"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/klog/v2"
+)
+
+// ManifestWorkWatcher implements the watch.Interface. It returns a chan which will receive all the events.
+type ManifestWorkWatcher struct {
+	sync.Mutex
+
+	result chan watch.Event
+	done   chan struct{}
+}
+
+var _ watch.Interface = &ManifestWorkWatcher{}
+
+func NewManifestWorkWatcher() *ManifestWorkWatcher {
+	mw := &ManifestWorkWatcher{
+		// It's easy for a consumer to add buffering via an extra
+		// goroutine/channel, but impossible for them to remove it,
+		// so nonbuffered is better.
+		result: make(chan watch.Event),
+		// If the watcher is externally stopped there is no receiver anymore
+		// and the send operations on the result channel, especially the
+		// error reporting might block forever.
+		// Therefore a dedicated stop channel is used to resolve this blocking.
+		done: make(chan struct{}),
+	}
+
+	return mw
+}
+
+// ResultChan implements Interface.
+func (mw *ManifestWorkWatcher) ResultChan() <-chan watch.Event {
+	return mw.result
+}
+
+// Stop implements Interface.
+func (mw *ManifestWorkWatcher) Stop() {
+	// Call Close() exactly once by locking and setting a flag.
+	mw.Lock()
+	defer mw.Unlock()
+	// closing a closed channel always panics, therefore check before closing
+	select {
+	case <-mw.done:
+		close(mw.result)
+	default:
+		close(mw.done)
+	}
+}
+
+// Receive receives an event from the work client and sends it down the result channel.
+func (mw *ManifestWorkWatcher) Receive(evt watch.Event) { + if klog.V(4).Enabled() { + obj, _ := meta.Accessor(evt.Object) + klog.V(4).Infof("Receive the event %v for %v", evt.Type, obj.GetName()) + } + + mw.result <- evt +} From d8d4f5269b42f4e24b4917ce38d372da62aa56a6 Mon Sep 17 00:00:00 2001 From: morvencao Date: Tue, 16 Apr 2024 07:43:22 +0000 Subject: [PATCH 2/6] remove e2e testing for cloudevents. Signed-off-by: morvencao --- .github/workflows/go-presubmit.yml | 31 - Makefile | 6 - pkg/addonmanager/addontesting/helpers.go | 5 - .../controllers/agentdeploy/utils.go | 13 - pkg/addonmanager/manager.go | 8 - test/e2ecloudevents/e2e_suite_test.go | 155 ----- .../helloworld_cloudevents_test.go | 536 ------------------ 7 files changed, 754 deletions(-) delete mode 100644 test/e2ecloudevents/e2e_suite_test.go delete mode 100644 test/e2ecloudevents/helloworld_cloudevents_test.go diff --git a/.github/workflows/go-presubmit.yml b/.github/workflows/go-presubmit.yml index 8f24bf54a..823c2be26 100644 --- a/.github/workflows/go-presubmit.yml +++ b/.github/workflows/go-presubmit.yml @@ -119,37 +119,6 @@ jobs: env: KUBECONFIG: /home/runner/.kube/config - e2e-cloudevents: - name: e2e-cloudevents - runs-on: ubuntu-latest - steps: - - name: checkout code - uses: actions/checkout@v4 - with: - fetch-depth: 1 - - name: install Go - uses: actions/setup-go@v3 - with: - go-version: ${{ env.GO_VERSION }} - - name: install imagebuilder - run: go install github.com/openshift/imagebuilder/cmd/imagebuilder@v1.2.3 - - name: addon-examples-image - run: imagebuilder --allow-pull -t quay.io/open-cluster-management/addon-examples:latest -t quay.io/ocm/addon-examples:latest -f ./build/Dockerfile.example . 
- - name: setup kind - uses: engineerd/setup-kind@v0.5.0 - with: - version: v0.11.1 - name: cluster1 - - name: Load image on the nodes of the cluster - run: | - kind load docker-image --name=cluster1 quay.io/open-cluster-management/addon-examples:latest - kind load docker-image --name=cluster1 quay.io/ocm/addon-examples:latest - - name: Run e2e test with cloudevents - run: | - make test-e2e-cloudevents - env: - KUBECONFIG: /home/runner/.kube/config - e2e-hosted: name: e2e-hosted runs-on: ubuntu-latest diff --git a/Makefile b/Makefile index d42f025b8..53102acdd 100644 --- a/Makefile +++ b/Makefile @@ -141,12 +141,6 @@ build-e2e: test-e2e: build-e2e deploy-ocm deploy-helloworld deploy-helloworld-helm ./e2e.test -test.v -ginkgo.v -build-e2e-cloudevents: - go test -c ./test/e2ecloudevents - -test-e2e-cloudevents: build-e2e-cloudevents deploy-ocm-cloudevents deploy-helloworld-cloudevents - ./e2ecloudevents.test -test.v -ginkgo.v - build-hosted-e2e: go test -c ./test/e2ehosted diff --git a/pkg/addonmanager/addontesting/helpers.go b/pkg/addonmanager/addontesting/helpers.go index c33b1a014..c0a6c43de 100644 --- a/pkg/addonmanager/addontesting/helpers.go +++ b/pkg/addonmanager/addontesting/helpers.go @@ -16,8 +16,6 @@ import ( addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" clusterv1 "open-cluster-management.io/api/cluster/v1" workapiv1 "open-cluster-management.io/api/work/v1" - "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" - "open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload" "open-cluster-management.io/addon-framework/pkg/basecontroller/events" ) @@ -189,9 +187,6 @@ func NewManifestWork(name, namespace string, objects ...*unstructured.Unstructur ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, - Annotations: map[string]string{ - common.CloudEventsDataTypeAnnotationKey: payload.ManifestBundleEventDataType.String(), - }, }, Spec: workapiv1.ManifestWorkSpec{ Workload: workapiv1.ManifestsTemplate{ diff --git 
a/pkg/addonmanager/controllers/agentdeploy/utils.go b/pkg/addonmanager/controllers/agentdeploy/utils.go index 5ebf07475..7df50357a 100644 --- a/pkg/addonmanager/controllers/agentdeploy/utils.go +++ b/pkg/addonmanager/controllers/agentdeploy/utils.go @@ -13,8 +13,6 @@ import ( clusterv1 "open-cluster-management.io/api/cluster/v1" workapiv1 "open-cluster-management.io/api/work/v1" workbuilder "open-cluster-management.io/sdk-go/pkg/apis/work/v1/builder" - "open-cluster-management.io/sdk-go/pkg/cloudevents/work/common" - "open-cluster-management.io/sdk-go/pkg/cloudevents/work/payload" "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" "open-cluster-management.io/addon-framework/pkg/agent" @@ -92,9 +90,6 @@ func newManifestWork(addonNamespace, addonName, clusterName string, manifests [] Labels: map[string]string{ addonapiv1alpha1.AddonLabelKey: addonName, }, - Annotations: map[string]string{ - common.CloudEventsDataTypeAnnotationKey: payload.ManifestBundleEventDataType.String(), - }, }, Spec: workapiv1.ManifestWorkSpec{ Workload: workapiv1.ManifestsTemplate{ @@ -320,14 +315,6 @@ func (b *addonWorksBuilder) BuildDeployWorks(installMode, addonWorkNamespace str return nil, nil, err } - if len(annotations) == 0 { - annotations = map[string]string{ - common.CloudEventsDataTypeAnnotationKey: payload.ManifestBundleEventDataType.String(), - } - } else { - annotations[common.CloudEventsDataTypeAnnotationKey] = payload.ManifestBundleEventDataType.String() - } - return b.workBuilder.Build(deployObjects, newAddonWorkObjectMeta(b.processor.manifestWorkNamePrefix(addon.Namespace, addon.Name), addon.Name, addon.Namespace, addonWorkNamespace, owner), workbuilder.ExistingManifestWorksOption(existingWorks), diff --git a/pkg/addonmanager/manager.go b/pkg/addonmanager/manager.go index 47db0f228..101e22669 100644 --- a/pkg/addonmanager/manager.go +++ b/pkg/addonmanager/manager.go @@ -45,14 +45,6 @@ type AddonManager interface { // Start starts all registered addon 
agent. Start(ctx context.Context) error - - // StartWithInformers starts all registered addon agent with the given informers. - // StartWithInformers(ctx context.Context, - // kubeInformers kubeinformers.SharedInformerFactory, - // workInformers workv1informers.SharedInformerFactory, - // addonInformers addoninformers.SharedInformerFactory, - // clusterInformers clusterv1informers.SharedInformerFactory, - // dynamicInformers dynamicinformer.DynamicSharedInformerFactory) error } type addonManager struct { diff --git a/test/e2ecloudevents/e2e_suite_test.go b/test/e2ecloudevents/e2e_suite_test.go deleted file mode 100644 index 14766faf2..000000000 --- a/test/e2ecloudevents/e2e_suite_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package e2ecloudevents - -import ( - "context" - "fmt" - "os" - "strings" - "testing" - "time" - - ginkgo "github.com/onsi/ginkgo" - gomega "github.com/onsi/gomega" - - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/util/retry" - - certificatesv1 "k8s.io/api/certificates/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - addonclient "open-cluster-management.io/api/client/addon/clientset/versioned" - clusterclient "open-cluster-management.io/api/client/cluster/clientset/versioned" - clusterv1 "open-cluster-management.io/api/cluster/v1" -) - -func TestE2E(t *testing.T) { - gomega.RegisterFailHandler(ginkgo.Fail) - ginkgo.RunSpecs(t, "E2E suite") -} - -var ( - managedClusterName string - hubKubeClient kubernetes.Interface - hubAddOnClient addonclient.Interface - hubClusterClient clusterclient.Interface - clusterCfg *rest.Config -) - -// This suite is sensitive to the following environment variables: -// -// - MANAGED_CLUSTER_NAME sets the name of the cluster -// - KUBECONFIG is the location of the kubeconfig file to use -var _ = ginkgo.BeforeSuite(func() { - kubeconfig := 
os.Getenv("KUBECONFIG") - managedClusterName = os.Getenv("MANAGED_CLUSTER_NAME") - if managedClusterName == "" { - managedClusterName = "cluster1" - } - err := func() error { - var err error - clusterCfg, err = clientcmd.BuildConfigFromFlags("", kubeconfig) - if err != nil { - return err - } - - hubKubeClient, err = kubernetes.NewForConfig(clusterCfg) - if err != nil { - return err - } - - hubAddOnClient, err = addonclient.NewForConfig(clusterCfg) - if err != nil { - return err - } - - hubClusterClient, err = clusterclient.NewForConfig(clusterCfg) - - return err - }() - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - var csrs *certificatesv1.CertificateSigningRequestList - // Waiting for the CSR for ManagedCluster to exist - err = wait.Poll(1*time.Second, 120*time.Second, func() (bool, error) { - var err error - csrs, err = hubKubeClient.CertificatesV1().CertificateSigningRequests().List(context.TODO(), metav1.ListOptions{ - LabelSelector: fmt.Sprintf("open-cluster-management.io/cluster-name = %v", managedClusterName), - }) - if err != nil { - return false, err - } - - if len(csrs.Items) >= 1 { - return true, nil - } - - return false, nil - }) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - // Approving all pending CSRs - for i := range csrs.Items { - csr := &csrs.Items[i] - if !strings.HasPrefix(csr.Name, managedClusterName) { - continue - } - - err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - csr, err = hubKubeClient.CertificatesV1().CertificateSigningRequests().Get(context.TODO(), csr.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - // make the e2e test idempotent to ease debugging in local environment - for _, condition := range csr.Status.Conditions { - if condition.Type == certificatesv1.CertificateApproved { - if condition.Status == corev1.ConditionTrue { - return nil - } - } - } - - csr.Status.Conditions = append(csr.Status.Conditions, certificatesv1.CertificateSigningRequestCondition{ - Type: 
certificatesv1.CertificateApproved, - Status: corev1.ConditionTrue, - Reason: "Approved by E2E", - Message: "Approved as part of Loopback e2e", - }) - _, err := hubKubeClient.CertificatesV1().CertificateSigningRequests().UpdateApproval(context.TODO(), csr.Name, csr, metav1.UpdateOptions{}) - return err - }) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - } - - var managedCluster *clusterv1.ManagedCluster - // Waiting for ManagedCluster to exist - err = wait.Poll(1*time.Second, 120*time.Second, func() (bool, error) { - var err error - managedCluster, err = hubClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), managedClusterName, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return false, nil - } - if err != nil { - return false, err - } - return true, nil - }) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - // Accepting ManagedCluster - err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - var err error - managedCluster, err = hubClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), managedCluster.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - managedCluster.Spec.HubAcceptsClient = true - managedCluster.Spec.LeaseDurationSeconds = 5 - _, err = hubClusterClient.ClusterV1().ManagedClusters().Update(context.TODO(), managedCluster, metav1.UpdateOptions{}) - return err - }) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) -}) diff --git a/test/e2ecloudevents/helloworld_cloudevents_test.go b/test/e2ecloudevents/helloworld_cloudevents_test.go deleted file mode 100644 index fa9090536..000000000 --- a/test/e2ecloudevents/helloworld_cloudevents_test.go +++ /dev/null @@ -1,536 +0,0 @@ -package e2ecloudevents - -import ( - "context" - "fmt" - - "github.com/onsi/ginkgo" - "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" - apiequality "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/rand" - addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" -) - -const ( - eventuallyTimeout = 300 // seconds - eventuallyInterval = 1 // seconds -) - -const ( - addonName = "helloworldcloudevents" - addonInstallNamespace = "open-cluster-management-agent-addon" - deployConfigName = "deploy-config" - deployImageOverrideConfigName = "image-override-deploy-config" - deployProxyConfigName = "proxy-deploy-config" - deployAgentInstallNamespaceConfigName = "agent-install-namespace-deploy-config" -) - -var ( - nodeSelector = map[string]string{"kubernetes.io/os": "linux"} - tolerations = []corev1.Toleration{{Key: "foo", Operator: corev1.TolerationOpExists, Effect: corev1.TaintEffectNoExecute}} - registries = []addonapiv1alpha1.ImageMirror{ - { - Source: "quay.io/open-cluster-management/addon-examples", - Mirror: "quay.io/ocm/addon-examples", - }, - } - - proxyConfig = addonapiv1alpha1.ProxyConfig{ - // settings of http proxy will not impact the communicaiton between - // hub kube-apiserver and the add-on agent running on managed cluster. 
- HTTPProxy: "http://127.0.0.1:3128", - NoProxy: "example.com", - } - - agentInstallNamespaceConfig = "test-ns" -) - -var _ = ginkgo.Describe("install/uninstall helloworldcloudevents addons", func() { - ginkgo.BeforeEach(func() { - gomega.Eventually(func() error { - _, err := hubClusterClient.ClusterV1().ManagedClusters().Get(context.Background(), managedClusterName, metav1.GetOptions{}) - if err != nil { - return err - } - - _, err = hubKubeClient.CoreV1().Namespaces().Get(context.Background(), managedClusterName, metav1.GetOptions{}) - if err != nil { - return err - } - - testNs := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: agentInstallNamespaceConfig, - }, - } - _, err = hubKubeClient.CoreV1().Namespaces().Create(context.Background(), testNs, metav1.CreateOptions{}) - if err != nil { - if errors.IsAlreadyExists(err) { - return nil - } - return err - } - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - }) - - ginkgo.AfterEach(func() { - ginkgo.By("Clean up the customized agent install namespace after each case.") - gomega.Eventually(func() error { - _, err := hubKubeClient.CoreV1().Namespaces().Get(context.Background(), - agentInstallNamespaceConfig, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return nil - } - - if err == nil { - errd := hubKubeClient.CoreV1().Namespaces().Delete(context.Background(), - agentInstallNamespaceConfig, metav1.DeleteOptions{}) - if errd != nil { - return errd - } - return fmt.Errorf("ns is deleting, need re-check if namespace is not found") - } - - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - }) - - ginkgo.It("addon should be worked", func() { - ginkgo.By("Make sure cma annotation managed by addon-manager is added") - gomega.Eventually(func() error { - cma, err := hubAddOnClient.AddonV1alpha1().ClusterManagementAddOns().Get(context.Background(), addonName, metav1.GetOptions{}) - if err != nil { - return err - } - - if 
cma.Annotations[addonapiv1alpha1.AddonLifecycleAnnotationKey] != addonapiv1alpha1.AddonLifecycleAddonManagerAnnotationValue { - return fmt.Errorf("addon should have annotation, but get %v", cma.Annotations) - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - ginkgo.By("Make sure addon is available") - gomega.Eventually(func() error { - addon, err := hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), addonName, metav1.GetOptions{}) - if err != nil { - return err - } - - if !meta.IsStatusConditionTrue(addon.Status.Conditions, "ManifestApplied") { - return fmt.Errorf("addon should be applied to spoke, but get condition %v", addon.Status.Conditions) - } - - if !meta.IsStatusConditionTrue(addon.Status.Conditions, "Available") { - return fmt.Errorf("addon should be available on spoke, but get condition %v", addon.Status.Conditions) - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - ginkgo.By("Make sure addon is functioning") - configmap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("config-%s", rand.String(6)), - Namespace: managedClusterName, - }, - Data: map[string]string{ - "key1": rand.String(6), - "key2": rand.String(6), - }, - } - - _, err := hubKubeClient.CoreV1().ConfigMaps(managedClusterName).Create(context.Background(), configmap, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - gomega.Eventually(func() error { - copyiedConfig, err := hubKubeClient.CoreV1().ConfigMaps(addonInstallNamespace).Get(context.Background(), configmap.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - if !apiequality.Semantic.DeepEqual(copyiedConfig.Data, configmap.Data) { - return fmt.Errorf("expected configmap is not correct, %v", copyiedConfig.Data) - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - ginkgo.By("Prepare a 
AddOnDeploymentConfig for addon nodeSelector and tolerations") - gomega.Eventually(func() error { - return prepareAddOnDeploymentConfig(managedClusterName) - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - ginkgo.By("Add the configs to ManagedClusterAddOn") - gomega.Eventually(func() error { - addon, err := hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), addonName, metav1.GetOptions{}) - if err != nil { - return err - } - newAddon := addon.DeepCopy() - newAddon.Spec.Configs = []addonapiv1alpha1.AddOnConfig{ - { - ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ - Group: "addon.open-cluster-management.io", - Resource: "addondeploymentconfigs", - }, - ConfigReferent: addonapiv1alpha1.ConfigReferent{ - Namespace: managedClusterName, - Name: deployConfigName, - }, - }, - } - _, err = hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Update(context.Background(), newAddon, metav1.UpdateOptions{}) - if err != nil { - return err - } - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - ginkgo.By("Make sure addon is configured") - gomega.Eventually(func() error { - agentDeploy, err := hubKubeClient.AppsV1().Deployments(addonInstallNamespace).Get(context.Background(), "helloworldcloudevents-agent", metav1.GetOptions{}) - if err != nil { - return err - } - - if !equality.Semantic.DeepEqual(agentDeploy.Spec.Template.Spec.NodeSelector, nodeSelector) { - return fmt.Errorf("unexpected nodeSeletcor %v", agentDeploy.Spec.Template.Spec.NodeSelector) - } - - if !equality.Semantic.DeepEqual(agentDeploy.Spec.Template.Spec.Tolerations, tolerations) { - return fmt.Errorf("unexpected tolerations %v", agentDeploy.Spec.Template.Spec.Tolerations) - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - ginkgo.By("Remove addon") - err = 
hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Delete(context.Background(), addonName, metav1.DeleteOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - }) - - ginkgo.It("addon should be worked with proxy settings", func() { - ginkgo.By("Make sure addon is available") - gomega.Eventually(func() error { - addon, err := hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), addonName, metav1.GetOptions{}) - if err != nil { - return err - } - - if !meta.IsStatusConditionTrue(addon.Status.Conditions, "ManifestApplied") { - return fmt.Errorf("addon should be applied to spoke, but get condition %v", addon.Status.Conditions) - } - - if !meta.IsStatusConditionTrue(addon.Status.Conditions, "Available") { - return fmt.Errorf("addon should be available on spoke, but get condition %v", addon.Status.Conditions) - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - ginkgo.By("Make sure addon is functioning") - configmap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("config-%s", rand.String(6)), - Namespace: managedClusterName, - }, - Data: map[string]string{ - "key1": rand.String(6), - "key2": rand.String(6), - }, - } - - _, err := hubKubeClient.CoreV1().ConfigMaps(managedClusterName).Create(context.Background(), configmap, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - gomega.Eventually(func() error { - copyiedConfig, err := hubKubeClient.CoreV1().ConfigMaps(addonInstallNamespace).Get(context.Background(), configmap.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - if !apiequality.Semantic.DeepEqual(copyiedConfig.Data, configmap.Data) { - return fmt.Errorf("expected configmap is not correct, %v", copyiedConfig.Data) - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - ginkgo.By("Prepare a AddOnDeploymentConfig for proxy settings") - 
gomega.Eventually(func() error { - return prepareProxyConfigAddOnDeploymentConfig(managedClusterName) - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - ginkgo.By("Add the configs to ManagedClusterAddOn") - gomega.Eventually(func() error { - addon, err := hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), addonName, metav1.GetOptions{}) - if err != nil { - return err - } - newAddon := addon.DeepCopy() - newAddon.Spec.Configs = []addonapiv1alpha1.AddOnConfig{ - { - ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ - Group: "addon.open-cluster-management.io", - Resource: "addondeploymentconfigs", - }, - ConfigReferent: addonapiv1alpha1.ConfigReferent{ - Namespace: managedClusterName, - Name: deployProxyConfigName, - }, - }, - } - _, err = hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Update(context.Background(), newAddon, metav1.UpdateOptions{}) - if err != nil { - return err - } - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - ginkgo.By("Make sure addon is configured") - gomega.Eventually(func() error { - agentDeploy, err := hubKubeClient.AppsV1().Deployments(addonInstallNamespace).Get(context.Background(), "helloworldcloudevents-agent", metav1.GetOptions{}) - if err != nil { - return err - } - - containers := agentDeploy.Spec.Template.Spec.Containers - if len(containers) != 1 { - return fmt.Errorf("expect one container, but %v", containers) - } - - // check the proxy settings - deployProxyConfig := addonapiv1alpha1.ProxyConfig{} - for _, envVar := range containers[0].Env { - if envVar.Name == "HTTP_PROXY" { - deployProxyConfig.HTTPProxy = envVar.Value - } - - if envVar.Name == "HTTPS_PROXY" { - deployProxyConfig.HTTPSProxy = envVar.Value - } - - if envVar.Name == "NO_PROXY" { - deployProxyConfig.NoProxy = envVar.Value - } - } - - if !equality.Semantic.DeepEqual(proxyConfig, deployProxyConfig) { - return 
fmt.Errorf("expected proxy settings %v, but got %v", proxyConfig, deployProxyConfig) - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - ginkgo.By("Remove addon") - err = hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Delete(context.Background(), addonName, metav1.DeleteOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - }) - - ginkgo.It("addon registraion agent install namespace should work", func() { - ginkgo.By("Make sure addon is available") - gomega.Eventually(func() error { - addon, err := hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.Background(), addonName, metav1.GetOptions{}) - if err != nil { - return err - } - - if !meta.IsStatusConditionTrue(addon.Status.Conditions, "ManifestApplied") { - return fmt.Errorf("addon should be applied to spoke, but get condition %v", addon.Status.Conditions) - } - - if !meta.IsStatusConditionTrue(addon.Status.Conditions, "Available") { - return fmt.Errorf("addon should be available on spoke, but get condition %v", addon.Status.Conditions) - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - ginkgo.By("Make sure addon is functioning") - configmap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("config-%s", rand.String(6)), - Namespace: managedClusterName, - }, - Data: map[string]string{ - "key1": rand.String(6), - "key2": rand.String(6), - }, - } - - _, err := hubKubeClient.CoreV1().ConfigMaps(managedClusterName).Create(context.Background(), configmap, metav1.CreateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - gomega.Eventually(func() error { - copyiedConfig, err := hubKubeClient.CoreV1().ConfigMaps(addonInstallNamespace).Get(context.Background(), configmap.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - if !apiequality.Semantic.DeepEqual(copyiedConfig.Data, configmap.Data) { - return 
fmt.Errorf("expected configmap is not correct, %v", copyiedConfig.Data) - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - ginkgo.By("Prepare a AddOnDeploymentConfig for addon agent install namespace") - gomega.Eventually(func() error { - return prepareAgentInstallNamespaceAddOnDeploymentConfig(managedClusterName) - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - ginkgo.By("Add the configs to ManagedClusterAddOn") - gomega.Eventually(func() error { - addon, err := hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get( - context.Background(), addonName, metav1.GetOptions{}) - if err != nil { - return err - } - newAddon := addon.DeepCopy() - newAddon.Spec.Configs = []addonapiv1alpha1.AddOnConfig{ - { - ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ - Group: "addon.open-cluster-management.io", - Resource: "addondeploymentconfigs", - }, - ConfigReferent: addonapiv1alpha1.ConfigReferent{ - Namespace: managedClusterName, - Name: deployAgentInstallNamespaceConfigName, - }, - }, - } - _, err = hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Update( - context.Background(), newAddon, metav1.UpdateOptions{}) - if err != nil { - return err - } - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - ginkgo.By("Make sure addon is configured") - gomega.Eventually(func() error { - _, err := hubKubeClient.CoreV1().Secrets(agentInstallNamespaceConfig).Get( - context.Background(), "helloworldcloudevents-hub-kubeconfig", metav1.GetOptions{}) - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - ginkgo.By("Remove addon") - err = hubAddOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Delete(context.Background(), addonName, metav1.DeleteOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - }) -}) - -func prepareAddOnDeploymentConfig(namespace string) 
error { - _, err := hubAddOnClient.AddonV1alpha1().AddOnDeploymentConfigs(namespace).Get(context.Background(), deployConfigName, metav1.GetOptions{}) - if errors.IsNotFound(err) { - if _, err := hubAddOnClient.AddonV1alpha1().AddOnDeploymentConfigs(managedClusterName).Create( - context.Background(), - &addonapiv1alpha1.AddOnDeploymentConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: deployConfigName, - Namespace: namespace, - }, - Spec: addonapiv1alpha1.AddOnDeploymentConfigSpec{ - NodePlacement: &addonapiv1alpha1.NodePlacement{ - NodeSelector: nodeSelector, - Tolerations: tolerations, - }, - AgentInstallNamespace: addonInstallNamespace, - }, - }, - metav1.CreateOptions{}, - ); err != nil { - return err - } - - return nil - } - - return err -} - -func prepareImageOverrideAddOnDeploymentConfig(namespace string) error { - _, err := hubAddOnClient.AddonV1alpha1().AddOnDeploymentConfigs(namespace).Get( - context.Background(), deployImageOverrideConfigName, metav1.GetOptions{}) - if errors.IsNotFound(err) { - if _, err := hubAddOnClient.AddonV1alpha1().AddOnDeploymentConfigs(managedClusterName).Create( - context.Background(), - &addonapiv1alpha1.AddOnDeploymentConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: deployImageOverrideConfigName, - Namespace: namespace, - }, - Spec: addonapiv1alpha1.AddOnDeploymentConfigSpec{ - Registries: registries, - AgentInstallNamespace: addonInstallNamespace, - }, - }, - metav1.CreateOptions{}, - ); err != nil { - return err - } - - return nil - } - - return err -} - -func prepareProxyConfigAddOnDeploymentConfig(namespace string) error { - _, err := hubAddOnClient.AddonV1alpha1().AddOnDeploymentConfigs(namespace).Get(context.Background(), deployProxyConfigName, metav1.GetOptions{}) - if errors.IsNotFound(err) { - if _, err := hubAddOnClient.AddonV1alpha1().AddOnDeploymentConfigs(managedClusterName).Create( - context.Background(), - &addonapiv1alpha1.AddOnDeploymentConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: deployProxyConfigName, - 
Namespace: namespace, - }, - Spec: addonapiv1alpha1.AddOnDeploymentConfigSpec{ - ProxyConfig: proxyConfig, - AgentInstallNamespace: addonInstallNamespace, - }, - }, - metav1.CreateOptions{}, - ); err != nil { - return err - } - - return nil - } - - return err -} - -func prepareAgentInstallNamespaceAddOnDeploymentConfig(namespace string) error { - _, err := hubAddOnClient.AddonV1alpha1().AddOnDeploymentConfigs(namespace).Get( - context.Background(), deployAgentInstallNamespaceConfigName, metav1.GetOptions{}) - if errors.IsNotFound(err) { - if _, err := hubAddOnClient.AddonV1alpha1().AddOnDeploymentConfigs(managedClusterName).Create( - context.Background(), - &addonapiv1alpha1.AddOnDeploymentConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: deployAgentInstallNamespaceConfigName, - Namespace: namespace, - }, - Spec: addonapiv1alpha1.AddOnDeploymentConfigSpec{ - AgentInstallNamespace: agentInstallNamespaceConfig, - }, - }, - metav1.CreateOptions{}, - ); err != nil { - return err - } - - return nil - } - - return err -} From 5c1a6e7a994360ea141655c3e8eb3bf1cf9379ed Mon Sep 17 00:00:00 2001 From: morvencao Date: Wed, 17 Apr 2024 02:05:50 +0000 Subject: [PATCH 3/6] revert addon manager interface change. 
Signed-off-by: morvencao --- pkg/addonmanager/cloudevents/manager.go | 13 ++++++++++++- pkg/addonmanager/manager.go | 8 ++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/pkg/addonmanager/cloudevents/manager.go b/pkg/addonmanager/cloudevents/manager.go index 2203fdf58..c56355794 100644 --- a/pkg/addonmanager/cloudevents/manager.go +++ b/pkg/addonmanager/cloudevents/manager.go @@ -181,7 +181,7 @@ func (a *cloudeventsAddonManager) Start(ctx context.Context) error { return err } - err = a.StartWithInformers(ctx, workClient, workInformers, kubeInformers, addonInformers, clusterInformers, dynamicInformers) + err = a.startWithInformers(ctx, workClient, workInformers, kubeInformers, addonInformers, clusterInformers, dynamicInformers) if err != nil { return err } @@ -195,6 +195,17 @@ func (a *cloudeventsAddonManager) Start(ctx context.Context) error { } func (a *cloudeventsAddonManager) StartWithInformers(ctx context.Context, + kubeInformers kubeinformers.SharedInformerFactory, + workInformers workinformers.SharedInformerFactory, + addonInformers addoninformers.SharedInformerFactory, + clusterInformers clusterv1informers.SharedInformerFactory, + dynamicInformers dynamicinformer.DynamicSharedInformerFactory) error { + // TODO: Implement this method to support usage within the addon template. + // Requires updating sdk-go to enable clientHolder to manage the shared informer for ManifestWork. + return fmt.Errorf("method StartWithInformers is not implemented") +} + +func (a *cloudeventsAddonManager) startWithInformers(ctx context.Context, workClient workclientset.Interface, workInformers workv1informers.ManifestWorkInformer, kubeInformers kubeinformers.SharedInformerFactory, diff --git a/pkg/addonmanager/manager.go b/pkg/addonmanager/manager.go index 101e22669..57953b722 100644 --- a/pkg/addonmanager/manager.go +++ b/pkg/addonmanager/manager.go @@ -45,6 +45,14 @@ type AddonManager interface { // Start starts all registered addon agent. 
Start(ctx context.Context) error + + // StartWithInformers starts all registered addon agent with the given informers. + StartWithInformers(ctx context.Context, + kubeInformers kubeinformers.SharedInformerFactory, + workInformers workv1informers.SharedInformerFactory, + addonInformers addoninformers.SharedInformerFactory, + clusterInformers clusterv1informers.SharedInformerFactory, + dynamicInformers dynamicinformer.DynamicSharedInformerFactory) error } type addonManager struct { From 7ad610768cd2b451ec2c31d262f8e64605f19f26 Mon Sep 17 00:00:00 2001 From: morvencao Date: Wed, 17 Apr 2024 11:05:04 +0000 Subject: [PATCH 4/6] add base addon manager. Signed-off-by: morvencao --- pkg/addonmanager/base_manager.go | 216 ++++++++++++++++++++++++ pkg/addonmanager/cloudevents/manager.go | 139 +++++---------- pkg/addonmanager/interface.go | 40 +++++ pkg/addonmanager/manager.go | 207 ++--------------------- 4 files changed, 306 insertions(+), 296 deletions(-) create mode 100644 pkg/addonmanager/base_manager.go create mode 100644 pkg/addonmanager/interface.go diff --git a/pkg/addonmanager/base_manager.go b/pkg/addonmanager/base_manager.go new file mode 100644 index 000000000..0d1663f13 --- /dev/null +++ b/pkg/addonmanager/base_manager.go @@ -0,0 +1,216 @@ +package addonmanager + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic/dynamicinformer" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions" + clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions" + workv1client "open-cluster-management.io/api/client/work/clientset/versioned" + workv1informers "open-cluster-management.io/api/client/work/informers/externalversions" + + 
"open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig" + "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy" + "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate" + "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/cmaconfig" + "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/cmamanagedby" + "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration" + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" + "open-cluster-management.io/addon-framework/pkg/utils" +) + +// BaseAddonManagerImpl is the base implementation of BaseAddonManager +// that manages the addon agents and configs. +type BaseAddonManagerImpl struct { + addonAgents map[string]agent.AgentAddon + addonConfigs map[schema.GroupVersionResource]bool + config *rest.Config + syncContexts []factory.SyncContext +} + +// NewBaseAddonManagerImpl creates a new BaseAddonManagerImpl instance with the given config. 
+func NewBaseAddonManagerImpl(config *rest.Config) *BaseAddonManagerImpl { + return &BaseAddonManagerImpl{ + config: config, + syncContexts: []factory.SyncContext{}, + addonConfigs: map[schema.GroupVersionResource]bool{}, + addonAgents: map[string]agent.AgentAddon{}, + } +} + +func (a *BaseAddonManagerImpl) GetConfig() *rest.Config { + return a.config +} + +func (a *BaseAddonManagerImpl) GetAddonAgents() map[string]agent.AgentAddon { + return a.addonAgents +} + +func (a *BaseAddonManagerImpl) GetAddonConfigs() map[schema.GroupVersionResource]bool { + return a.addonConfigs +} + +func (a *BaseAddonManagerImpl) GetSyncContexts() []factory.SyncContext { + return a.syncContexts +} + +func (a *BaseAddonManagerImpl) SetSyncContexts(syncContexts []factory.SyncContext) { + a.syncContexts = syncContexts +} + +func (a *BaseAddonManagerImpl) AddAgent(addon agent.AgentAddon) error { + addonOption := addon.GetAgentAddonOptions() + if len(addonOption.AddonName) == 0 { + return fmt.Errorf("addon name should be set") + } + if _, ok := a.addonAgents[addonOption.AddonName]; ok { + return fmt.Errorf("an agent is added for the addon already") + } + a.addonAgents[addonOption.AddonName] = addon + return nil +} + +func (a *BaseAddonManagerImpl) Trigger(clusterName, addonName string) { + for _, syncContex := range a.syncContexts { + syncContex.Queue().Add(fmt.Sprintf("%s/%s", clusterName, addonName)) + } +} + +func (a *BaseAddonManagerImpl) StartWithInformers(ctx context.Context, + kubeInformers kubeinformers.SharedInformerFactory, + workInformers workv1informers.SharedInformerFactory, + addonInformers addoninformers.SharedInformerFactory, + clusterInformers clusterv1informers.SharedInformerFactory, + dynamicInformers dynamicinformer.DynamicSharedInformerFactory) error { + + kubeClient, err := kubernetes.NewForConfig(a.config) + if err != nil { + return err + } + + addonClient, err := addonv1alpha1client.NewForConfig(a.config) + if err != nil { + return err + } + + workClient, err := 
workv1client.NewForConfig(a.config) + if err != nil { + return err + } + + v1CSRSupported, v1beta1Supported, err := utils.IsCSRSupported(kubeClient) + if err != nil { + return err + } + + for _, agentImpl := range a.addonAgents { + for _, configGVR := range agentImpl.GetAgentAddonOptions().SupportedConfigGVRs { + a.addonConfigs[configGVR] = true + } + } + + deployController := agentdeploy.NewAddonDeployController( + workClient, + addonClient, + clusterInformers.Cluster().V1().ManagedClusters(), + addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), + workInformers.Work().V1().ManifestWorks(), + a.addonAgents, + ) + + registrationController := registration.NewAddonRegistrationController( + addonClient, + clusterInformers.Cluster().V1().ManagedClusters(), + addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), + a.addonAgents, + ) + + // This controller is used during migrating addons to be managed by addon-manager. + // This should be removed when the migration is done. + // The migration plan refer to https://github.com/open-cluster-management-io/ocm/issues/355. 
+ managementAddonController := cmamanagedby.NewCMAManagedByController( + addonClient, + addonInformers.Addon().V1alpha1().ClusterManagementAddOns(), + a.addonAgents, + utils.FilterByAddonName(a.addonAgents), + ) + + var addonConfigController, managementAddonConfigController factory.Controller + if len(a.addonConfigs) != 0 { + addonConfigController = addonconfig.NewAddonConfigController( + addonClient, + addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), + addonInformers.Addon().V1alpha1().ClusterManagementAddOns(), + dynamicInformers, + a.addonConfigs, + utils.FilterByAddonName(a.addonAgents), + ) + managementAddonConfigController = cmaconfig.NewCMAConfigController( + addonClient, + addonInformers.Addon().V1alpha1().ClusterManagementAddOns(), + dynamicInformers, + a.addonConfigs, + utils.FilterByAddonName(a.addonAgents), + ) + } + + var csrApproveController factory.Controller + var csrSignController factory.Controller + // Spawn the following controllers only if v1 CSR api is supported in the + // hub cluster. Under v1beta1 CSR api, all the CSR objects will be signed + // by the kube-controller-manager so custom CSR controller should be + // disabled to avoid conflict. 
+ if v1CSRSupported { + csrApproveController = certificate.NewCSRApprovingController( + kubeClient, + clusterInformers.Cluster().V1().ManagedClusters(), + kubeInformers.Certificates().V1().CertificateSigningRequests(), + nil, + addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), + a.addonAgents, + ) + csrSignController = certificate.NewCSRSignController( + kubeClient, + clusterInformers.Cluster().V1().ManagedClusters(), + kubeInformers.Certificates().V1().CertificateSigningRequests(), + addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), + a.addonAgents, + ) + } else if v1beta1Supported { + csrApproveController = certificate.NewCSRApprovingController( + kubeClient, + clusterInformers.Cluster().V1().ManagedClusters(), + nil, + kubeInformers.Certificates().V1beta1().CertificateSigningRequests(), + addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), + a.addonAgents, + ) + } + + a.syncContexts = append(a.syncContexts, deployController.SyncContext()) + + go deployController.Run(ctx, 1) + go registrationController.Run(ctx, 1) + go managementAddonController.Run(ctx, 1) + + if addonConfigController != nil { + go addonConfigController.Run(ctx, 1) + } + if managementAddonConfigController != nil { + go managementAddonConfigController.Run(ctx, 1) + } + if csrApproveController != nil { + go csrApproveController.Run(ctx, 1) + } + if csrSignController != nil { + go csrSignController.Run(ctx, 1) + } + return nil +} diff --git a/pkg/addonmanager/cloudevents/manager.go b/pkg/addonmanager/cloudevents/manager.go index c56355794..e11dc5be9 100644 --- a/pkg/addonmanager/cloudevents/manager.go +++ b/pkg/addonmanager/cloudevents/manager.go @@ -2,11 +2,9 @@ package cloudevents import ( "context" - "fmt" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" "k8s.io/client-go/dynamic/dynamicinformer" kubeinformers "k8s.io/client-go/informers" @@ -18,9 +16,7 @@ import ( addoninformers 
"open-cluster-management.io/api/client/addon/informers/externalversions" clusterv1client "open-cluster-management.io/api/client/cluster/clientset/versioned" clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions" - workclientset "open-cluster-management.io/api/client/work/clientset/versioned" workinformers "open-cluster-management.io/api/client/work/informers/externalversions" - workv1informers "open-cluster-management.io/api/client/work/informers/externalversions/work/v1" "open-cluster-management.io/addon-framework/pkg/addonmanager" "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig" @@ -29,7 +25,6 @@ import ( "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/cmaconfig" "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/cmamanagedby" "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration" - "open-cluster-management.io/addon-framework/pkg/agent" "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" "open-cluster-management.io/addon-framework/pkg/index" "open-cluster-management.io/addon-framework/pkg/utils" @@ -37,35 +32,21 @@ import ( "open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/codec" ) +// cloudeventsAddonManager is the implementation of AddonManager with +// the base implementation and cloudevents options type cloudeventsAddonManager struct { - addonAgents map[string]agent.AgentAddon - addonConfigs map[schema.GroupVersionResource]bool - config *rest.Config - options *CloudEventsOptions - syncContexts []factory.SyncContext -} - -func (a *cloudeventsAddonManager) AddAgent(addon agent.AgentAddon) error { - addonOption := addon.GetAgentAddonOptions() - if len(addonOption.AddonName) == 0 { - return fmt.Errorf("addon name should be set") - } - if _, ok := a.addonAgents[addonOption.AddonName]; ok { - return fmt.Errorf("an agent is added for the addon already") - } - 
a.addonAgents[addonOption.AddonName] = addon - return nil -} - -func (a *cloudeventsAddonManager) Trigger(clusterName, addonName string) { - for _, syncContex := range a.syncContexts { - syncContex.Queue().Add(fmt.Sprintf("%s/%s", clusterName, addonName)) - } + *addonmanager.BaseAddonManagerImpl + options *CloudEventsOptions } func (a *cloudeventsAddonManager) Start(ctx context.Context) error { + config := a.GetConfig() + addonAgents := a.GetAddonAgents() + addonConfigs := a.GetAddonConfigs() + syncContexts := a.GetSyncContexts() + var addonNames []string - for key := range a.addonAgents { + for key := range addonAgents { addonNames = append(addonNames, key) } @@ -73,8 +54,8 @@ func (a *cloudeventsAddonManager) Start(ctx context.Context) error { // ManifestWork client that implements the ManifestWorkInterface and ManifestWork informer based on different // driver configuration. // Refer to Event Based Manifestwork proposal in enhancements repo to get more details. - _, config, err := cloudeventswork.NewConfigLoader(a.options.WorkDriver, a.options.WorkDriverConfig). - WithKubeConfig(a.config). + _, clientConfig, err := cloudeventswork.NewConfigLoader(a.options.WorkDriver, a.options.WorkDriverConfig). + WithKubeConfig(a.GetConfig()). LoadConfig() if err != nil { return err @@ -97,7 +78,7 @@ func (a *cloudeventsAddonManager) Start(ctx context.Context) error { }, ) - clientHolder, err := cloudeventswork.NewClientHolderBuilder(config). + clientHolder, err := cloudeventswork.NewClientHolderBuilder(clientConfig). WithClientID(a.options.CloudEventsClientID). WithSourceID(a.options.SourceID). WithInformerConfig(10*time.Minute, workInformOption). 
@@ -107,22 +88,22 @@ func (a *cloudeventsAddonManager) Start(ctx context.Context) error { return err } - kubeClient, err := kubernetes.NewForConfig(a.config) + kubeClient, err := kubernetes.NewForConfig(config) if err != nil { return err } - dynamicClient, err := dynamic.NewForConfig(a.config) + dynamicClient, err := dynamic.NewForConfig(config) if err != nil { return err } - addonClient, err := addonv1alpha1client.NewForConfig(a.config) + addonClient, err := addonv1alpha1client.NewForConfig(config) if err != nil { return err } - clusterClient, err := clusterv1client.NewForConfig(a.config) + clusterClient, err := clusterv1client.NewForConfig(config) if err != nil { return err } @@ -181,56 +162,14 @@ func (a *cloudeventsAddonManager) Start(ctx context.Context) error { return err } - err = a.startWithInformers(ctx, workClient, workInformers, kubeInformers, addonInformers, clusterInformers, dynamicInformers) - if err != nil { - return err - } - - kubeInformers.Start(ctx.Done()) - go workInformers.Informer().Run(ctx.Done()) - addonInformers.Start(ctx.Done()) - clusterInformers.Start(ctx.Done()) - dynamicInformers.Start(ctx.Done()) - return nil -} - -func (a *cloudeventsAddonManager) StartWithInformers(ctx context.Context, - kubeInformers kubeinformers.SharedInformerFactory, - workInformers workinformers.SharedInformerFactory, - addonInformers addoninformers.SharedInformerFactory, - clusterInformers clusterv1informers.SharedInformerFactory, - dynamicInformers dynamicinformer.DynamicSharedInformerFactory) error { - // TODO: Implement this method to support usage within the addon template. - // Requires updating sdk-go to enable clientHolder to manage the shared informer for ManifestWork. 
- return fmt.Errorf("method StartWithInformers is not implemented") -} - -func (a *cloudeventsAddonManager) startWithInformers(ctx context.Context, - workClient workclientset.Interface, - workInformers workv1informers.ManifestWorkInformer, - kubeInformers kubeinformers.SharedInformerFactory, - addonInformers addoninformers.SharedInformerFactory, - clusterInformers clusterv1informers.SharedInformerFactory, - dynamicInformers dynamicinformer.DynamicSharedInformerFactory) error { - - kubeClient, err := kubernetes.NewForConfig(a.config) - if err != nil { - return err - } - - addonClient, err := addonv1alpha1client.NewForConfig(a.config) - if err != nil { - return err - } - v1CSRSupported, v1beta1Supported, err := utils.IsCSRSupported(kubeClient) if err != nil { return err } - for _, agentImpl := range a.addonAgents { + for _, agentImpl := range addonAgents { for _, configGVR := range agentImpl.GetAgentAddonOptions().SupportedConfigGVRs { - a.addonConfigs[configGVR] = true + addonConfigs[configGVR] = true } } @@ -240,14 +179,14 @@ func (a *cloudeventsAddonManager) startWithInformers(ctx context.Context, clusterInformers.Cluster().V1().ManagedClusters(), addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), workInformers, - a.addonAgents, + addonAgents, ) registrationController := registration.NewAddonRegistrationController( addonClient, clusterInformers.Cluster().V1().ManagedClusters(), addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), - a.addonAgents, + addonAgents, ) // This controller is used during migrating addons to be managed by addon-manager. 
@@ -256,26 +195,26 @@ func (a *cloudeventsAddonManager) startWithInformers(ctx context.Context, managementAddonController := cmamanagedby.NewCMAManagedByController( addonClient, addonInformers.Addon().V1alpha1().ClusterManagementAddOns(), - a.addonAgents, - utils.FilterByAddonName(a.addonAgents), + addonAgents, + utils.FilterByAddonName(addonAgents), ) var addonConfigController, managementAddonConfigController factory.Controller - if len(a.addonConfigs) != 0 { + if len(addonConfigs) != 0 { addonConfigController = addonconfig.NewAddonConfigController( addonClient, addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), addonInformers.Addon().V1alpha1().ClusterManagementAddOns(), dynamicInformers, - a.addonConfigs, - utils.FilterByAddonName(a.addonAgents), + addonConfigs, + utils.FilterByAddonName(addonAgents), ) managementAddonConfigController = cmaconfig.NewCMAConfigController( addonClient, addonInformers.Addon().V1alpha1().ClusterManagementAddOns(), dynamicInformers, - a.addonConfigs, - utils.FilterByAddonName(a.addonAgents), + addonConfigs, + utils.FilterByAddonName(addonAgents), ) } @@ -292,14 +231,14 @@ func (a *cloudeventsAddonManager) startWithInformers(ctx context.Context, kubeInformers.Certificates().V1().CertificateSigningRequests(), nil, addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), - a.addonAgents, + addonAgents, ) csrSignController = certificate.NewCSRSignController( kubeClient, clusterInformers.Cluster().V1().ManagedClusters(), kubeInformers.Certificates().V1().CertificateSigningRequests(), addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), - a.addonAgents, + addonAgents, ) } else if v1beta1Supported { csrApproveController = certificate.NewCSRApprovingController( @@ -308,11 +247,11 @@ func (a *cloudeventsAddonManager) startWithInformers(ctx context.Context, nil, kubeInformers.Certificates().V1beta1().CertificateSigningRequests(), addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), - a.addonAgents, + addonAgents, ) } - 
a.syncContexts = append(a.syncContexts, deployController.SyncContext()) + a.SetSyncContexts(append(syncContexts, deployController.SyncContext())) go deployController.Run(ctx, 1) go registrationController.Run(ctx, 1) @@ -330,19 +269,21 @@ func (a *cloudeventsAddonManager) startWithInformers(ctx context.Context, if csrSignController != nil { go csrSignController.Run(ctx, 1) } + + kubeInformers.Start(ctx.Done()) + go workInformers.Informer().Run(ctx.Done()) + addonInformers.Start(ctx.Done()) + clusterInformers.Start(ctx.Done()) + dynamicInformers.Start(ctx.Done()) return nil } // New returns a new addon manager with the given config and optional options func New(config *rest.Config, opts *CloudEventsOptions) (addonmanager.AddonManager, error) { cloudeventsAddonManager := &cloudeventsAddonManager{ - config: config, - syncContexts: []factory.SyncContext{}, - addonConfigs: map[schema.GroupVersionResource]bool{}, - addonAgents: map[string]agent.AgentAddon{}, + BaseAddonManagerImpl: addonmanager.NewBaseAddonManagerImpl(config), + options: opts, } - // set options from the given options - cloudeventsAddonManager.options = opts return cloudeventsAddonManager, nil } diff --git a/pkg/addonmanager/interface.go b/pkg/addonmanager/interface.go new file mode 100644 index 000000000..b6c8ad323 --- /dev/null +++ b/pkg/addonmanager/interface.go @@ -0,0 +1,40 @@ +package addonmanager + +import ( + "context" + + "k8s.io/client-go/dynamic/dynamicinformer" + kubeinformers "k8s.io/client-go/informers" + "open-cluster-management.io/addon-framework/pkg/agent" + addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions" + clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions" + workv1informers "open-cluster-management.io/api/client/work/informers/externalversions" +) + +// BaseAddonManager is the interface to initialize a manager on hub to manage the addon +// agents on all managedcluster +type BaseAddonManager interface { + 
// AddAgent register an addon agent to the manager. + AddAgent(addon agent.AgentAddon) error + + // Trigger triggers a reconcile loop in the manager. Currently it + // only trigger the deploy controller. + Trigger(clusterName, addonName string) + + // StartWithInformers starts all registered addon agent with the given informers. + StartWithInformers(ctx context.Context, + kubeInformers kubeinformers.SharedInformerFactory, + workInformers workv1informers.SharedInformerFactory, + addonInformers addoninformers.SharedInformerFactory, + clusterInformers clusterv1informers.SharedInformerFactory, + dynamicInformers dynamicinformer.DynamicSharedInformerFactory) error +} + +// AddonManager is the interface based on BaseAddonManager to initialize a manager on hub +// to manage the addon agents and controllers +type AddonManager interface { + BaseAddonManager + + // Start starts all registered addon agents. + Start(ctx context.Context) error +} diff --git a/pkg/addonmanager/manager.go b/pkg/addonmanager/manager.go index 57953b722..2fab867fe 100644 --- a/pkg/addonmanager/manager.go +++ b/pkg/addonmanager/manager.go @@ -2,11 +2,9 @@ package addonmanager import ( "context" - "fmt" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" "k8s.io/client-go/dynamic/dynamicinformer" kubeinformers "k8s.io/client-go/informers" @@ -21,87 +19,37 @@ import ( workv1client "open-cluster-management.io/api/client/work/clientset/versioned" workv1informers "open-cluster-management.io/api/client/work/informers/externalversions" - "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig" - "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy" - "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate" - "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/cmaconfig" - 
"open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/cmamanagedby" - "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration" - "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" "open-cluster-management.io/addon-framework/pkg/index" - "open-cluster-management.io/addon-framework/pkg/utils" ) -// AddonManager is the interface to initialize a manager on hub to manage the addon -// agents on all managedcluster -type AddonManager interface { - // AddAgent register an addon agent to the manager. - AddAgent(addon agent.AgentAddon) error - - // Trigger triggers a reconcile loop in the manager. Currently it - // only trigger the deploy controller. - Trigger(clusterName, addonName string) - - // Start starts all registered addon agent. - Start(ctx context.Context) error - - // StartWithInformers starts all registered addon agent with the given informers. - StartWithInformers(ctx context.Context, - kubeInformers kubeinformers.SharedInformerFactory, - workInformers workv1informers.SharedInformerFactory, - addonInformers addoninformers.SharedInformerFactory, - clusterInformers clusterv1informers.SharedInformerFactory, - dynamicInformers dynamicinformer.DynamicSharedInformerFactory) error -} - +// addonManager is the implementation of AddonManager with the base implementation. 
type addonManager struct { - addonAgents map[string]agent.AgentAddon - addonConfigs map[schema.GroupVersionResource]bool - config *rest.Config - syncContexts []factory.SyncContext -} - -func (a *addonManager) AddAgent(addon agent.AgentAddon) error { - addonOption := addon.GetAgentAddonOptions() - if len(addonOption.AddonName) == 0 { - return fmt.Errorf("addon name should be set") - } - if _, ok := a.addonAgents[addonOption.AddonName]; ok { - return fmt.Errorf("an agent is added for the addon already") - } - a.addonAgents[addonOption.AddonName] = addon - return nil -} - -func (a *addonManager) Trigger(clusterName, addonName string) { - for _, syncContex := range a.syncContexts { - syncContex.Queue().Add(fmt.Sprintf("%s/%s", clusterName, addonName)) - } + *BaseAddonManagerImpl } +// Start starts all registered addon agents and controllers. func (a *addonManager) Start(ctx context.Context) error { - kubeClient, err := kubernetes.NewForConfig(a.config) + kubeClient, err := kubernetes.NewForConfig(a.GetConfig()) if err != nil { return err } - workClient, err := workv1client.NewForConfig(a.config) + workClient, err := workv1client.NewForConfig(a.GetConfig()) if err != nil { return err } - dynamicClient, err := dynamic.NewForConfig(a.config) + dynamicClient, err := dynamic.NewForConfig(a.GetConfig()) if err != nil { return err } - addonClient, err := addonv1alpha1client.NewForConfig(a.config) + addonClient, err := addonv1alpha1client.NewForConfig(a.GetConfig()) if err != nil { return err } - clusterClient, err := clusterv1client.NewForConfig(a.config) + clusterClient, err := clusterv1client.NewForConfig(a.GetConfig()) if err != nil { return err } @@ -111,7 +59,7 @@ func (a *addonManager) Start(ctx context.Context) error { dynamicInformers := dynamicinformer.NewDynamicSharedInformerFactory(dynamicClient, 10*time.Minute) var addonNames []string - for key := range a.addonAgents { + for key := range a.GetAddonAgents() { addonNames = append(addonNames, key) } kubeInformers := 
kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, 10*time.Minute, @@ -189,144 +137,9 @@ func (a *addonManager) Start(ctx context.Context) error { return nil } -func (a *addonManager) StartWithInformers(ctx context.Context, - kubeInformers kubeinformers.SharedInformerFactory, - workInformers workv1informers.SharedInformerFactory, - addonInformers addoninformers.SharedInformerFactory, - clusterInformers clusterv1informers.SharedInformerFactory, - dynamicInformers dynamicinformer.DynamicSharedInformerFactory) error { - - kubeClient, err := kubernetes.NewForConfig(a.config) - if err != nil { - return err - } - - addonClient, err := addonv1alpha1client.NewForConfig(a.config) - if err != nil { - return err - } - - workClient, err := workv1client.NewForConfig(a.config) - if err != nil { - return err - } - - v1CSRSupported, v1beta1Supported, err := utils.IsCSRSupported(kubeClient) - if err != nil { - return err - } - - for _, agentImpl := range a.addonAgents { - for _, configGVR := range agentImpl.GetAgentAddonOptions().SupportedConfigGVRs { - a.addonConfigs[configGVR] = true - } - } - - deployController := agentdeploy.NewAddonDeployController( - workClient, - addonClient, - clusterInformers.Cluster().V1().ManagedClusters(), - addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), - workInformers.Work().V1().ManifestWorks(), - a.addonAgents, - ) - - registrationController := registration.NewAddonRegistrationController( - addonClient, - clusterInformers.Cluster().V1().ManagedClusters(), - addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), - a.addonAgents, - ) - - // This controller is used during migrating addons to be managed by addon-manager. - // This should be removed when the migration is done. - // The migration plan refer to https://github.com/open-cluster-management-io/ocm/issues/355. 
- managementAddonController := cmamanagedby.NewCMAManagedByController( - addonClient, - addonInformers.Addon().V1alpha1().ClusterManagementAddOns(), - a.addonAgents, - utils.FilterByAddonName(a.addonAgents), - ) - - var addonConfigController, managementAddonConfigController factory.Controller - if len(a.addonConfigs) != 0 { - addonConfigController = addonconfig.NewAddonConfigController( - addonClient, - addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), - addonInformers.Addon().V1alpha1().ClusterManagementAddOns(), - dynamicInformers, - a.addonConfigs, - utils.FilterByAddonName(a.addonAgents), - ) - managementAddonConfigController = cmaconfig.NewCMAConfigController( - addonClient, - addonInformers.Addon().V1alpha1().ClusterManagementAddOns(), - dynamicInformers, - a.addonConfigs, - utils.FilterByAddonName(a.addonAgents), - ) - } - - var csrApproveController factory.Controller - var csrSignController factory.Controller - // Spawn the following controllers only if v1 CSR api is supported in the - // hub cluster. Under v1beta1 CSR api, all the CSR objects will be signed - // by the kube-controller-manager so custom CSR controller should be - // disabled to avoid conflict. 
- if v1CSRSupported { - csrApproveController = certificate.NewCSRApprovingController( - kubeClient, - clusterInformers.Cluster().V1().ManagedClusters(), - kubeInformers.Certificates().V1().CertificateSigningRequests(), - nil, - addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), - a.addonAgents, - ) - csrSignController = certificate.NewCSRSignController( - kubeClient, - clusterInformers.Cluster().V1().ManagedClusters(), - kubeInformers.Certificates().V1().CertificateSigningRequests(), - addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), - a.addonAgents, - ) - } else if v1beta1Supported { - csrApproveController = certificate.NewCSRApprovingController( - kubeClient, - clusterInformers.Cluster().V1().ManagedClusters(), - nil, - kubeInformers.Certificates().V1beta1().CertificateSigningRequests(), - addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), - a.addonAgents, - ) - } - - a.syncContexts = append(a.syncContexts, deployController.SyncContext()) - - go deployController.Run(ctx, 1) - go registrationController.Run(ctx, 1) - go managementAddonController.Run(ctx, 1) - - if addonConfigController != nil { - go addonConfigController.Run(ctx, 1) - } - if managementAddonConfigController != nil { - go managementAddonConfigController.Run(ctx, 1) - } - if csrApproveController != nil { - go csrApproveController.Run(ctx, 1) - } - if csrSignController != nil { - go csrSignController.Run(ctx, 1) - } - return nil -} - // New returns a new Manager for creating addon agents. func New(config *rest.Config) (AddonManager, error) { return &addonManager{ - config: config, - syncContexts: []factory.SyncContext{}, - addonConfigs: map[schema.GroupVersionResource]bool{}, - addonAgents: map[string]agent.AgentAddon{}, + NewBaseAddonManagerImpl(config), }, nil } From a7bac3bea1ff1f0f5eead9daf1994e34f70dc7b3 Mon Sep 17 00:00:00 2001 From: morvencao Date: Thu, 18 Apr 2024 09:32:54 +0000 Subject: [PATCH 5/6] address comments. 
Signed-off-by: morvencao --- Makefile | 12 -- build/Dockerfile.example | 1 - cmd/example/helloworld/main.go | 27 ++- cmd/example/helloworld_cloudevents/main.go | 145 ------------- .../helloworld-cloudevents/kustomization.yaml | 28 --- .../resources/addon_deployment_config.yaml | 6 - .../resources/cluster_role.yaml | 50 ----- .../resources/cluster_role_binding.yaml | 12 -- ...ld_cloudevents_clustermanagementaddon.yaml | 21 -- .../helloworld_cloudevents_controller.yaml | 43 ---- .../resources/managed_clusterset_binding.yaml | 6 - .../resources/placement.yaml | 10 - .../resources/service_account.yaml | 4 - .../resources/work-driver-config.yaml | 11 - examples/deploy/ocm-cloudevents/install.sh | 87 -------- examples/helloworld_cloudevents/helloworld.go | 95 --------- .../helloworld_cloudevents/helloworld_test.go | 192 ------------------ .../templates/clusterrolebinding.yaml | 12 -- .../manifests/templates/deployment.yaml | 68 ------- .../manifests/templates/serviceaccount.yaml | 5 - pkg/addonmanager/base_manager.go | 26 +-- pkg/addonmanager/cloudevents/manager.go | 115 +---------- pkg/addonmanager/interface.go | 8 +- pkg/addonmanager/manager.go | 2 +- 24 files changed, 34 insertions(+), 952 deletions(-) delete mode 100644 cmd/example/helloworld_cloudevents/main.go delete mode 100644 examples/deploy/addon/helloworld-cloudevents/kustomization.yaml delete mode 100644 examples/deploy/addon/helloworld-cloudevents/resources/addon_deployment_config.yaml delete mode 100644 examples/deploy/addon/helloworld-cloudevents/resources/cluster_role.yaml delete mode 100644 examples/deploy/addon/helloworld-cloudevents/resources/cluster_role_binding.yaml delete mode 100644 examples/deploy/addon/helloworld-cloudevents/resources/helloworld_cloudevents_clustermanagementaddon.yaml delete mode 100644 examples/deploy/addon/helloworld-cloudevents/resources/helloworld_cloudevents_controller.yaml delete mode 100644 
examples/deploy/addon/helloworld-cloudevents/resources/managed_clusterset_binding.yaml delete mode 100644 examples/deploy/addon/helloworld-cloudevents/resources/placement.yaml delete mode 100644 examples/deploy/addon/helloworld-cloudevents/resources/service_account.yaml delete mode 100644 examples/deploy/addon/helloworld-cloudevents/resources/work-driver-config.yaml delete mode 100755 examples/deploy/ocm-cloudevents/install.sh delete mode 100644 examples/helloworld_cloudevents/helloworld.go delete mode 100644 examples/helloworld_cloudevents/helloworld_test.go delete mode 100644 examples/helloworld_cloudevents/manifests/templates/clusterrolebinding.yaml delete mode 100644 examples/helloworld_cloudevents/manifests/templates/deployment.yaml delete mode 100644 examples/helloworld_cloudevents/manifests/templates/serviceaccount.yaml diff --git a/Makefile b/Makefile index 53102acdd..415d75d87 100644 --- a/Makefile +++ b/Makefile @@ -56,9 +56,6 @@ verify: verify-gocilint deploy-ocm: examples/deploy/ocm/install.sh -deploy-ocm-cloudevents: - examples/deploy/ocm-cloudevents/install.sh - deploy-hosted-ocm: examples/deploy/hosted-ocm/install.sh @@ -74,12 +71,6 @@ deploy-helloworld: ensure-kustomize $(KUSTOMIZE) build examples/deploy/addon/helloworld | $(KUBECTL) apply -f - mv examples/deploy/addon/helloworld/kustomization.yaml.tmp examples/deploy/addon/helloworld/kustomization.yaml -deploy-helloworld-cloudevents: ensure-kustomize - cp examples/deploy/addon/helloworld-cloudevents/kustomization.yaml examples/deploy/addon/helloworld-cloudevents/kustomization.yaml.tmp - cd examples/deploy/addon/helloworld-cloudevents && ../../../../$(KUSTOMIZE) edit set image quay.io/open-cluster-management/addon-examples=$(EXAMPLE_IMAGE_NAME) - $(KUSTOMIZE) build examples/deploy/addon/helloworld-cloudevents | $(KUBECTL) apply -f - - mv examples/deploy/addon/helloworld-cloudevents/kustomization.yaml.tmp examples/deploy/addon/helloworld-cloudevents/kustomization.yaml - deploy-helloworld-helm: 
ensure-kustomize cp examples/deploy/addon/helloworld-helm/kustomization.yaml examples/deploy/addon/helloworld-helm/kustomization.yaml.tmp cd examples/deploy/addon/helloworld-helm && ../../../../$(KUSTOMIZE) edit set image quay.io/open-cluster-management/addon-examples=$(EXAMPLE_IMAGE_NAME) @@ -120,9 +111,6 @@ undeploy-busybox: ensure-kustomize undeploy-helloworld: ensure-kustomize $(KUSTOMIZE) build examples/deploy/addon/helloworld | $(KUBECTL) delete --ignore-not-found -f - -undeploy-helloworld-cloudevents: ensure-kustomize - $(KUSTOMIZE) build examples/deploy/addon/helloworld-cloudevents | $(KUBECTL) delete --ignore-not-found -f - - undeploy-helloworld-helm: ensure-kustomize $(KUSTOMIZE) build examples/deploy/addon/helloworld-helm | $(KUBECTL) delete --ignore-not-found -f - diff --git a/build/Dockerfile.example b/build/Dockerfile.example index 04a821f8c..fafd0f4d7 100644 --- a/build/Dockerfile.example +++ b/build/Dockerfile.example @@ -8,7 +8,6 @@ RUN make build --warn-undefined-variables FROM registry.access.redhat.com/ubi8/ubi-minimal:latest COPY --from=builder /go/src/open-cluster-management.io/addon-framework/busybox / COPY --from=builder /go/src/open-cluster-management.io/addon-framework/helloworld / -COPY --from=builder /go/src/open-cluster-management.io/addon-framework/helloworld_cloudevents / COPY --from=builder /go/src/open-cluster-management.io/addon-framework/helloworld_helm / COPY --from=builder /go/src/open-cluster-management.io/addon-framework/helloworld_hosted / diff --git a/cmd/example/helloworld/main.go b/cmd/example/helloworld/main.go index b933922a5..17a597e73 100644 --- a/cmd/example/helloworld/main.go +++ b/cmd/example/helloworld/main.go @@ -21,6 +21,7 @@ import ( "open-cluster-management.io/addon-framework/examples/helloworld_agent" "open-cluster-management.io/addon-framework/pkg/addonfactory" "open-cluster-management.io/addon-framework/pkg/addonmanager" + "open-cluster-management.io/addon-framework/pkg/addonmanager/cloudevents" cmdfactory 
"open-cluster-management.io/addon-framework/pkg/cmd/factory" "open-cluster-management.io/addon-framework/pkg/utils" "open-cluster-management.io/addon-framework/pkg/version" @@ -66,24 +67,40 @@ func newCommand() *cobra.Command { } func newControllerCommand() *cobra.Command { + o := cloudevents.NewCloudEventsOptions() + c := &addManagerConfig{cloudeventsOptions: o} cmd := cmdfactory. - NewControllerCommandConfig("helloworld-addon-controller", version.Get(), runController). + NewControllerCommandConfig("helloworld-addon-controller", version.Get(), c.runController). NewCommand() cmd.Use = "controller" cmd.Short = "Start the addon controller" + o.AddFlags(cmd) return cmd } -func runController(ctx context.Context, kubeConfig *rest.Config) error { +// addManagerConfig holds cloudevents configuration for addon manager +type addManagerConfig struct { + cloudeventsOptions *cloudevents.CloudEventsOptions +} + +func (c *addManagerConfig) runController(ctx context.Context, kubeConfig *rest.Config) error { addonClient, err := addonv1alpha1client.NewForConfig(kubeConfig) if err != nil { return err } - mgr, err := addonmanager.New(kubeConfig) - if err != nil { - return err + var mgr addonmanager.AddonManager + if c.cloudeventsOptions.WorkDriver == "kube" { + mgr, err = addonmanager.New(kubeConfig) + if err != nil { + return err + } + } else { + mgr, err = cloudevents.New(kubeConfig, c.cloudeventsOptions) + if err != nil { + return err + } } registrationOption := helloworld.NewRegistrationOption( diff --git a/cmd/example/helloworld_cloudevents/main.go b/cmd/example/helloworld_cloudevents/main.go deleted file mode 100644 index f1e22841a..000000000 --- a/cmd/example/helloworld_cloudevents/main.go +++ /dev/null @@ -1,145 +0,0 @@ -package main - -import ( - "context" - goflag "flag" - "fmt" - "math/rand" - "os" - "time" - - "github.com/spf13/cobra" - "github.com/spf13/pflag" - utilrand "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/client-go/rest" - utilflag 
"k8s.io/component-base/cli/flag" - logs "k8s.io/component-base/logs" - "k8s.io/klog/v2" - addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" - - "open-cluster-management.io/addon-framework/examples/helloworld_agent" - "open-cluster-management.io/addon-framework/examples/helloworld_cloudevents" - "open-cluster-management.io/addon-framework/pkg/addonfactory" - "open-cluster-management.io/addon-framework/pkg/addonmanager/cloudevents" - cmdfactory "open-cluster-management.io/addon-framework/pkg/cmd/factory" - "open-cluster-management.io/addon-framework/pkg/utils" - "open-cluster-management.io/addon-framework/pkg/version" -) - -func main() { - rand.Seed(time.Now().UTC().UnixNano()) - - pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc) - pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) - - logs.AddFlags(pflag.CommandLine) - logs.InitLogs() - defer logs.FlushLogs() - - command := newCommand() - if err := command.Execute(); err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } -} - -func newCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "addon", - Short: "helloworld example addon", - Run: func(cmd *cobra.Command, args []string) { - if err := cmd.Help(); err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - } - os.Exit(1) - }, - } - - if v := version.Get().String(); len(v) == 0 { - cmd.Version = "" - } else { - cmd.Version = v - } - - cmd.AddCommand(newControllerCommand()) - cmd.AddCommand(helloworld_agent.NewAgentCommand(helloworld_cloudevents.AddonName)) - - return cmd -} - -func newControllerCommand() *cobra.Command { - o := cloudevents.NewCloudEventsOptions() - c := &addManagerConfig{cloudeventsOptions: o} - cmd := cmdfactory. - NewControllerCommandConfig("helloworld-addon-controller", version.Get(), c.runController). 
- NewCommand() - cmd.Use = "controller" - cmd.Short = "Start the addon controller with cloudevents" - o.AddFlags(cmd) - - return cmd -} - -// addManagerConfig holds cloudevents configuration for addon manager -type addManagerConfig struct { - cloudeventsOptions *cloudevents.CloudEventsOptions -} - -func (c *addManagerConfig) runController(ctx context.Context, kubeConfig *rest.Config) error { - addonClient, err := addonv1alpha1client.NewForConfig(kubeConfig) - if err != nil { - return err - } - - mgr, err := cloudevents.New(kubeConfig, c.cloudeventsOptions) - if err != nil { - return err - } - - registrationOption := helloworld_cloudevents.NewRegistrationOption( - kubeConfig, - helloworld_cloudevents.AddonName, - utilrand.String(5), - ) - - // Set agent install namespace from addon deployment config if it exists - registrationOption.AgentInstallNamespace = utils.AgentInstallNamespaceFromDeploymentConfigFunc( - utils.NewAddOnDeploymentConfigGetter(addonClient), - ) - - agentAddon, err := addonfactory.NewAgentAddonFactory(helloworld_cloudevents.AddonName, helloworld_cloudevents.FS, "manifests/templates"). - WithConfigGVRs(utils.AddOnDeploymentConfigGVR). - WithGetValuesFuncs( - helloworld_cloudevents.GetDefaultValues, - addonfactory.GetAddOnDeploymentConfigValues( - utils.NewAddOnDeploymentConfigGetter(addonClient), - addonfactory.ToAddOnDeploymentConfigValues, - addonfactory.ToImageOverrideValuesFunc("Image", helloworld_cloudevents.DefaultImage), - ), - ). - WithAgentRegistrationOption(registrationOption). - WithAgentInstallNamespace( - utils.AgentInstallNamespaceFromDeploymentConfigFunc( - utils.NewAddOnDeploymentConfigGetter(addonClient), - ), - ). - WithAgentHealthProber(helloworld_cloudevents.AgentHealthProber()). 
- BuildTemplateAgentAddon() - if err != nil { - klog.Errorf("failed to build agent %v", err) - return err - } - - err = mgr.AddAgent(agentAddon) - if err != nil { - klog.Fatal(err) - } - - err = mgr.Start(ctx) - if err != nil { - klog.Fatal(err) - } - <-ctx.Done() - - return nil -} diff --git a/examples/deploy/addon/helloworld-cloudevents/kustomization.yaml b/examples/deploy/addon/helloworld-cloudevents/kustomization.yaml deleted file mode 100644 index 6e31a765c..000000000 --- a/examples/deploy/addon/helloworld-cloudevents/kustomization.yaml +++ /dev/null @@ -1,28 +0,0 @@ -namespace: open-cluster-management - -resources: -- resources/cluster_role.yaml -- resources/cluster_role_binding.yaml -- resources/service_account.yaml -- resources/managed_clusterset_binding.yaml -- resources/placement.yaml -- resources/addon_deployment_config.yaml -- resources/helloworld_cloudevents_clustermanagementaddon.yaml -- resources/helloworld_cloudevents_controller.yaml -- resources/work-driver-config.yaml - -images: -- name: quay.io/open-cluster-management/addon-examples - newName: quay.io/open-cluster-management/addon-examples - newTag: latest -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -vars: - - name: EXAMPLE_IMAGE_NAME - objref: - apiVersion: apps/v1 - kind: Deployment - name: helloworldcloudevents-controller - fieldref: - fieldpath: spec.template.spec.containers.[name=helloworldcloudevents-controller].image diff --git a/examples/deploy/addon/helloworld-cloudevents/resources/addon_deployment_config.yaml b/examples/deploy/addon/helloworld-cloudevents/resources/addon_deployment_config.yaml deleted file mode 100644 index d29abd622..000000000 --- a/examples/deploy/addon/helloworld-cloudevents/resources/addon_deployment_config.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: addon.open-cluster-management.io/v1alpha1 -kind: AddOnDeploymentConfig -metadata: - name: global -spec: - agentInstallNamespace: open-cluster-management-agent-addon diff --git 
a/examples/deploy/addon/helloworld-cloudevents/resources/cluster_role.yaml b/examples/deploy/addon/helloworld-cloudevents/resources/cluster_role.yaml deleted file mode 100644 index e3623c705..000000000 --- a/examples/deploy/addon/helloworld-cloudevents/resources/cluster_role.yaml +++ /dev/null @@ -1,50 +0,0 @@ - kind: ClusterRole - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: helloworldcloudevents-addon - rules: - - apiGroups: [""] - resources: ["configmaps", "events"] - verbs: ["get", "list", "watch", "create", "update", "delete", "deletecollection", "patch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "list", "watch", "create", "update", "patch"] - - apiGroups: ["rbac.authorization.k8s.io"] - resources: ["roles", "rolebindings"] - verbs: ["get", "list", "watch", "create", "update", "delete"] - - apiGroups: ["authorization.k8s.io"] - resources: ["subjectaccessreviews"] - verbs: ["get", "create"] - - apiGroups: ["certificates.k8s.io"] - resources: ["certificatesigningrequests", "certificatesigningrequests/approval"] - verbs: ["get", "list", "watch", "create", "update"] - - apiGroups: ["certificates.k8s.io"] - resources: ["signers"] - verbs: ["approve"] - - apiGroups: ["cluster.open-cluster-management.io"] - resources: ["managedclusters"] - verbs: ["get", "list", "watch"] - - apiGroups: ["work.open-cluster-management.io"] - resources: ["manifestworks"] - verbs: ["create", "update", "get", "list", "watch", "delete", "deletecollection", "patch"] - - apiGroups: ["addon.open-cluster-management.io"] - resources: ["managedclusteraddons/finalizers"] - verbs: ["update"] - - apiGroups: [ "addon.open-cluster-management.io" ] - resources: [ "clustermanagementaddons/finalizers" ] - verbs: [ "update" ] - - apiGroups: [ "addon.open-cluster-management.io" ] - resources: [ "clustermanagementaddons/status" ] - verbs: ["update", "patch"] - - apiGroups: ["addon.open-cluster-management.io"] - resources: ["clustermanagementaddons"] - 
verbs: ["get", "list", "watch", "patch"] - - apiGroups: ["addon.open-cluster-management.io"] - resources: ["managedclusteraddons"] - verbs: ["get", "list", "watch", "create", "update", "delete"] - - apiGroups: ["addon.open-cluster-management.io"] - resources: ["managedclusteraddons/status"] - verbs: ["update", "patch"] - - apiGroups: ["addon.open-cluster-management.io"] - resources: ["addondeploymentconfigs"] - verbs: ["get", "list", "watch"] diff --git a/examples/deploy/addon/helloworld-cloudevents/resources/cluster_role_binding.yaml b/examples/deploy/addon/helloworld-cloudevents/resources/cluster_role_binding.yaml deleted file mode 100644 index 88a1823aa..000000000 --- a/examples/deploy/addon/helloworld-cloudevents/resources/cluster_role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: helloworldcloudevents-addon -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: helloworldcloudevents-addon -subjects: - - kind: ServiceAccount - name: helloworldcloudevents-addon-sa - namespace: open-cluster-management diff --git a/examples/deploy/addon/helloworld-cloudevents/resources/helloworld_cloudevents_clustermanagementaddon.yaml b/examples/deploy/addon/helloworld-cloudevents/resources/helloworld_cloudevents_clustermanagementaddon.yaml deleted file mode 100644 index dcfec9820..000000000 --- a/examples/deploy/addon/helloworld-cloudevents/resources/helloworld_cloudevents_clustermanagementaddon.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: addon.open-cluster-management.io/v1alpha1 -kind: ClusterManagementAddOn -metadata: - name: helloworldcloudevents -spec: - addOnMeta: - displayName: helloworldcloudevents - description: "helloworldcloudevents is an example addon using cloudevents created by go template" - supportedConfigs: - - group: addon.open-cluster-management.io - resource: addondeploymentconfigs - installStrategy: - type: Placements - placements: - - name: global - 
namespace: open-cluster-management - configs: - - group: addon.open-cluster-management.io - resource: addondeploymentconfigs - name: global - namespace: open-cluster-management diff --git a/examples/deploy/addon/helloworld-cloudevents/resources/helloworld_cloudevents_controller.yaml b/examples/deploy/addon/helloworld-cloudevents/resources/helloworld_cloudevents_controller.yaml deleted file mode 100644 index 9bbb3bbe5..000000000 --- a/examples/deploy/addon/helloworld-cloudevents/resources/helloworld_cloudevents_controller.yaml +++ /dev/null @@ -1,43 +0,0 @@ -kind: Deployment -apiVersion: apps/v1 -metadata: - name: helloworldcloudevents-controller - labels: - app: helloworldcloudevents-controller -spec: - replicas: 1 - selector: - matchLabels: - app: helloworldcloudevents-controller - template: - metadata: - labels: - app: helloworldcloudevents-controller - spec: - serviceAccountName: helloworldcloudevents-addon-sa - containers: - - name: helloworldcloudevents-controller - image: quay.io/open-cluster-management/addon-examples - imagePullPolicy: IfNotPresent - env: - - name: EXAMPLE_IMAGE_NAME - value: $(EXAMPLE_IMAGE_NAME) - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - args: - - "/helloworld_cloudevents" - - "controller" - - "--work-driver=mqtt" - - "--work-driver-config=/var/run/secrets/hub/config.yaml" - - "--cloudevents-client-id=addon-manager-$(POD_NAME)" - - "--source-id=addon-manager" - volumeMounts: - - mountPath: /var/run/secrets/hub - name: workdriverconfig - readOnly: true - volumes: - - name: workdriverconfig - secret: - secretName: work-driver-config \ No newline at end of file diff --git a/examples/deploy/addon/helloworld-cloudevents/resources/managed_clusterset_binding.yaml b/examples/deploy/addon/helloworld-cloudevents/resources/managed_clusterset_binding.yaml deleted file mode 100644 index 7b1ce9ca7..000000000 --- a/examples/deploy/addon/helloworld-cloudevents/resources/managed_clusterset_binding.yaml +++ /dev/null @@ -1,6 
+0,0 @@ -apiVersion: cluster.open-cluster-management.io/v1beta2 -kind: ManagedClusterSetBinding -metadata: - name: global -spec: - clusterSet: global diff --git a/examples/deploy/addon/helloworld-cloudevents/resources/placement.yaml b/examples/deploy/addon/helloworld-cloudevents/resources/placement.yaml deleted file mode 100644 index da500bac2..000000000 --- a/examples/deploy/addon/helloworld-cloudevents/resources/placement.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: cluster.open-cluster-management.io/v1beta1 -kind: Placement -metadata: - name: global -spec: - clusterSets: - - global - predicates: - - requiredClusterSelector: - labelSelector: {} diff --git a/examples/deploy/addon/helloworld-cloudevents/resources/service_account.yaml b/examples/deploy/addon/helloworld-cloudevents/resources/service_account.yaml deleted file mode 100644 index ba111648c..000000000 --- a/examples/deploy/addon/helloworld-cloudevents/resources/service_account.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: helloworldcloudevents-addon-sa diff --git a/examples/deploy/addon/helloworld-cloudevents/resources/work-driver-config.yaml b/examples/deploy/addon/helloworld-cloudevents/resources/work-driver-config.yaml deleted file mode 100644 index 5070201c9..000000000 --- a/examples/deploy/addon/helloworld-cloudevents/resources/work-driver-config.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: work-driver-config -stringData: - config.yaml: | - brokerHost: mosquitto.mqtt:1883 - topics: - sourceEvents: sources/addon-manager/clusters/+/sourceevents - agentEvents: sources/addon-manager/clusters/+/agentevents - sourceBroadcast: sources/addon-manager/sourcebroadcast diff --git a/examples/deploy/ocm-cloudevents/install.sh b/examples/deploy/ocm-cloudevents/install.sh deleted file mode 100755 index 3ef0b26be..000000000 --- a/examples/deploy/ocm-cloudevents/install.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/bin/bash - -set -o 
nounset -set -o pipefail - -KUBECTL=${KUBECTL:-kubectl} - -BUILD_DIR="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" -DEPLOY_DIR="$(dirname "$BUILD_DIR")" -EXAMPLE_DIR="$(dirname "$DEPLOY_DIR")" -REPO_DIR="$(dirname "$EXAMPLE_DIR")" -WORK_DIR="${REPO_DIR}/_output" -CLUSTERADM="${WORK_DIR}/bin/clusteradm" - -export PATH=$PATH:${WORK_DIR}/bin - -echo "############ Download clusteradm" -mkdir -p "${WORK_DIR}/bin" -wget -qO- https://github.com/open-cluster-management-io/clusteradm/releases/latest/download/clusteradm_${GOHOSTOS}_${GOHOSTARCH}.tar.gz | sudo tar -xvz -C ${WORK_DIR}/bin/ -chmod +x "${CLUSTERADM}" - -echo "############ Init hub" -${CLUSTERADM} init --wait --bundle-version=latest -joincmd=$(${CLUSTERADM} get token | grep clusteradm) - -echo "############ Init agent as cluster1" -$(echo ${joincmd} --force-internal-endpoint-lookup --wait --bundle-version=latest | sed "s//${MANAGED_CLUSTER_NAME}/g") - -echo "############ Accept join of cluster1" -${CLUSTERADM} accept --clusters ${MANAGED_CLUSTER_NAME} --wait - -echo "############ All-in-one env is installed successfully!!" 
- -echo "############ Deploy mqtt broker" -${KUBECTL} apply -f ${DEPLOY_DIR}/mqtt/mqtt-broker.yaml - -echo "############ Configure the work-agent" -${KUBECTL} -n open-cluster-management scale --replicas=0 deployment/klusterlet - -cat << EOF | ${KUBECTL} -n open-cluster-management-agent apply -f - -apiVersion: v1 -kind: Secret -metadata: - name: work-driver-config -stringData: - config.yaml: | - brokerHost: mosquitto.mqtt:1883 - topics: - sourceEvents: sources/addon-manager/clusters/${MANAGED_CLUSTER_NAME}/sourceevents - agentEvents: sources/addon-manager/clusters/${MANAGED_CLUSTER_NAME}/agentevents - agentBroadcast: clusters/${MANAGED_CLUSTER_NAME}/agentbroadcast -EOF - -# patch klusterlet-work-agent deployment to use mqtt as workload source driver -${KUBECTL} -n open-cluster-management-agent patch deployment/klusterlet-work-agent --type=json \ - -p='[ - {"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--cloudevents-client-codecs=manifestbundle"}, - {"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--cloudevents-client-id=work-agent-$(POD_NAME)"}, - {"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--workload-source-driver=mqtt"}, - {"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--workload-source-config=/var/run/secrets/hub/config.yaml"} - ]' - -${KUBECTL} -n open-cluster-management-agent patch deployment/klusterlet-work-agent --type=json \ - -p='[{"op": "add", "path": "/spec/template/spec/volumes/-", "value": {"name": "workdriverconfig","secret": {"secretName": "work-driver-config"}}}]' - -${KUBECTL} -n open-cluster-management-agent patch deployment/klusterlet-work-agent --type=json \ - -p='[{"op": "add", "path": "/spec/template/spec/containers/0/volumeMounts/-", "value": {"name": "workdriverconfig","mountPath": "/var/run/secrets/hub"}}]' - -${KUBECTL} -n open-cluster-management-agent scale --replicas=1 deployment/klusterlet-work-agent -${KUBECTL} -n 
open-cluster-management-agent rollout status deployment/klusterlet-work-agent --timeout=120s -${KUBECTL} -n open-cluster-management-agent get pod -l app=klusterlet-manifestwork-agent - -# TODO: add live probe for the work-agent to check if it is connected to the mqtt broker -isRunning=false -for i in {1..10}; do - if ${KUBECTL} -n open-cluster-management-agent logs deployment/klusterlet-work-agent | grep "subscribing to topics"; then - echo "klusterlet-work-agent is subscribing to topics from mqtt broker" - isRunning=true - break - fi - sleep 12 -done - -if [ "$isRunning" = false ]; then - echo "timeout waiting for klusterlet-work-agent to subscribe to topics from mqtt broker" - exit 1 -fi diff --git a/examples/helloworld_cloudevents/helloworld.go b/examples/helloworld_cloudevents/helloworld.go deleted file mode 100644 index 3ec453dd9..000000000 --- a/examples/helloworld_cloudevents/helloworld.go +++ /dev/null @@ -1,95 +0,0 @@ -package helloworld_cloudevents - -import ( - "embed" - "fmt" - "os" - - "k8s.io/client-go/rest" - "open-cluster-management.io/addon-framework/examples/rbac" - "open-cluster-management.io/addon-framework/pkg/addonfactory" - "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/utils" - addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" - clusterv1 "open-cluster-management.io/api/cluster/v1" - workapiv1 "open-cluster-management.io/api/work/v1" -) - -const ( - DefaultImage = "quay.io/open-cluster-management/addon-examples:latest" - AddonName = "helloworldcloudevents" - InstallationNamespace = "open-cluster-management-agent-addon" -) - -//go:embed manifests -//go:embed manifests/templates -var FS embed.FS - -func NewRegistrationOption(kubeConfig *rest.Config, addonName, agentName string) *agent.RegistrationOption { - return &agent.RegistrationOption{ - CSRConfigurations: agent.KubeClientSignerConfigurations(addonName, agentName), - CSRApproveCheck: 
utils.DefaultCSRApprover(agentName), - PermissionConfig: rbac.AddonRBAC(kubeConfig), - } -} - -func GetDefaultValues(cluster *clusterv1.ManagedCluster, - addon *addonapiv1alpha1.ManagedClusterAddOn) (addonfactory.Values, error) { - - image := os.Getenv("EXAMPLE_IMAGE_NAME") - if len(image) == 0 { - image = DefaultImage - } - - manifestConfig := struct { - KubeConfigSecret string - ClusterName string - Image string - }{ - KubeConfigSecret: fmt.Sprintf("%s-hub-kubeconfig", addon.Name), - ClusterName: cluster.Name, - Image: image, - } - - return addonfactory.StructToValues(manifestConfig), nil -} - -func AgentHealthProber() *agent.HealthProber { - return &agent.HealthProber{ - Type: agent.HealthProberTypeWork, - WorkProber: &agent.WorkHealthProber{ - ProbeFields: []agent.ProbeField{ - { - ResourceIdentifier: workapiv1.ResourceIdentifier{ - Group: "apps", - Resource: "deployments", - Name: "helloworldcloudevents-agent", - Namespace: InstallationNamespace, - }, - ProbeRules: []workapiv1.FeedbackRule{ - { - Type: workapiv1.WellKnownStatusType, - }, - }, - }, - }, - HealthCheck: func(identifier workapiv1.ResourceIdentifier, result workapiv1.StatusFeedbackResult) error { - if len(result.Values) == 0 { - return fmt.Errorf("no values are probed for deployment %s/%s", identifier.Namespace, identifier.Name) - } - for _, value := range result.Values { - if value.Name != "ReadyReplicas" { - continue - } - - if *value.Value.Integer >= 1 { - return nil - } - - return fmt.Errorf("readyReplica is %d for deployement %s/%s", *value.Value.Integer, identifier.Namespace, identifier.Name) - } - return fmt.Errorf("readyReplica is not probed") - }, - }, - } -} diff --git a/examples/helloworld_cloudevents/helloworld_test.go b/examples/helloworld_cloudevents/helloworld_test.go deleted file mode 100644 index 9939cd9be..000000000 --- a/examples/helloworld_cloudevents/helloworld_test.go +++ /dev/null @@ -1,192 +0,0 @@ -package helloworld_cloudevents - -import ( - "testing" - - appsv1 
"k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - utilrand "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/klog/v2" - addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" - fakeaddon "open-cluster-management.io/api/client/addon/clientset/versioned/fake" - clusterv1 "open-cluster-management.io/api/cluster/v1" - - "open-cluster-management.io/addon-framework/pkg/addonfactory" - "open-cluster-management.io/addon-framework/pkg/addonmanager/addontesting" - "open-cluster-management.io/addon-framework/pkg/utils" -) - -var ( - nodeSelector = map[string]string{"kubernetes.io/os": "linux"} - tolerations = []corev1.Toleration{{Key: "foo", Operator: corev1.TolerationOpExists, Effect: corev1.TaintEffectNoExecute}} -) - -func TestManifestAddonAgent(t *testing.T) { - cases := []struct { - name string - managedCluster *clusterv1.ManagedCluster - managedClusterAddOn *addonapiv1alpha1.ManagedClusterAddOn - configs []runtime.Object - verifyDeployment func(t *testing.T, objs []runtime.Object) - }{ - { - name: "no configs", - managedCluster: addontesting.NewManagedCluster("cluster1"), - managedClusterAddOn: addontesting.NewAddon("helloworldcloudevents", "cluster1"), - configs: []runtime.Object{}, - verifyDeployment: func(t *testing.T, objs []runtime.Object) { - deployment := findHelloWorldCloudEventsDeployment(objs) - if deployment == nil { - t.Fatalf("expected deployment, but failed") - } - - if deployment.Name != "helloworldcloudevents-agent" { - t.Errorf("unexpected deployment name %s", deployment.Name) - } - - if deployment.Namespace != addonfactory.AddonDefaultInstallNamespace { - t.Errorf("unexpected deployment namespace %s", deployment.Namespace) - } - - if deployment.Spec.Template.Spec.Containers[0].Image != DefaultImage { - t.Errorf("unexpected image %s", deployment.Spec.Template.Spec.Containers[0].Image) - } - }, - }, - { - name: "override 
image with annotation", - managedCluster: addontesting.NewManagedCluster("cluster1"), - managedClusterAddOn: func() *addonapiv1alpha1.ManagedClusterAddOn { - addon := addontesting.NewAddon("test", "cluster1") - addon.Annotations = map[string]string{ - "addon.open-cluster-management.io/values": `{"Image":"quay.io/test:test"}`} - return addon - }(), - configs: []runtime.Object{}, - verifyDeployment: func(t *testing.T, objs []runtime.Object) { - deployment := findHelloWorldCloudEventsDeployment(objs) - if deployment == nil { - t.Fatalf("expected deployment, but failed") - } - - if deployment.Name != "helloworldcloudevents-agent" { - t.Errorf("unexpected deployment name %s", deployment.Name) - } - - if deployment.Namespace != addonfactory.AddonDefaultInstallNamespace { - t.Errorf("unexpected deployment namespace %s", deployment.Namespace) - } - - if deployment.Spec.Template.Spec.Containers[0].Image != "quay.io/test:test" { - t.Errorf("unexpected image %s", deployment.Spec.Template.Spec.Containers[0].Image) - } - }, - }, - { - name: "with addon deployment config", - managedCluster: addontesting.NewManagedCluster("cluster1"), - managedClusterAddOn: func() *addonapiv1alpha1.ManagedClusterAddOn { - addon := addontesting.NewAddon("test", "cluster1") - addon.Status.ConfigReferences = []addonapiv1alpha1.ConfigReference{ - { - ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ - Group: "addon.open-cluster-management.io", - Resource: "addondeploymentconfigs", - }, - ConfigReferent: addonapiv1alpha1.ConfigReferent{ - Namespace: "cluster1", - Name: "config", - }, - }, - } - return addon - }(), - configs: []runtime.Object{ - &addonapiv1alpha1.AddOnDeploymentConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: "config", - Namespace: "cluster1", - }, - Spec: addonapiv1alpha1.AddOnDeploymentConfigSpec{ - NodePlacement: &addonapiv1alpha1.NodePlacement{ - Tolerations: tolerations, - NodeSelector: nodeSelector, - }, - }, - }, - }, - verifyDeployment: func(t *testing.T, objs 
[]runtime.Object) { - deployment := findHelloWorldCloudEventsDeployment(objs) - if deployment == nil { - t.Fatalf("expected deployment, but failed") - } - - if deployment.Name != "helloworldcloudevents-agent" { - t.Errorf("unexpected deployment name %s", deployment.Name) - } - - if deployment.Namespace != addonfactory.AddonDefaultInstallNamespace { - t.Errorf("unexpected deployment namespace %s", deployment.Namespace) - } - - if deployment.Spec.Template.Spec.Containers[0].Image != DefaultImage { - t.Errorf("unexpected image %s", deployment.Spec.Template.Spec.Containers[0].Image) - } - - if !equality.Semantic.DeepEqual(deployment.Spec.Template.Spec.NodeSelector, nodeSelector) { - t.Errorf("unexpected nodeSeletcor %v", deployment.Spec.Template.Spec.NodeSelector) - } - - if !equality.Semantic.DeepEqual(deployment.Spec.Template.Spec.Tolerations, tolerations) { - t.Errorf("unexpected tolerations %v", deployment.Spec.Template.Spec.Tolerations) - } - }, - }, - } - - for _, c := range cases { - fakeAddonClient := fakeaddon.NewSimpleClientset(c.configs...) - - agentAddon, err := addonfactory.NewAgentAddonFactory(AddonName, FS, "manifests/templates"). - WithConfigGVRs(utils.AddOnDeploymentConfigGVR). - WithGetValuesFuncs( - GetDefaultValues, - addonfactory.GetAddOnDeploymentConfigValues( - addonfactory.NewAddOnDeploymentConfigGetter(fakeAddonClient), - addonfactory.ToAddOnDeploymentConfigValues, - ), - addonfactory.GetValuesFromAddonAnnotation, - ). - WithAgentRegistrationOption(NewRegistrationOption(nil, AddonName, utilrand.String(5))). - WithAgentHealthProber(AgentHealthProber()). 
- BuildTemplateAgentAddon() - if err != nil { - klog.Fatalf("failed to build agent %v", err) - } - - objects, err := agentAddon.Manifests(c.managedCluster, c.managedClusterAddOn) - if err != nil { - t.Fatalf("failed to get manifests %v", err) - } - - if len(objects) != 3 { - t.Fatalf("expected 3 manifests, but %v", objects) - } - - c.verifyDeployment(t, objects) - } -} - -func findHelloWorldCloudEventsDeployment(objs []runtime.Object) *appsv1.Deployment { - for _, obj := range objs { - switch obj := obj.(type) { - case *appsv1.Deployment: - return obj - } - } - - return nil -} diff --git a/examples/helloworld_cloudevents/manifests/templates/clusterrolebinding.yaml b/examples/helloworld_cloudevents/manifests/templates/clusterrolebinding.yaml deleted file mode 100644 index f0271f974..000000000 --- a/examples/helloworld_cloudevents/manifests/templates/clusterrolebinding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: helloworldcloudevents-agent -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin -subjects: - - kind: ServiceAccount - name: helloworldcloudevents-agent-sa - namespace: {{ .AddonInstallNamespace }} diff --git a/examples/helloworld_cloudevents/manifests/templates/deployment.yaml b/examples/helloworld_cloudevents/manifests/templates/deployment.yaml deleted file mode 100644 index f439d8ba7..000000000 --- a/examples/helloworld_cloudevents/manifests/templates/deployment.yaml +++ /dev/null @@ -1,68 +0,0 @@ -kind: Deployment -apiVersion: apps/v1 -metadata: - name: helloworldcloudevents-agent - namespace: {{ .AddonInstallNamespace }} - labels: - app: helloworldcloudevents-agent -spec: - replicas: 1 - selector: - matchLabels: - app: helloworldcloudevents-agent - template: - metadata: - labels: - app: helloworldcloudevents-agent - spec: - serviceAccountName: helloworldcloudevents-agent-sa -{{- if .NodeSelector }} - nodeSelector: - {{- range $key, $value 
:= .NodeSelector }} - "{{ $key }}": "{{ $value }}" - {{- end }} -{{- end }} -{{- if .Tolerations }} - tolerations: - {{- range $toleration := .Tolerations }} - - key: "{{ $toleration.Key }}" - value: "{{ $toleration.Value }}" - effect: "{{ $toleration.Effect }}" - operator: "{{ $toleration.Operator }}" - {{- if $toleration.TolerationSeconds }} - tolerationSeconds: {{ $toleration.TolerationSeconds }} - {{- end }} - {{- end }} -{{- end }} - volumes: - - name: hub-config - secret: - secretName: {{ .KubeConfigSecret }} - containers: - - name: helloworldcloudevents-agent - image: {{ .Image }} - imagePullPolicy: IfNotPresent -{{- if or .HTTPProxy .HTTPSProxy}} - env: - {{- if .HTTPProxy }} - - name: HTTP_PROXY - value: {{ .HTTPProxy }} - {{- end }} - {{- if .HTTPSProxy }} - - name: HTTPS_PROXY - value: {{ .HTTPSProxy }} - {{- end }} - {{- if .NoProxy }} - - name: NO_PROXY - value: {{ .NoProxy }} - {{- end }} -{{- end }} - args: - - "/helloworld_cloudevents" - - "agent" - - "--hub-kubeconfig=/var/run/hub/kubeconfig" - - "--cluster-name={{ .ClusterName }}" - - "--addon-namespace={{ .AddonInstallNamespace }}" - volumeMounts: - - name: hub-config - mountPath: /var/run/hub diff --git a/examples/helloworld_cloudevents/manifests/templates/serviceaccount.yaml b/examples/helloworld_cloudevents/manifests/templates/serviceaccount.yaml deleted file mode 100644 index 0a2b1ecbf..000000000 --- a/examples/helloworld_cloudevents/manifests/templates/serviceaccount.yaml +++ /dev/null @@ -1,5 +0,0 @@ -kind: ServiceAccount -apiVersion: v1 -metadata: - name: helloworldcloudevents-agent-sa - namespace: {{ .AddonInstallNamespace }} diff --git a/pkg/addonmanager/base_manager.go b/pkg/addonmanager/base_manager.go index 0d1663f13..5cc585935 100644 --- a/pkg/addonmanager/base_manager.go +++ b/pkg/addonmanager/base_manager.go @@ -12,8 +12,8 @@ import ( addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" addoninformers 
"open-cluster-management.io/api/client/addon/informers/externalversions" clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions" - workv1client "open-cluster-management.io/api/client/work/clientset/versioned" - workv1informers "open-cluster-management.io/api/client/work/informers/externalversions" + workclientset "open-cluster-management.io/api/client/work/clientset/versioned" + workv1informers "open-cluster-management.io/api/client/work/informers/externalversions/work/v1" "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig" "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy" @@ -53,18 +53,6 @@ func (a *BaseAddonManagerImpl) GetAddonAgents() map[string]agent.AgentAddon { return a.addonAgents } -func (a *BaseAddonManagerImpl) GetAddonConfigs() map[schema.GroupVersionResource]bool { - return a.addonConfigs -} - -func (a *BaseAddonManagerImpl) GetSyncContexts() []factory.SyncContext { - return a.syncContexts -} - -func (a *BaseAddonManagerImpl) SetSyncContexts(syncContexts []factory.SyncContext) { - a.syncContexts = syncContexts -} - func (a *BaseAddonManagerImpl) AddAgent(addon agent.AgentAddon) error { addonOption := addon.GetAgentAddonOptions() if len(addonOption.AddonName) == 0 { @@ -84,8 +72,9 @@ func (a *BaseAddonManagerImpl) Trigger(clusterName, addonName string) { } func (a *BaseAddonManagerImpl) StartWithInformers(ctx context.Context, + workClient workclientset.Interface, + workInformers workv1informers.ManifestWorkInformer, kubeInformers kubeinformers.SharedInformerFactory, - workInformers workv1informers.SharedInformerFactory, addonInformers addoninformers.SharedInformerFactory, clusterInformers clusterv1informers.SharedInformerFactory, dynamicInformers dynamicinformer.DynamicSharedInformerFactory) error { @@ -100,11 +89,6 @@ func (a *BaseAddonManagerImpl) StartWithInformers(ctx context.Context, return err } - workClient, err := 
workv1client.NewForConfig(a.config) - if err != nil { - return err - } - v1CSRSupported, v1beta1Supported, err := utils.IsCSRSupported(kubeClient) if err != nil { return err @@ -121,7 +105,7 @@ func (a *BaseAddonManagerImpl) StartWithInformers(ctx context.Context, addonClient, clusterInformers.Cluster().V1().ManagedClusters(), addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), - workInformers.Work().V1().ManifestWorks(), + workInformers, a.addonAgents, ) diff --git a/pkg/addonmanager/cloudevents/manager.go b/pkg/addonmanager/cloudevents/manager.go index e11dc5be9..6d2b71414 100644 --- a/pkg/addonmanager/cloudevents/manager.go +++ b/pkg/addonmanager/cloudevents/manager.go @@ -19,15 +19,7 @@ import ( workinformers "open-cluster-management.io/api/client/work/informers/externalversions" "open-cluster-management.io/addon-framework/pkg/addonmanager" - "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig" - "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy" - "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate" - "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/cmaconfig" - "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/cmamanagedby" - "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" "open-cluster-management.io/addon-framework/pkg/index" - "open-cluster-management.io/addon-framework/pkg/utils" cloudeventswork "open-cluster-management.io/sdk-go/pkg/cloudevents/work" "open-cluster-management.io/sdk-go/pkg/cloudevents/work/source/codec" ) @@ -42,8 +34,6 @@ type cloudeventsAddonManager struct { func (a *cloudeventsAddonManager) Start(ctx context.Context) error { config := a.GetConfig() addonAgents := a.GetAddonAgents() - addonConfigs := a.GetAddonConfigs() - syncContexts := a.GetSyncContexts() var addonNames 
[]string for key := range addonAgents { @@ -162,114 +152,11 @@ func (a *cloudeventsAddonManager) Start(ctx context.Context) error { return err } - v1CSRSupported, v1beta1Supported, err := utils.IsCSRSupported(kubeClient) + err = a.StartWithInformers(ctx, workClient, workInformers, kubeInformers, addonInformers, clusterInformers, dynamicInformers) if err != nil { return err } - for _, agentImpl := range addonAgents { - for _, configGVR := range agentImpl.GetAgentAddonOptions().SupportedConfigGVRs { - addonConfigs[configGVR] = true - } - } - - deployController := agentdeploy.NewAddonDeployController( - workClient, - addonClient, - clusterInformers.Cluster().V1().ManagedClusters(), - addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), - workInformers, - addonAgents, - ) - - registrationController := registration.NewAddonRegistrationController( - addonClient, - clusterInformers.Cluster().V1().ManagedClusters(), - addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), - addonAgents, - ) - - // This controller is used during migrating addons to be managed by addon-manager. - // This should be removed when the migration is done. - // The migration plan refer to https://github.com/open-cluster-management-io/ocm/issues/355. 
- managementAddonController := cmamanagedby.NewCMAManagedByController( - addonClient, - addonInformers.Addon().V1alpha1().ClusterManagementAddOns(), - addonAgents, - utils.FilterByAddonName(addonAgents), - ) - - var addonConfigController, managementAddonConfigController factory.Controller - if len(addonConfigs) != 0 { - addonConfigController = addonconfig.NewAddonConfigController( - addonClient, - addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), - addonInformers.Addon().V1alpha1().ClusterManagementAddOns(), - dynamicInformers, - addonConfigs, - utils.FilterByAddonName(addonAgents), - ) - managementAddonConfigController = cmaconfig.NewCMAConfigController( - addonClient, - addonInformers.Addon().V1alpha1().ClusterManagementAddOns(), - dynamicInformers, - addonConfigs, - utils.FilterByAddonName(addonAgents), - ) - } - - var csrApproveController factory.Controller - var csrSignController factory.Controller - // Spawn the following controllers only if v1 CSR api is supported in the - // hub cluster. Under v1beta1 CSR api, all the CSR objects will be signed - // by the kube-controller-manager so custom CSR controller should be - // disabled to avoid conflict. 
- if v1CSRSupported { - csrApproveController = certificate.NewCSRApprovingController( - kubeClient, - clusterInformers.Cluster().V1().ManagedClusters(), - kubeInformers.Certificates().V1().CertificateSigningRequests(), - nil, - addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), - addonAgents, - ) - csrSignController = certificate.NewCSRSignController( - kubeClient, - clusterInformers.Cluster().V1().ManagedClusters(), - kubeInformers.Certificates().V1().CertificateSigningRequests(), - addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), - addonAgents, - ) - } else if v1beta1Supported { - csrApproveController = certificate.NewCSRApprovingController( - kubeClient, - clusterInformers.Cluster().V1().ManagedClusters(), - nil, - kubeInformers.Certificates().V1beta1().CertificateSigningRequests(), - addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), - addonAgents, - ) - } - - a.SetSyncContexts(append(syncContexts, deployController.SyncContext())) - - go deployController.Run(ctx, 1) - go registrationController.Run(ctx, 1) - go managementAddonController.Run(ctx, 1) - - if addonConfigController != nil { - go addonConfigController.Run(ctx, 1) - } - if managementAddonConfigController != nil { - go managementAddonConfigController.Run(ctx, 1) - } - if csrApproveController != nil { - go csrApproveController.Run(ctx, 1) - } - if csrSignController != nil { - go csrSignController.Run(ctx, 1) - } - kubeInformers.Start(ctx.Done()) go workInformers.Informer().Run(ctx.Done()) addonInformers.Start(ctx.Done()) diff --git a/pkg/addonmanager/interface.go b/pkg/addonmanager/interface.go index b6c8ad323..5598a7701 100644 --- a/pkg/addonmanager/interface.go +++ b/pkg/addonmanager/interface.go @@ -8,7 +8,8 @@ import ( "open-cluster-management.io/addon-framework/pkg/agent" addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions" clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions" - workv1informers 
"open-cluster-management.io/api/client/work/informers/externalversions" + workclientset "open-cluster-management.io/api/client/work/clientset/versioned" + workv1informers "open-cluster-management.io/api/client/work/informers/externalversions/work/v1" ) // BaseAddonManager is the interface to initialize a manager on hub to manage the addon @@ -23,8 +24,9 @@ type BaseAddonManager interface { // StartWithInformers starts all registered addon agent with the given informers. StartWithInformers(ctx context.Context, + workClient workclientset.Interface, + workInformers workv1informers.ManifestWorkInformer, kubeInformers kubeinformers.SharedInformerFactory, - workInformers workv1informers.SharedInformerFactory, addonInformers addoninformers.SharedInformerFactory, clusterInformers clusterv1informers.SharedInformerFactory, dynamicInformers dynamicinformer.DynamicSharedInformerFactory) error @@ -35,6 +37,6 @@ type BaseAddonManager interface { type AddonManager interface { BaseAddonManager - // Start starts all registered addon agents. + // Start starts all registered addon agents and controllers. Start(ctx context.Context) error } diff --git a/pkg/addonmanager/manager.go b/pkg/addonmanager/manager.go index 2fab867fe..f98a12310 100644 --- a/pkg/addonmanager/manager.go +++ b/pkg/addonmanager/manager.go @@ -124,7 +124,7 @@ func (a *addonManager) Start(ctx context.Context) error { return err } - err = a.StartWithInformers(ctx, kubeInformers, workInformers, addonInformers, clusterInformers, dynamicInformers) + err = a.StartWithInformers(ctx, workClient, workInformers.Work().V1().ManifestWorks(), kubeInformers, addonInformers, clusterInformers, dynamicInformers) if err != nil { return err } From c7ba5b97f75681d05c756ddb5344282dd8a68155 Mon Sep 17 00:00:00 2001 From: morvencao Date: Fri, 19 Apr 2024 05:38:49 +0000 Subject: [PATCH 6/6] add e2e test back. 
Signed-off-by: morvencao --- .github/workflows/go-presubmit.yml | 31 +++++++ Makefile | 17 ++++ .../addon/helloworld_cloudevents/config.yaml | 5 ++ .../helloworld_cloudevents/kustomization.yaml | 38 ++++++++ .../work_driver_config_patch.yaml | 24 +++++ examples/deploy/ocm-cloudevents/install.sh | 87 +++++++++++++++++++ 6 files changed, 202 insertions(+) create mode 100644 examples/deploy/addon/helloworld_cloudevents/config.yaml create mode 100644 examples/deploy/addon/helloworld_cloudevents/kustomization.yaml create mode 100644 examples/deploy/addon/helloworld_cloudevents/work_driver_config_patch.yaml create mode 100755 examples/deploy/ocm-cloudevents/install.sh diff --git a/.github/workflows/go-presubmit.yml b/.github/workflows/go-presubmit.yml index 823c2be26..e8f130d48 100644 --- a/.github/workflows/go-presubmit.yml +++ b/.github/workflows/go-presubmit.yml @@ -119,6 +119,37 @@ jobs: env: KUBECONFIG: /home/runner/.kube/config + e2e-cloudevents: + name: e2e-cloudevents + runs-on: ubuntu-latest + steps: + - name: checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 1 + - name: install Go + uses: actions/setup-go@v3 + with: + go-version: ${{ env.GO_VERSION }} + - name: install imagebuilder + run: go install github.com/openshift/imagebuilder/cmd/imagebuilder@v1.2.3 + - name: addon-examples-image # will build and tag the examples image + run: imagebuilder --allow-pull -t quay.io/open-cluster-management/addon-examples:latest -t quay.io/ocm/addon-examples:latest -f ./build/Dockerfile.example . 
+ - name: setup kind + uses: engineerd/setup-kind@v0.5.0 + with: + version: v0.11.1 + name: cluster1 + - name: Load image on the nodes of the cluster + run: | + kind load docker-image --name=cluster1 quay.io/open-cluster-management/addon-examples:latest + kind load docker-image --name=cluster1 quay.io/ocm/addon-examples:latest + - name: Run e2e test with cloudevents + run: | + make test-e2e-cloudevents + env: + KUBECONFIG: /home/runner/.kube/config + e2e-hosted: name: e2e-hosted runs-on: ubuntu-latest diff --git a/Makefile b/Makefile index 415d75d87..3abb8cf3a 100644 --- a/Makefile +++ b/Makefile @@ -56,6 +56,9 @@ verify: verify-gocilint deploy-ocm: examples/deploy/ocm/install.sh +deploy-ocm-cloudevents: + examples/deploy/ocm-cloudevents/install.sh + deploy-hosted-ocm: examples/deploy/hosted-ocm/install.sh @@ -71,6 +74,14 @@ deploy-helloworld: ensure-kustomize $(KUSTOMIZE) build examples/deploy/addon/helloworld | $(KUBECTL) apply -f - mv examples/deploy/addon/helloworld/kustomization.yaml.tmp examples/deploy/addon/helloworld/kustomization.yaml +deploy-helloworld-cloudevents: ensure-kustomize + cp examples/deploy/addon/helloworld_cloudevents/kustomization.yaml examples/deploy/addon/helloworld_cloudevents/kustomization.yaml.tmp + cp -r examples/deploy/addon/helloworld/resources examples/deploy/addon/helloworld_cloudevents/ + cd examples/deploy/addon/helloworld_cloudevents && ../../../../$(KUSTOMIZE) edit set image quay.io/open-cluster-management/addon-examples=$(EXAMPLE_IMAGE_NAME) + $(KUSTOMIZE) build examples/deploy/addon/helloworld_cloudevents | $(KUBECTL) apply -f - + mv examples/deploy/addon/helloworld_cloudevents/kustomization.yaml.tmp examples/deploy/addon/helloworld_cloudevents/kustomization.yaml + rm -rf examples/deploy/addon/helloworld_cloudevents/resources + deploy-helloworld-helm: ensure-kustomize cp examples/deploy/addon/helloworld-helm/kustomization.yaml examples/deploy/addon/helloworld-helm/kustomization.yaml.tmp cd 
examples/deploy/addon/helloworld-helm && ../../../../$(KUSTOMIZE) edit set image quay.io/open-cluster-management/addon-examples=$(EXAMPLE_IMAGE_NAME) @@ -111,6 +122,9 @@ undeploy-busybox: ensure-kustomize undeploy-helloworld: ensure-kustomize $(KUSTOMIZE) build examples/deploy/addon/helloworld | $(KUBECTL) delete --ignore-not-found -f - +undeploy-helloworld-cloudevents: ensure-kustomize + $(KUSTOMIZE) build examples/deploy/addon/helloworld-cloudevents | $(KUBECTL) delete --ignore-not-found -f - + undeploy-helloworld-helm: ensure-kustomize $(KUSTOMIZE) build examples/deploy/addon/helloworld-helm | $(KUBECTL) delete --ignore-not-found -f - @@ -129,6 +143,9 @@ build-e2e: test-e2e: build-e2e deploy-ocm deploy-helloworld deploy-helloworld-helm ./e2e.test -test.v -ginkgo.v +test-e2e-cloudevents: build-e2e deploy-ocm-cloudevents deploy-helloworld-cloudevents + ./e2e.test -test.v -ginkgo.v -ginkgo.focus="install/uninstall helloworld addons" + build-hosted-e2e: go test -c ./test/e2ehosted diff --git a/examples/deploy/addon/helloworld_cloudevents/config.yaml b/examples/deploy/addon/helloworld_cloudevents/config.yaml new file mode 100644 index 000000000..6b4dbf63a --- /dev/null +++ b/examples/deploy/addon/helloworld_cloudevents/config.yaml @@ -0,0 +1,5 @@ +brokerHost: mosquitto.mqtt:1883 +topics: + sourceEvents: sources/addon-manager/clusters/+/sourceevents + agentEvents: sources/addon-manager/clusters/+/agentevents + sourceBroadcast: sources/addon-manager/sourcebroadcast diff --git a/examples/deploy/addon/helloworld_cloudevents/kustomization.yaml b/examples/deploy/addon/helloworld_cloudevents/kustomization.yaml new file mode 100644 index 000000000..2ae31eda9 --- /dev/null +++ b/examples/deploy/addon/helloworld_cloudevents/kustomization.yaml @@ -0,0 +1,38 @@ +namespace: open-cluster-management + +resources: +- resources/cluster_role.yaml +- resources/cluster_role_binding.yaml +- resources/service_account.yaml +- resources/managed_clusterset_binding.yaml +- 
resources/placement.yaml +- resources/addon_deployment_config.yaml +- resources/helloworld_clustermanagementaddon.yaml +- resources/helloworld_controller.yaml + +images: +- name: quay.io/open-cluster-management/addon-examples + newName: quay.io/open-cluster-management/addon-examples + newTag: latest +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +vars: +- fieldref: + fieldPath: spec.template.spec.containers.[name=helloworld-controller].image + name: EXAMPLE_IMAGE_NAME + objref: + apiVersion: apps/v1 + kind: Deployment + name: helloworld-controller + +secretGenerator: +- files: + - config.yaml + name: work-driver-config + +generatorOptions: + disableNameSuffixHash: true + +patches: +- path: work_driver_config_patch.yaml diff --git a/examples/deploy/addon/helloworld_cloudevents/work_driver_config_patch.yaml b/examples/deploy/addon/helloworld_cloudevents/work_driver_config_patch.yaml new file mode 100644 index 000000000..ca5054876 --- /dev/null +++ b/examples/deploy/addon/helloworld_cloudevents/work_driver_config_patch.yaml @@ -0,0 +1,24 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: helloworld-controller +spec: + template: + spec: + containers: + - name: helloworld-controller + args: + - "/helloworld" + - "controller" + - "--work-driver=mqtt" + - "--work-driver-config=/var/run/secrets/hub/config.yaml" + - "--cloudevents-client-id=addon-manager-$(POD_NAME)" + - "--source-id=addon-manager" + volumeMounts: + - mountPath: /var/run/secrets/hub + name: workdriverconfig + readOnly: true + volumes: + - name: workdriverconfig + secret: + secretName: work-driver-config diff --git a/examples/deploy/ocm-cloudevents/install.sh b/examples/deploy/ocm-cloudevents/install.sh new file mode 100755 index 000000000..23f492201 --- /dev/null +++ b/examples/deploy/ocm-cloudevents/install.sh @@ -0,0 +1,87 @@ +#!/bin/bash + +set -o nounset +set -o pipefail + +KUBECTL=${KUBECTL:-kubectl} + +BUILD_DIR="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" 
+DEPLOY_DIR="$(dirname "$BUILD_DIR")" +EXAMPLE_DIR="$(dirname "$DEPLOY_DIR")" +REPO_DIR="$(dirname "$EXAMPLE_DIR")" +WORK_DIR="${REPO_DIR}/_output" +CLUSTERADM="${WORK_DIR}/bin/clusteradm" + +export PATH=$PATH:${WORK_DIR}/bin + +echo "############ Download clusteradm" +mkdir -p "${WORK_DIR}/bin" +wget -qO- https://github.com/open-cluster-management-io/clusteradm/releases/latest/download/clusteradm_${GOHOSTOS}_${GOHOSTARCH}.tar.gz | sudo tar -xvz -C ${WORK_DIR}/bin/ +chmod +x "${CLUSTERADM}" + +echo "############ Init hub" +${CLUSTERADM} init --wait --bundle-version=latest +joincmd=$(${CLUSTERADM} get token | grep clusteradm) + +echo "############ Init agent as cluster1" +$(echo ${joincmd} --force-internal-endpoint-lookup --wait --bundle-version=latest | sed "s//${MANAGED_CLUSTER_NAME}/g") + +echo "############ Accept join of cluster1" +${CLUSTERADM} accept --clusters ${MANAGED_CLUSTER_NAME} --wait + +echo "############ All-in-one env is installed successfully!!" + +echo "############ Deploy mqtt broker" +${KUBECTL} apply -f ${DEPLOY_DIR}/mqtt/mqtt-broker.yaml + +echo "############ Configure the work-agent" +${KUBECTL} -n open-cluster-management scale --replicas=0 deployment/klusterlet + +cat << EOF | ${KUBECTL} -n open-cluster-management-agent apply -f - +apiVersion: v1 +kind: Secret +metadata: + name: work-driver-config +stringData: + config.yaml: | + brokerHost: mosquitto.mqtt:1883 + topics: + sourceEvents: sources/addon-manager/clusters/${MANAGED_CLUSTER_NAME}/sourceevents + agentEvents: sources/addon-manager/clusters/${MANAGED_CLUSTER_NAME}/agentevents + agentBroadcast: clusters/${MANAGED_CLUSTER_NAME}/agentbroadcast +EOF + +# patch klusterlet-work-agent deployment to use mqtt as workload source driver +${KUBECTL} -n open-cluster-management-agent patch deployment/klusterlet-work-agent --type=json \ + -p='[ + {"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--cloudevents-client-codecs=manifestbundle"}, + {"op": "add", "path": 
"/spec/template/spec/containers/0/args/-", "value": "--cloudevents-client-id=work-agent-$(POD_NAME)"}, + {"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--workload-source-driver=mqtt"}, + {"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--workload-source-config=/var/run/secrets/hub/config.yaml"} + ]' + +${KUBECTL} -n open-cluster-management-agent patch deployment/klusterlet-work-agent --type=json \ + -p='[{"op": "add", "path": "/spec/template/spec/volumes/-", "value": {"name": "workdriverconfig","secret": {"secretName": "work-driver-config"}}}]' + +${KUBECTL} -n open-cluster-management-agent patch deployment/klusterlet-work-agent --type=json \ + -p='[{"op": "add", "path": "/spec/template/spec/containers/0/volumeMounts/-", "value": {"name": "workdriverconfig","mountPath": "/var/run/secrets/hub"}}]' + +${KUBECTL} -n open-cluster-management-agent scale --replicas=1 deployment/klusterlet-work-agent +${KUBECTL} -n open-cluster-management-agent rollout status deployment/klusterlet-work-agent --timeout=120s +${KUBECTL} -n open-cluster-management-agent get pod -l app=klusterlet-manifestwork-agent + +# TODO: add live probe for the work-agent to check if it is connected to the mqtt broker +isRunning=false +for i in {1..20}; do + if ${KUBECTL} -n open-cluster-management-agent logs deployment/klusterlet-work-agent | grep "subscribing to topics"; then + echo "klusterlet-work-agent is subscribing to topics from mqtt broker" + isRunning=true + break + fi + sleep 12 +done + +if [ "$isRunning" = false ]; then + echo "timeout waiting for klusterlet-work-agent to subscribe to topics from mqtt broker" + exit 1 +fi