From dcdf72322183862e7e1dfa04a8748410cfbadd89 Mon Sep 17 00:00:00 2001 From: Wei Liu Date: Tue, 28 Nov 2023 12:12:10 +0800 Subject: [PATCH] supporting cloudevents for work agent Signed-off-by: Wei Liu --- .github/workflows/cloudevents-integration.yml | 31 + .gitignore | 1 + deps.diff | 0 go.mod | 17 +- go.sum | 27 +- ...gement.io_clustermanagementaddons.crd.yaml | 6 +- ...gement.io_manifestworkreplicasets.crd.yaml | 6 +- .../klusterlet-agent-deployment.yaml | 2 + .../klusterlet-work-deployment.yaml | 3 +- .../klusterlet_controller_test.go | 3 +- pkg/work/spoke/options.go | 18 +- pkg/work/spoke/spokeagent.go | 135 +- test/integration-test.mk | 5 + .../cloudevents/deleteoption_test.go | 201 + test/integration/cloudevents/source/codec.go | 217 + .../integration/cloudevents/source/handler.go | 77 + test/integration/cloudevents/source/lister.go | 17 + .../cloudevents/source/manifestwork.go | 184 + test/integration/cloudevents/source/source.go | 126 + .../cloudevents/source/workclientset.go | 46 + .../cloudevents/statusfeedback_test.go | 424 ++ test/integration/cloudevents/suite_test.go | 131 + .../cloudevents/updatestrategy_test.go | 441 ++ test/integration/cloudevents/work_test.go | 165 + test/integration/operator/klusterlet_test.go | 2 +- test/integration/util/assertion.go | 9 +- test/integration/work/deleteoption_test.go | 13 +- test/integration/work/executor_test.go | 4 +- .../work/manifestworkreplicaset_test.go | 6 +- test/integration/work/statusfeedback_test.go | 29 +- test/integration/work/suite_test.go | 24 +- .../work/unmanaged_appliedwork_test.go | 9 +- test/integration/work/updatestrategy_test.go | 7 +- test/integration/work/work_test.go | 12 +- .../sdk-go/protocol/mqtt_paho/v2/LICENSE | 201 + .../sdk-go/protocol/mqtt_paho/v2/message.go | 119 + .../sdk-go/protocol/mqtt_paho/v2/option.go | 48 + .../sdk-go/protocol/mqtt_paho/v2/protocol.go | 155 + .../protocol/mqtt_paho/v2/write_message.go | 133 + .../github.com/cloudevents/sdk-go/v2/LICENSE | 201 + 
.../github.com/cloudevents/sdk-go/v2/alias.go | 187 + .../sdk-go/v2/binding/binary_writer.go | 52 + .../cloudevents/sdk-go/v2/binding/doc.go | 68 + .../cloudevents/sdk-go/v2/binding/encoding.go | 50 + .../sdk-go/v2/binding/event_message.go | 108 + .../sdk-go/v2/binding/finish_message.go | 42 + .../sdk-go/v2/binding/format/doc.go | 12 + .../sdk-go/v2/binding/format/format.go | 105 + .../cloudevents/sdk-go/v2/binding/message.go | 153 + .../sdk-go/v2/binding/spec/attributes.go | 141 + .../cloudevents/sdk-go/v2/binding/spec/doc.go | 13 + .../v2/binding/spec/match_exact_version.go | 81 + .../sdk-go/v2/binding/spec/spec.go | 189 + .../sdk-go/v2/binding/structured_writer.go | 22 + .../cloudevents/sdk-go/v2/binding/to_event.go | 153 + .../sdk-go/v2/binding/transformer.go | 42 + .../cloudevents/sdk-go/v2/binding/write.go | 179 + .../cloudevents/sdk-go/v2/client/client.go | 288 ++ .../sdk-go/v2/client/client_http.go | 35 + .../sdk-go/v2/client/client_observed.go | 12 + .../sdk-go/v2/client/defaulters.go | 57 + .../cloudevents/sdk-go/v2/client/doc.go | 11 + .../sdk-go/v2/client/http_receiver.go | 45 + .../cloudevents/sdk-go/v2/client/invoker.go | 137 + .../sdk-go/v2/client/observability.go | 54 + .../cloudevents/sdk-go/v2/client/options.go | 128 + .../cloudevents/sdk-go/v2/client/receiver.go | 194 + .../cloudevents/sdk-go/v2/context/context.go | 110 + .../sdk-go/v2/context/delegating.go | 25 + .../cloudevents/sdk-go/v2/context/doc.go | 10 + .../cloudevents/sdk-go/v2/context/logger.go | 48 + .../cloudevents/sdk-go/v2/context/retry.go | 76 + .../sdk-go/v2/event/content_type.go | 47 + .../sdk-go/v2/event/data_content_encoding.go | 16 + .../sdk-go/v2/event/datacodec/codec.go | 78 + .../sdk-go/v2/event/datacodec/doc.go | 10 + .../sdk-go/v2/event/datacodec/json/data.go | 56 + .../sdk-go/v2/event/datacodec/json/doc.go | 9 + .../sdk-go/v2/event/datacodec/text/data.go | 30 + .../sdk-go/v2/event/datacodec/text/doc.go | 9 + .../sdk-go/v2/event/datacodec/xml/data.go | 40 + 
.../sdk-go/v2/event/datacodec/xml/doc.go | 9 + .../cloudevents/sdk-go/v2/event/doc.go | 9 + .../cloudevents/sdk-go/v2/event/event.go | 126 + .../cloudevents/sdk-go/v2/event/event_data.go | 118 + .../sdk-go/v2/event/event_interface.go | 102 + .../sdk-go/v2/event/event_marshal.go | 203 + .../sdk-go/v2/event/event_reader.go | 103 + .../sdk-go/v2/event/event_unmarshal.go | 480 ++ .../sdk-go/v2/event/event_validation.go | 50 + .../sdk-go/v2/event/event_writer.go | 117 + .../sdk-go/v2/event/eventcontext.go | 125 + .../sdk-go/v2/event/eventcontext_v03.go | 329 ++ .../v2/event/eventcontext_v03_reader.go | 99 + .../v2/event/eventcontext_v03_writer.go | 103 + .../sdk-go/v2/event/eventcontext_v1.go | 315 ++ .../sdk-go/v2/event/eventcontext_v1_reader.go | 104 + .../sdk-go/v2/event/eventcontext_v1_writer.go | 97 + .../cloudevents/sdk-go/v2/event/extensions.go | 57 + .../cloudevents/sdk-go/v2/protocol/doc.go | 26 + .../cloudevents/sdk-go/v2/protocol/error.go | 42 + .../v2/protocol/http/abuse_protection.go | 128 + .../sdk-go/v2/protocol/http/context.go | 48 + .../sdk-go/v2/protocol/http/doc.go | 9 + .../sdk-go/v2/protocol/http/headers.go | 55 + .../sdk-go/v2/protocol/http/message.go | 175 + .../sdk-go/v2/protocol/http/options.go | 301 ++ .../sdk-go/v2/protocol/http/protocol.go | 408 ++ .../v2/protocol/http/protocol_lifecycle.go | 143 + .../sdk-go/v2/protocol/http/protocol_rate.go | 34 + .../sdk-go/v2/protocol/http/protocol_retry.go | 145 + .../sdk-go/v2/protocol/http/result.go | 60 + .../sdk-go/v2/protocol/http/retries_result.go | 59 + .../sdk-go/v2/protocol/http/utility.go | 89 + .../sdk-go/v2/protocol/http/write_request.go | 141 + .../v2/protocol/http/write_responsewriter.go | 126 + .../cloudevents/sdk-go/v2/protocol/inbound.go | 54 + .../sdk-go/v2/protocol/lifecycle.go | 23 + .../sdk-go/v2/protocol/outbound.go | 49 + .../cloudevents/sdk-go/v2/protocol/result.go | 127 + .../cloudevents/sdk-go/v2/staticcheck.conf | 3 + .../cloudevents/sdk-go/v2/types/allocate.go | 41 + 
.../cloudevents/sdk-go/v2/types/doc.go | 46 + .../cloudevents/sdk-go/v2/types/timestamp.go | 75 + .../cloudevents/sdk-go/v2/types/uri.go | 86 + .../cloudevents/sdk-go/v2/types/uriref.go | 82 + .../cloudevents/sdk-go/v2/types/value.go | 335 ++ vendor/github.com/eclipse/paho.golang/LICENSE | 277 ++ .../eclipse/paho.golang/packets/auth.go | 77 + .../eclipse/paho.golang/packets/connack.go | 145 + .../eclipse/paho.golang/packets/connect.go | 189 + .../eclipse/paho.golang/packets/disconnect.go | 152 + .../eclipse/paho.golang/packets/packets.go | 447 ++ .../eclipse/paho.golang/packets/pingreq.go | 34 + .../eclipse/paho.golang/packets/pingresp.go | 34 + .../eclipse/paho.golang/packets/properties.go | 804 ++++ .../eclipse/paho.golang/packets/puback.go | 115 + .../eclipse/paho.golang/packets/pubcomp.go | 95 + .../eclipse/paho.golang/packets/publish.go | 80 + .../eclipse/paho.golang/packets/pubrec.go | 117 + .../eclipse/paho.golang/packets/pubrel.go | 77 + .../eclipse/paho.golang/packets/suback.go | 103 + .../eclipse/paho.golang/packets/subscribe.go | 116 + .../eclipse/paho.golang/packets/unsuback.go | 88 + .../paho.golang/packets/unsubscribe.go | 67 + .../eclipse/paho.golang/paho/acks_tracker.go | 79 + .../eclipse/paho.golang/paho/auth.go | 8 + .../eclipse/paho.golang/paho/client.go | 923 ++++ .../eclipse/paho.golang/paho/cp_auth.go | 92 + .../eclipse/paho.golang/paho/cp_connack.go | 84 + .../eclipse/paho.golang/paho/cp_connect.go | 180 + .../eclipse/paho.golang/paho/cp_disconnect.go | 58 + .../eclipse/paho.golang/paho/cp_publish.go | 123 + .../eclipse/paho.golang/paho/cp_pubresp.go | 55 + .../eclipse/paho.golang/paho/cp_suback.go | 41 + .../eclipse/paho.golang/paho/cp_subscribe.go | 67 + .../eclipse/paho.golang/paho/cp_unsuback.go | 41 + .../paho.golang/paho/cp_unsubscribe.go | 31 + .../eclipse/paho.golang/paho/cp_utils.go | 100 + .../eclipse/paho.golang/paho/message_ids.go | 93 + .../paho.golang/paho/noop_persistence.go | 23 + .../eclipse/paho.golang/paho/persistence.go | 
98 + .../eclipse/paho.golang/paho/pinger.go | 122 + .../eclipse/paho.golang/paho/router.go | 212 + .../eclipse/paho.golang/paho/trace.go | 22 + .../github.com/gorilla/websocket/.gitignore | 25 + vendor/github.com/gorilla/websocket/AUTHORS | 9 + vendor/github.com/gorilla/websocket/LICENSE | 22 + vendor/github.com/gorilla/websocket/README.md | 39 + vendor/github.com/gorilla/websocket/client.go | 422 ++ .../gorilla/websocket/compression.go | 148 + vendor/github.com/gorilla/websocket/conn.go | 1230 +++++ vendor/github.com/gorilla/websocket/doc.go | 227 + vendor/github.com/gorilla/websocket/join.go | 42 + vendor/github.com/gorilla/websocket/json.go | 60 + vendor/github.com/gorilla/websocket/mask.go | 55 + .../github.com/gorilla/websocket/mask_safe.go | 16 + .../github.com/gorilla/websocket/prepared.go | 102 + vendor/github.com/gorilla/websocket/proxy.go | 77 + vendor/github.com/gorilla/websocket/server.go | 365 ++ .../gorilla/websocket/tls_handshake.go | 21 + .../gorilla/websocket/tls_handshake_116.go | 21 + vendor/github.com/gorilla/websocket/util.go | 283 ++ .../gorilla/websocket/x_net_proxy.go | 473 ++ vendor/github.com/mattn/go-colorable/LICENSE | 21 + .../github.com/mattn/go-colorable/README.md | 48 + .../mattn/go-colorable/colorable_appengine.go | 38 + .../mattn/go-colorable/colorable_others.go | 38 + .../mattn/go-colorable/colorable_windows.go | 1047 +++++ .../github.com/mattn/go-colorable/go.test.sh | 12 + .../mattn/go-colorable/noncolorable.go | 57 + vendor/github.com/mattn/go-isatty/LICENSE | 9 + vendor/github.com/mattn/go-isatty/README.md | 50 + vendor/github.com/mattn/go-isatty/doc.go | 2 + vendor/github.com/mattn/go-isatty/go.test.sh | 12 + .../github.com/mattn/go-isatty/isatty_bsd.go | 19 + .../mattn/go-isatty/isatty_others.go | 16 + .../mattn/go-isatty/isatty_plan9.go | 23 + .../mattn/go-isatty/isatty_solaris.go | 21 + .../mattn/go-isatty/isatty_tcgets.go | 19 + .../mattn/go-isatty/isatty_windows.go | 125 + .../mochi-mqtt/server/v2/.gitignore | 4 + 
.../mochi-mqtt/server/v2/.golangci.yml | 103 + .../mochi-mqtt/server/v2/Dockerfile | 31 + .../mochi-mqtt/server/v2/LICENSE.md | 23 + .../github.com/mochi-mqtt/server/v2/README.md | 424 ++ .../mochi-mqtt/server/v2/clients.go | 574 +++ .../github.com/mochi-mqtt/server/v2/hooks.go | 846 ++++ .../server/v2/hooks/auth/allow_all.go | 41 + .../mochi-mqtt/server/v2/hooks/auth/auth.go | 107 + .../mochi-mqtt/server/v2/hooks/auth/ledger.go | 246 + .../server/v2/hooks/storage/storage.go | 194 + .../mochi-mqtt/server/v2/inflight.go | 156 + .../server/v2/listeners/http_healthcheck.go | 104 + .../server/v2/listeners/http_sysinfo.go | 118 + .../server/v2/listeners/listeners.go | 135 + .../mochi-mqtt/server/v2/listeners/mock.go | 103 + .../mochi-mqtt/server/v2/listeners/net.go | 92 + .../mochi-mqtt/server/v2/listeners/tcp.go | 108 + .../server/v2/listeners/unixsock.go | 98 + .../server/v2/listeners/websocket.go | 194 + .../mochi-mqtt/server/v2/packets/codec.go | 172 + .../mochi-mqtt/server/v2/packets/codes.go | 148 + .../server/v2/packets/fixedheader.go | 63 + .../mochi-mqtt/server/v2/packets/packets.go | 1148 +++++ .../server/v2/packets/properties.go | 477 ++ .../mochi-mqtt/server/v2/packets/tpackets.go | 3939 +++++++++++++++++ .../github.com/mochi-mqtt/server/v2/server.go | 1533 +++++++ .../mochi-mqtt/server/v2/system/system.go | 61 + .../github.com/mochi-mqtt/server/v2/topics.go | 707 +++ vendor/github.com/rs/xid/.appveyor.yml | 27 + vendor/github.com/rs/xid/.travis.yml | 8 + vendor/github.com/rs/xid/LICENSE | 19 + vendor/github.com/rs/xid/README.md | 116 + vendor/github.com/rs/xid/error.go | 11 + vendor/github.com/rs/xid/hostid_darwin.go | 9 + vendor/github.com/rs/xid/hostid_fallback.go | 9 + vendor/github.com/rs/xid/hostid_freebsd.go | 9 + vendor/github.com/rs/xid/hostid_linux.go | 13 + vendor/github.com/rs/xid/hostid_windows.go | 38 + vendor/github.com/rs/xid/id.go | 392 ++ vendor/github.com/rs/zerolog/.gitignore | 25 + vendor/github.com/rs/zerolog/CNAME | 1 + 
vendor/github.com/rs/zerolog/LICENSE | 21 + vendor/github.com/rs/zerolog/README.md | 716 +++ vendor/github.com/rs/zerolog/_config.yml | 1 + vendor/github.com/rs/zerolog/array.go | 240 + vendor/github.com/rs/zerolog/console.go | 446 ++ vendor/github.com/rs/zerolog/context.go | 433 ++ vendor/github.com/rs/zerolog/ctx.go | 51 + vendor/github.com/rs/zerolog/encoder.go | 56 + vendor/github.com/rs/zerolog/encoder_cbor.go | 42 + vendor/github.com/rs/zerolog/encoder_json.go | 39 + vendor/github.com/rs/zerolog/event.go | 780 ++++ vendor/github.com/rs/zerolog/fields.go | 277 ++ vendor/github.com/rs/zerolog/globals.go | 142 + vendor/github.com/rs/zerolog/go112.go | 7 + vendor/github.com/rs/zerolog/hook.go | 64 + .../rs/zerolog/internal/cbor/README.md | 56 + .../rs/zerolog/internal/cbor/base.go | 19 + .../rs/zerolog/internal/cbor/cbor.go | 101 + .../rs/zerolog/internal/cbor/decode_stream.go | 614 +++ .../rs/zerolog/internal/cbor/string.go | 95 + .../rs/zerolog/internal/cbor/time.go | 93 + .../rs/zerolog/internal/cbor/types.go | 477 ++ .../rs/zerolog/internal/json/base.go | 19 + .../rs/zerolog/internal/json/bytes.go | 85 + .../rs/zerolog/internal/json/string.go | 149 + .../rs/zerolog/internal/json/time.go | 113 + .../rs/zerolog/internal/json/types.go | 405 ++ vendor/github.com/rs/zerolog/log.go | 476 ++ vendor/github.com/rs/zerolog/not_go112.go | 5 + vendor/github.com/rs/zerolog/pretty.png | Bin 0 -> 84064 bytes vendor/github.com/rs/zerolog/sampler.go | 134 + vendor/github.com/rs/zerolog/syslog.go | 80 + vendor/github.com/rs/zerolog/writer.go | 154 + .../golang.org/x/sync/semaphore/semaphore.go | 136 + vendor/modules.txt | 66 +- ...gement.io_clustermanagementaddons.crd.yaml | 6 +- .../api/cloudevents/generic/agentclient.go | 300 ++ .../api/cloudevents/generic/baseclient.go | 209 + .../api/cloudevents/generic/interface.go | 65 + .../generic/options/mqtt/agentoptions.go | 87 + .../generic/options/mqtt/options.go | 234 + .../generic/options/mqtt/sourceoptions.go | 83 + 
.../cloudevents/generic/options/options.go | 66 + .../cloudevents/generic/payload/payload.go | 48 + .../api/cloudevents/generic/ratelimiter.go | 34 + .../api/cloudevents/generic/sourceclient.go | 309 ++ .../api/cloudevents/generic/types/types.go | 227 + .../work/agent/client/manifestwork.go | 171 + .../cloudevents/work/agent/codec/manifest.go | 184 + .../work/agent/codec/manifestbundle.go | 137 + .../work/agent/handler/resourcehandler.go | 76 + .../api/cloudevents/work/clientbuilder.go | 152 + .../cloudevents/work/internal/clientset.go | 50 + .../api/cloudevents/work/lister.go | 19 + .../api/cloudevents/work/payload/mainfiest.go | 54 + .../work/payload/manifestbundle.go | 43 + .../api/cloudevents/work/statushash.go | 18 + .../api/cloudevents/work/utils/utils.go | 47 + .../api/cloudevents/work/watcher/watcher.go | 64 + .../cluster/v1alpha1/types_rolloutstrategy.go | 2 +- .../api/utils/work/v1/utils/utils.go | 76 + .../utils/work/v1/workvalidator/validator.go | 64 + ...gement.io_manifestworkreplicasets.crd.yaml | 6 +- 306 files changed, 44427 insertions(+), 118 deletions(-) create mode 100644 .github/workflows/cloudevents-integration.yml delete mode 100644 deps.diff create mode 100644 test/integration/cloudevents/deleteoption_test.go create mode 100644 test/integration/cloudevents/source/codec.go create mode 100644 test/integration/cloudevents/source/handler.go create mode 100644 test/integration/cloudevents/source/lister.go create mode 100644 test/integration/cloudevents/source/manifestwork.go create mode 100644 test/integration/cloudevents/source/source.go create mode 100644 test/integration/cloudevents/source/workclientset.go create mode 100644 test/integration/cloudevents/statusfeedback_test.go create mode 100644 test/integration/cloudevents/suite_test.go create mode 100644 test/integration/cloudevents/updatestrategy_test.go create mode 100644 test/integration/cloudevents/work_test.go create mode 100644 
vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/LICENSE create mode 100644 vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/message.go create mode 100644 vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/option.go create mode 100644 vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/protocol.go create mode 100644 vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/write_message.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/LICENSE create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/alias.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/message.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/binding/write.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/client.go create mode 100644 
vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/observability.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/options.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/context/context.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/context/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/context/logger.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/context/retry.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go create mode 100644 
vendor/github.com/cloudevents/sdk-go/v2/event/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go create mode 100644 
vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/staticcheck.conf create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/types/doc.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/types/uri.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go create mode 100644 vendor/github.com/cloudevents/sdk-go/v2/types/value.go create mode 100644 vendor/github.com/eclipse/paho.golang/LICENSE create mode 100644 vendor/github.com/eclipse/paho.golang/packets/auth.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/connack.go create mode 100644 
vendor/github.com/eclipse/paho.golang/packets/connect.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/disconnect.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/packets.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/pingreq.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/pingresp.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/properties.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/puback.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/pubcomp.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/publish.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/pubrec.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/pubrel.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/suback.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/subscribe.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/unsuback.go create mode 100644 vendor/github.com/eclipse/paho.golang/packets/unsubscribe.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/acks_tracker.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/auth.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/client.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/cp_auth.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/cp_connack.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/cp_connect.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/cp_disconnect.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/cp_publish.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/cp_pubresp.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/cp_suback.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/cp_subscribe.go create mode 100644 
vendor/github.com/eclipse/paho.golang/paho/cp_unsuback.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/cp_unsubscribe.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/cp_utils.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/message_ids.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/noop_persistence.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/persistence.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/pinger.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/router.go create mode 100644 vendor/github.com/eclipse/paho.golang/paho/trace.go create mode 100644 vendor/github.com/gorilla/websocket/.gitignore create mode 100644 vendor/github.com/gorilla/websocket/AUTHORS create mode 100644 vendor/github.com/gorilla/websocket/LICENSE create mode 100644 vendor/github.com/gorilla/websocket/README.md create mode 100644 vendor/github.com/gorilla/websocket/client.go create mode 100644 vendor/github.com/gorilla/websocket/compression.go create mode 100644 vendor/github.com/gorilla/websocket/conn.go create mode 100644 vendor/github.com/gorilla/websocket/doc.go create mode 100644 vendor/github.com/gorilla/websocket/join.go create mode 100644 vendor/github.com/gorilla/websocket/json.go create mode 100644 vendor/github.com/gorilla/websocket/mask.go create mode 100644 vendor/github.com/gorilla/websocket/mask_safe.go create mode 100644 vendor/github.com/gorilla/websocket/prepared.go create mode 100644 vendor/github.com/gorilla/websocket/proxy.go create mode 100644 vendor/github.com/gorilla/websocket/server.go create mode 100644 vendor/github.com/gorilla/websocket/tls_handshake.go create mode 100644 vendor/github.com/gorilla/websocket/tls_handshake_116.go create mode 100644 vendor/github.com/gorilla/websocket/util.go create mode 100644 vendor/github.com/gorilla/websocket/x_net_proxy.go create mode 100644 vendor/github.com/mattn/go-colorable/LICENSE create mode 100644 
vendor/github.com/mattn/go-colorable/README.md create mode 100644 vendor/github.com/mattn/go-colorable/colorable_appengine.go create mode 100644 vendor/github.com/mattn/go-colorable/colorable_others.go create mode 100644 vendor/github.com/mattn/go-colorable/colorable_windows.go create mode 100644 vendor/github.com/mattn/go-colorable/go.test.sh create mode 100644 vendor/github.com/mattn/go-colorable/noncolorable.go create mode 100644 vendor/github.com/mattn/go-isatty/LICENSE create mode 100644 vendor/github.com/mattn/go-isatty/README.md create mode 100644 vendor/github.com/mattn/go-isatty/doc.go create mode 100644 vendor/github.com/mattn/go-isatty/go.test.sh create mode 100644 vendor/github.com/mattn/go-isatty/isatty_bsd.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_others.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_plan9.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_solaris.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_tcgets.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_windows.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/.gitignore create mode 100644 vendor/github.com/mochi-mqtt/server/v2/.golangci.yml create mode 100644 vendor/github.com/mochi-mqtt/server/v2/Dockerfile create mode 100644 vendor/github.com/mochi-mqtt/server/v2/LICENSE.md create mode 100644 vendor/github.com/mochi-mqtt/server/v2/README.md create mode 100644 vendor/github.com/mochi-mqtt/server/v2/clients.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/hooks.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/hooks/auth/allow_all.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/hooks/auth/auth.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/hooks/auth/ledger.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/hooks/storage/storage.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/inflight.go create mode 100644 
vendor/github.com/mochi-mqtt/server/v2/listeners/http_healthcheck.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/listeners/http_sysinfo.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/listeners/listeners.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/listeners/mock.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/listeners/net.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/listeners/tcp.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/listeners/unixsock.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/listeners/websocket.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/packets/codec.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/packets/codes.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/packets/fixedheader.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/packets/packets.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/packets/properties.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/packets/tpackets.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/server.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/system/system.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/topics.go create mode 100644 vendor/github.com/rs/xid/.appveyor.yml create mode 100644 vendor/github.com/rs/xid/.travis.yml create mode 100644 vendor/github.com/rs/xid/LICENSE create mode 100644 vendor/github.com/rs/xid/README.md create mode 100644 vendor/github.com/rs/xid/error.go create mode 100644 vendor/github.com/rs/xid/hostid_darwin.go create mode 100644 vendor/github.com/rs/xid/hostid_fallback.go create mode 100644 vendor/github.com/rs/xid/hostid_freebsd.go create mode 100644 vendor/github.com/rs/xid/hostid_linux.go create mode 100644 vendor/github.com/rs/xid/hostid_windows.go create mode 100644 vendor/github.com/rs/xid/id.go create mode 100644 vendor/github.com/rs/zerolog/.gitignore create mode 
100644 vendor/github.com/rs/zerolog/CNAME create mode 100644 vendor/github.com/rs/zerolog/LICENSE create mode 100644 vendor/github.com/rs/zerolog/README.md create mode 100644 vendor/github.com/rs/zerolog/_config.yml create mode 100644 vendor/github.com/rs/zerolog/array.go create mode 100644 vendor/github.com/rs/zerolog/console.go create mode 100644 vendor/github.com/rs/zerolog/context.go create mode 100644 vendor/github.com/rs/zerolog/ctx.go create mode 100644 vendor/github.com/rs/zerolog/encoder.go create mode 100644 vendor/github.com/rs/zerolog/encoder_cbor.go create mode 100644 vendor/github.com/rs/zerolog/encoder_json.go create mode 100644 vendor/github.com/rs/zerolog/event.go create mode 100644 vendor/github.com/rs/zerolog/fields.go create mode 100644 vendor/github.com/rs/zerolog/globals.go create mode 100644 vendor/github.com/rs/zerolog/go112.go create mode 100644 vendor/github.com/rs/zerolog/hook.go create mode 100644 vendor/github.com/rs/zerolog/internal/cbor/README.md create mode 100644 vendor/github.com/rs/zerolog/internal/cbor/base.go create mode 100644 vendor/github.com/rs/zerolog/internal/cbor/cbor.go create mode 100644 vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go create mode 100644 vendor/github.com/rs/zerolog/internal/cbor/string.go create mode 100644 vendor/github.com/rs/zerolog/internal/cbor/time.go create mode 100644 vendor/github.com/rs/zerolog/internal/cbor/types.go create mode 100644 vendor/github.com/rs/zerolog/internal/json/base.go create mode 100644 vendor/github.com/rs/zerolog/internal/json/bytes.go create mode 100644 vendor/github.com/rs/zerolog/internal/json/string.go create mode 100644 vendor/github.com/rs/zerolog/internal/json/time.go create mode 100644 vendor/github.com/rs/zerolog/internal/json/types.go create mode 100644 vendor/github.com/rs/zerolog/log.go create mode 100644 vendor/github.com/rs/zerolog/not_go112.go create mode 100644 vendor/github.com/rs/zerolog/pretty.png create mode 100644 
vendor/github.com/rs/zerolog/sampler.go create mode 100644 vendor/github.com/rs/zerolog/syslog.go create mode 100644 vendor/github.com/rs/zerolog/writer.go create mode 100644 vendor/golang.org/x/sync/semaphore/semaphore.go create mode 100644 vendor/open-cluster-management.io/api/cloudevents/generic/agentclient.go create mode 100644 vendor/open-cluster-management.io/api/cloudevents/generic/baseclient.go create mode 100644 vendor/open-cluster-management.io/api/cloudevents/generic/interface.go create mode 100644 vendor/open-cluster-management.io/api/cloudevents/generic/options/mqtt/agentoptions.go create mode 100644 vendor/open-cluster-management.io/api/cloudevents/generic/options/mqtt/options.go create mode 100644 vendor/open-cluster-management.io/api/cloudevents/generic/options/mqtt/sourceoptions.go create mode 100644 vendor/open-cluster-management.io/api/cloudevents/generic/options/options.go create mode 100644 vendor/open-cluster-management.io/api/cloudevents/generic/payload/payload.go create mode 100644 vendor/open-cluster-management.io/api/cloudevents/generic/ratelimiter.go create mode 100644 vendor/open-cluster-management.io/api/cloudevents/generic/sourceclient.go create mode 100644 vendor/open-cluster-management.io/api/cloudevents/generic/types/types.go create mode 100644 vendor/open-cluster-management.io/api/cloudevents/work/agent/client/manifestwork.go create mode 100644 vendor/open-cluster-management.io/api/cloudevents/work/agent/codec/manifest.go create mode 100644 vendor/open-cluster-management.io/api/cloudevents/work/agent/codec/manifestbundle.go create mode 100644 vendor/open-cluster-management.io/api/cloudevents/work/agent/handler/resourcehandler.go create mode 100644 vendor/open-cluster-management.io/api/cloudevents/work/clientbuilder.go create mode 100644 vendor/open-cluster-management.io/api/cloudevents/work/internal/clientset.go create mode 100644 vendor/open-cluster-management.io/api/cloudevents/work/lister.go create mode 100644 
vendor/open-cluster-management.io/api/cloudevents/work/payload/mainfiest.go create mode 100644 vendor/open-cluster-management.io/api/cloudevents/work/payload/manifestbundle.go create mode 100644 vendor/open-cluster-management.io/api/cloudevents/work/statushash.go create mode 100644 vendor/open-cluster-management.io/api/cloudevents/work/utils/utils.go create mode 100644 vendor/open-cluster-management.io/api/cloudevents/work/watcher/watcher.go create mode 100644 vendor/open-cluster-management.io/api/utils/work/v1/utils/utils.go create mode 100644 vendor/open-cluster-management.io/api/utils/work/v1/workvalidator/validator.go diff --git a/.github/workflows/cloudevents-integration.yml b/.github/workflows/cloudevents-integration.yml new file mode 100644 index 000000000..222d6809a --- /dev/null +++ b/.github/workflows/cloudevents-integration.yml @@ -0,0 +1,31 @@ +name: CloudEventsIntegration + +on: + workflow_dispatch: {} + pull_request: + paths: + - 'pkg/work/spoke/*.go' + branches: + - main + - release-* + +env: + GO_VERSION: '1.20' + GO_REQUIRED_MIN_VERSION: '' + +permissions: + contents: read + +jobs: + integration: + name: cloudevents-integration + runs-on: ubuntu-latest + steps: + - name: checkout code + uses: actions/checkout@v3 + - name: install Go + uses: actions/setup-go@v3 + with: + go-version: ${{ env.GO_VERSION }} + - name: integration + run: make test-cloudevents-integration diff --git a/.gitignore b/.gitignore index eea4ddc89..fd95b7207 100644 --- a/.gitignore +++ b/.gitignore @@ -25,6 +25,7 @@ munge-csv *.out _output/ .idea/ +*.diff .kubeconfig .hub-kubeconfig diff --git a/deps.diff b/deps.diff deleted file mode 100644 index e69de29bb..000000000 diff --git a/go.mod b/go.mod index 9133cbf1a..8f566d105 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,13 @@ module open-cluster-management.io/ocm go 1.20 require ( + github.com/cloudevents/sdk-go/v2 v2.14.0 github.com/davecgh/go-spew v1.1.1 github.com/evanphx/json-patch v5.6.0+incompatible + github.com/ghodss/yaml 
v1.0.0 github.com/google/go-cmp v0.5.9 + github.com/google/uuid v1.3.0 + github.com/mochi-mqtt/server/v2 v2.3.0 github.com/onsi/ginkgo/v2 v2.9.5 github.com/onsi/gomega v1.27.7 github.com/openshift/api v0.0.0-20230911111751-da2f2ca9ae0f @@ -17,6 +21,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.2 github.com/valyala/fasttemplate v1.2.2 + go.uber.org/zap v1.26.0 golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 golang.org/x/net v0.17.0 k8s.io/api v0.28.1 @@ -29,7 +34,7 @@ require ( k8s.io/kube-aggregator v0.28.1 k8s.io/utils v0.0.0-20230726121419-3b25d923346b open-cluster-management.io/addon-framework v0.8.1-0.20231102082339-51742bc299f2 - open-cluster-management.io/api v0.12.1-0.20231124100313-881401342553 + open-cluster-management.io/api v0.12.1-0.20231130134655-97a8a92a7f30 sigs.k8s.io/controller-runtime v0.15.0 sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 ) @@ -46,15 +51,16 @@ require ( github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect + github.com/eclipse/paho.golang v0.11.0 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/fatih/structs v1.1.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/ghodss/yaml v1.0.0 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.2.4 // indirect @@ -70,7 +76,7 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof 
v0.0.0-20210720184732-4bb14d4b1be1 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect github.com/huandu/xstrings v1.3.3 // indirect @@ -79,6 +85,8 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -92,6 +100,8 @@ require ( github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.10.1 // indirect github.com/robfig/cron v1.2.0 // indirect + github.com/rs/xid v1.4.0 // indirect + github.com/rs/zerolog v1.28.0 // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/spf13/cast v1.4.1 // indirect @@ -114,7 +124,6 @@ require ( go.opentelemetry.io/otel/trace v1.10.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.26.0 // indirect golang.org/x/crypto v0.14.0 // indirect golang.org/x/oauth2 v0.8.0 // indirect golang.org/x/sync v0.5.0 // indirect diff --git a/go.sum b/go.sum index 738168798..c468f6dc5 100644 --- a/go.sum +++ b/go.sum @@ -68,6 +68,10 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 
+github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995 h1:pXyRKZ0T5WoB6X9QnHS5cEyW0Got39bNQIECxGUKVO4= +github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995/go.mod h1:mz9oS2Yhh/S7cvrrsgGMMR+6Shy0ZyL2lDN1sHQO1wE= +github.com/cloudevents/sdk-go/v2 v2.14.0 h1:Nrob4FwVgi5L4tV9lhjzZcjYqFVyJzsA56CwPaPfv6s= +github.com/cloudevents/sdk-go/v2 v2.14.0/go.mod h1:xDmKfzNjM8gBvjaF8ijFjM1VYOVUEeUfapHMUX1T5To= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= @@ -77,6 +81,7 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= +github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -87,6 +92,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/eclipse/paho.golang v0.11.0 
h1:6Avu5dkkCfcB61/y1vx+XrPQ0oAl4TPYtY0uw3HbQdM= +github.com/eclipse/paho.golang v0.11.0/go.mod h1:rhrV37IEwauUyx8FHrvmXOKo+QRKng5ncoN1vJiJMcs= github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -205,7 +212,9 @@ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= @@ -225,6 +234,7 @@ github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= github.com/josharian/intern v1.0.0 
h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= @@ -243,6 +253,10 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= @@ -251,6 +265,8 @@ github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HK github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mochi-mqtt/server/v2 v2.3.0 h1:vcFb7X7ANH1Qy2yGHMvp86N9VxjoUkZpr5mkIbfMLfw= +github.com/mochi-mqtt/server/v2 v2.3.0/go.mod h1:47GGVR0/5gbM1DzsI0f1yo25jcR1aaUIgj4dzmP5MNY= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -293,6 +309,10 @@ github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfm github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY= +github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.28.0 h1:MirSo27VyNi7RJYP3078AA1+Cyzd2GB66qy3aUHvsWY= +github.com/rs/zerolog v1.28.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -481,6 +501,7 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= @@ -515,6 +536,8 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys 
v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -740,8 +763,8 @@ k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSn k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= open-cluster-management.io/addon-framework v0.8.1-0.20231102082339-51742bc299f2 h1:38vY9paEGugvXfYGJ0oFabL4/8Jxrg+GnxxjUO2DMio= open-cluster-management.io/addon-framework v0.8.1-0.20231102082339-51742bc299f2/go.mod h1:aj97pgpGJ0/LpQzBVtU2oDFqqIiZLOPnsjLKG/sVkFw= -open-cluster-management.io/api v0.12.1-0.20231124100313-881401342553 h1:irm/uF6n9rICWOuvMn8f/SJIqQX5oHiXWbI25xJKzUE= -open-cluster-management.io/api v0.12.1-0.20231124100313-881401342553/go.mod h1:/I/nFccB0tmF+dZg7pHuzY3SaXOX86MI4vcFtidJ0OM= +open-cluster-management.io/api v0.12.1-0.20231130134655-97a8a92a7f30 h1:qzkatL1pCsMvA2KkuJ0ywWUqJ0ZI13ouMRVuAPTrhWk= +open-cluster-management.io/api v0.12.1-0.20231130134655-97a8a92a7f30/go.mod h1:fnoEBW9pbikOWOzF4zuT9DQAgWbY3PpPT/MSDZ/4bxw= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod 
h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/manifests/cluster-manager/hub/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml b/manifests/cluster-manager/hub/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml index e08074e70..9f2cc65ec 100644 --- a/manifests/cluster-manager/hub/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml +++ b/manifests/cluster-manager/hub/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml @@ -152,7 +152,7 @@ spec: anyOf: - type: integer - type: string - default: "0" + default: 0 description: MaxFailures is a percentage or number of clusters in the current rollout that can fail before proceeding to the next rollout. MaxFailures @@ -249,7 +249,7 @@ spec: anyOf: - type: integer - type: string - default: "0" + default: 0 description: MaxFailures is a percentage or number of clusters in the current rollout that can fail before proceeding to the next rollout. MaxFailures @@ -335,7 +335,7 @@ spec: anyOf: - type: integer - type: string - default: "0" + default: 0 description: MaxFailures is a percentage or number of clusters in the current rollout that can fail before proceeding to the next rollout. MaxFailures diff --git a/manifests/cluster-manager/hub/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml b/manifests/cluster-manager/hub/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml index c5a083042..938832f75 100644 --- a/manifests/cluster-manager/hub/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml +++ b/manifests/cluster-manager/hub/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml @@ -341,7 +341,7 @@ spec: anyOf: - type: integer - type: string - default: "0" + default: 0 description: MaxFailures is a percentage or number of clusters in the current rollout that can fail before proceeding to the next rollout. 
MaxFailures is only @@ -435,7 +435,7 @@ spec: anyOf: - type: integer - type: string - default: "0" + default: 0 description: MaxFailures is a percentage or number of clusters in the current rollout that can fail before proceeding to the next rollout. MaxFailures is only @@ -519,7 +519,7 @@ spec: anyOf: - type: integer - type: string - default: "0" + default: 0 description: MaxFailures is a percentage or number of clusters in the current rollout that can fail before proceeding to the next rollout. MaxFailures is only diff --git a/manifests/klusterlet/management/klusterlet-agent-deployment.yaml b/manifests/klusterlet/management/klusterlet-agent-deployment.yaml index f82c13763..aad09d788 100644 --- a/manifests/klusterlet/management/klusterlet-agent-deployment.yaml +++ b/manifests/klusterlet/management/klusterlet-agent-deployment.yaml @@ -55,6 +55,8 @@ spec: - "--spoke-cluster-name={{ .ClusterName }}" - "--bootstrap-kubeconfig=/spoke/bootstrap/kubeconfig" - "--agent-id={{ .AgentID }}" + - "--workload-source-driver=kube" + - "--workload-source-config=/spoke/hub-kubeconfig/kubeconfig" {{ if gt (len .WorkFeatureGates) 0 }} {{range .WorkFeatureGates}} - {{ . 
}} diff --git a/manifests/klusterlet/management/klusterlet-work-deployment.yaml b/manifests/klusterlet/management/klusterlet-work-deployment.yaml index 494a64a72..f84a230c5 100644 --- a/manifests/klusterlet/management/klusterlet-work-deployment.yaml +++ b/manifests/klusterlet/management/klusterlet-work-deployment.yaml @@ -53,7 +53,8 @@ spec: - "/work" - "agent" - "--spoke-cluster-name={{ .ClusterName }}" - - "--hub-kubeconfig=/spoke/hub-kubeconfig/kubeconfig" + - "--workload-source-driver=kube" + - "--workload-source-config=/spoke/hub-kubeconfig/kubeconfig" - "--agent-id={{ .AgentID }}" {{ if gt (len .WorkFeatureGates) 0 }} {{range .WorkFeatureGates}} diff --git a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go index 7454b14e4..0387839fc 100644 --- a/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go +++ b/pkg/operator/operators/klusterlet/controllers/klusterletcontroller/klusterlet_controller_test.go @@ -414,7 +414,8 @@ func assertWorkDeployment(t *testing.T, actions []clienttesting.Action, verb, cl "/work", "agent", fmt.Sprintf("--spoke-cluster-name=%s", clusterName), - "--hub-kubeconfig=/spoke/hub-kubeconfig/kubeconfig", + "--workload-source-driver=kube", + "--workload-source-config=/spoke/hub-kubeconfig/kubeconfig", "--agent-id=", } diff --git a/pkg/work/spoke/options.go b/pkg/work/spoke/options.go index b37d77f48..3f96257bd 100644 --- a/pkg/work/spoke/options.go +++ b/pkg/work/spoke/options.go @@ -6,10 +6,21 @@ import ( "github.com/spf13/pflag" ) +const ( + KubeDriver = "kube" + MQTTDriver = "mqtt" +) + +type WorkloadSourceDriver struct { + Type string + Config string +} + // WorkloadAgentOptions defines the flags for workload agent type WorkloadAgentOptions struct { StatusSyncInterval time.Duration AppliedManifestWorkEvictionGracePeriod time.Duration + 
WorkloadSourceDriver WorkloadSourceDriver } // NewWorkloadAgentOptions returns the flags with default value set @@ -22,7 +33,12 @@ func NewWorkloadAgentOptions() *WorkloadAgentOptions { // AddFlags register and binds the default flags func (o *WorkloadAgentOptions) AddFlags(fs *pflag.FlagSet) { - fs.DurationVar(&o.StatusSyncInterval, "status-sync-interval", o.StatusSyncInterval, "Interval to sync resource status to hub.") + fs.DurationVar(&o.StatusSyncInterval, "status-sync-interval", + o.StatusSyncInterval, "Interval to sync resource status to hub.") fs.DurationVar(&o.AppliedManifestWorkEvictionGracePeriod, "appliedmanifestwork-eviction-grace-period", o.AppliedManifestWorkEvictionGracePeriod, "Grace period for appliedmanifestwork eviction") + fs.StringVar(&o.WorkloadSourceDriver.Type, "workload-source-driver", + o.WorkloadSourceDriver.Type, "The type of workload source driver, currently it can be kube or mqtt") + fs.StringVar(&o.WorkloadSourceDriver.Config, "workload-source-config", + o.WorkloadSourceDriver.Config, "The config file path of current workload source") } diff --git a/pkg/work/spoke/spokeagent.go b/pkg/work/spoke/spokeagent.go index d9310142b..d914d14fa 100644 --- a/pkg/work/spoke/spokeagent.go +++ b/pkg/work/spoke/spokeagent.go @@ -2,10 +2,12 @@ package spoke import ( "context" + "fmt" "time" "github.com/openshift/library-go/pkg/controller/controllercmd" apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -14,6 +16,9 @@ import ( workclientset "open-cluster-management.io/api/client/work/clientset/versioned" workinformers "open-cluster-management.io/api/client/work/informers/externalversions" + "open-cluster-management.io/api/cloudevents/generic/options/mqtt" + cloudeventswork "open-cluster-management.io/api/cloudevents/work" + "open-cluster-management.io/api/cloudevents/work/agent/codec" ocmfeature 
"open-cluster-management.io/api/feature" commonoptions "open-cluster-management.io/ocm/pkg/common/options" @@ -53,26 +58,6 @@ func NewWorkAgentConfig(commonOpts *commonoptions.AgentOptions, opts *WorkloadAg // RunWorkloadAgent starts the controllers on agent to process work from hub. func (o *WorkAgentConfig) RunWorkloadAgent(ctx context.Context, controllerContext *controllercmd.ControllerContext) error { - // build hub client and informer - hubRestConfig, err := clientcmd.BuildConfigFromFlags("" /* leave masterurl as empty */, o.agentOptions.HubKubeconfigFile) - if err != nil { - return err - } - hubhash := helper.HubHash(hubRestConfig.Host) - - agentID := o.agentOptions.AgentID - if len(agentID) == 0 { - agentID = hubhash - } - - hubWorkClient, err := workclientset.NewForConfig(hubRestConfig) - if err != nil { - return err - } - // Only watch the cluster namespace on hub - workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(hubWorkClient, 5*time.Minute, - workinformers.WithNamespace(o.agentOptions.SpokeClusterName)) - // load spoke client config and create spoke clients, // the work agent may not running in the spoke/managed cluster. 
spokeRestConfig, err := o.agentOptions.SpokeKubeConfig(controllerContext.KubeConfig) @@ -107,10 +92,20 @@ func (o *WorkAgentConfig) RunWorkloadAgent(ctx context.Context, controllerContex return err } + // build hub client and informer + clientHolder, hubHash, agentID, err := o.buildHubClientHolder(ctx, o.agentOptions.SpokeClusterName, restMapper) + if err != nil { + return err + } + + hubWorkClient := clientHolder.ManifestWorks(o.agentOptions.SpokeClusterName) + hubWorkInformer := clientHolder.ManifestWorkInformer() + + // create controllers validator := auth.NewFactory( spokeRestConfig, spokeKubeClient, - workInformerFactory.Work().V1().ManifestWorks(), + hubWorkInformer, o.agentOptions.SpokeClusterName, controllerContext.EventRecorder, restMapper, @@ -121,20 +116,20 @@ func (o *WorkAgentConfig) RunWorkloadAgent(ctx context.Context, controllerContex spokeDynamicClient, spokeKubeClient, spokeAPIExtensionClient, - hubWorkClient.WorkV1().ManifestWorks(o.agentOptions.SpokeClusterName), - workInformerFactory.Work().V1().ManifestWorks(), - workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.agentOptions.SpokeClusterName), + hubWorkClient, + hubWorkInformer, + hubWorkInformer.Lister().ManifestWorks(o.agentOptions.SpokeClusterName), spokeWorkClient.WorkV1().AppliedManifestWorks(), spokeWorkInformerFactory.Work().V1().AppliedManifestWorks(), - hubhash, agentID, + hubHash, agentID, restMapper, validator, ) addFinalizerController := finalizercontroller.NewAddFinalizerController( controllerContext.EventRecorder, - hubWorkClient.WorkV1().ManifestWorks(o.agentOptions.SpokeClusterName), - workInformerFactory.Work().V1().ManifestWorks(), - workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.agentOptions.SpokeClusterName), + hubWorkClient, + hubWorkInformer, + hubWorkInformer.Lister().ManifestWorks(o.agentOptions.SpokeClusterName), ) appliedManifestWorkFinalizeController := finalizercontroller.NewAppliedManifestWorkFinalizeController( 
controllerContext.EventRecorder, @@ -145,42 +140,43 @@ func (o *WorkAgentConfig) RunWorkloadAgent(ctx context.Context, controllerContex ) manifestWorkFinalizeController := finalizercontroller.NewManifestWorkFinalizeController( controllerContext.EventRecorder, - hubWorkClient.WorkV1().ManifestWorks(o.agentOptions.SpokeClusterName), - workInformerFactory.Work().V1().ManifestWorks(), - workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.agentOptions.SpokeClusterName), + hubWorkClient, + hubWorkInformer, + hubWorkInformer.Lister().ManifestWorks(o.agentOptions.SpokeClusterName), spokeWorkClient.WorkV1().AppliedManifestWorks(), spokeWorkInformerFactory.Work().V1().AppliedManifestWorks(), - hubhash, + hubHash, ) unmanagedAppliedManifestWorkController := finalizercontroller.NewUnManagedAppliedWorkController( controllerContext.EventRecorder, - workInformerFactory.Work().V1().ManifestWorks(), - workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.agentOptions.SpokeClusterName), + hubWorkInformer, + hubWorkInformer.Lister().ManifestWorks(o.agentOptions.SpokeClusterName), spokeWorkClient.WorkV1().AppliedManifestWorks(), spokeWorkInformerFactory.Work().V1().AppliedManifestWorks(), o.workOptions.AppliedManifestWorkEvictionGracePeriod, - hubhash, agentID, + hubHash, agentID, ) appliedManifestWorkController := appliedmanifestcontroller.NewAppliedManifestWorkController( controllerContext.EventRecorder, spokeDynamicClient, - workInformerFactory.Work().V1().ManifestWorks(), - workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.agentOptions.SpokeClusterName), + hubWorkInformer, + hubWorkInformer.Lister().ManifestWorks(o.agentOptions.SpokeClusterName), spokeWorkClient.WorkV1().AppliedManifestWorks(), spokeWorkInformerFactory.Work().V1().AppliedManifestWorks(), - hubhash, + hubHash, ) availableStatusController := statuscontroller.NewAvailableStatusController( controllerContext.EventRecorder, spokeDynamicClient, - 
hubWorkClient.WorkV1().ManifestWorks(o.agentOptions.SpokeClusterName), - workInformerFactory.Work().V1().ManifestWorks(), - workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.agentOptions.SpokeClusterName), + hubWorkClient, + hubWorkInformer, + hubWorkInformer.Lister().ManifestWorks(o.agentOptions.SpokeClusterName), o.workOptions.StatusSyncInterval, ) - go workInformerFactory.Start(ctx.Done()) go spokeWorkInformerFactory.Start(ctx.Done()) + go hubWorkInformer.Informer().Run(ctx.Done()) + go addFinalizerController.Run(ctx, 1) go appliedManifestWorkFinalizeController.Run(ctx, appliedManifestWorkFinalizeControllerWorkers) go unmanagedAppliedManifestWorkController.Run(ctx, 1) @@ -188,6 +184,61 @@ func (o *WorkAgentConfig) RunWorkloadAgent(ctx context.Context, controllerContex go manifestWorkController.Run(ctx, 1) go manifestWorkFinalizeController.Run(ctx, manifestWorkFinalizeControllerWorkers) go availableStatusController.Run(ctx, availableStatusControllerWorkers) + <-ctx.Done() + return nil } + +// To support consuming ManifestWorks from different drivers (like the Kubernetes apiserver or MQTT broker), we build +// ManifestWork client that implements the ManifestWorkInterface and ManifestWork informer based on different +// driver configuration. +// Refer to Event Based Manifestwork proposal in enhancements repo to get more details. 
+func (o *WorkAgentConfig) buildHubClientHolder(ctx context.Context, + clusterName string, restMapper meta.RESTMapper) (*cloudeventswork.ClientHolder, string, string, error) { + agentID := o.agentOptions.AgentID + switch o.workOptions.WorkloadSourceDriver.Type { + case KubeDriver: + hubRestConfig, err := clientcmd.BuildConfigFromFlags("", o.workOptions.WorkloadSourceDriver.Config) + if err != nil { + return nil, "", "", err + } + + hubHash := helper.HubHash(hubRestConfig.Host) + if len(agentID) == 0 { + agentID = hubHash + } + + // Only watch the cluster namespace on hub + clientHolder, err := cloudeventswork.NewClientHolderBuilder(agentID, hubRestConfig). + WithInformerConfig(5*time.Minute, workinformers.WithNamespace(o.agentOptions.SpokeClusterName)). + NewClientHolder(ctx) + if err != nil { + return nil, "", "", err + } + + return clientHolder, hubHash, agentID, nil + case MQTTDriver: + mqttOptions, err := mqtt.BuildMQTTOptionsFromFlags(o.workOptions.WorkloadSourceDriver.Config) + if err != nil { + return nil, "", "", err + } + + hubHash := helper.HubHash(mqttOptions.BrokerHost) + if len(agentID) == 0 { + agentID = fmt.Sprintf("%s-work-agent", o.agentOptions.SpokeClusterName) + } + + clientHolder, err := cloudeventswork.NewClientHolderBuilder(agentID, mqttOptions). + WithClusterName(o.agentOptions.SpokeClusterName). + WithCodecs(codec.NewManifestCodec(restMapper)). 
// TODO support manifestbundles + NewClientHolder(ctx) + if err != nil { + return nil, "", "", err + } + + return clientHolder, hubHash, agentID, nil + } + + return nil, "", "", fmt.Errorf("unsupported driver %s", o.workOptions.WorkloadSourceDriver.Type) +} diff --git a/test/integration-test.mk b/test/integration-test.mk index 305a480cf..1850039cd 100644 --- a/test/integration-test.mk +++ b/test/integration-test.mk @@ -51,5 +51,10 @@ test-addon-integration: ensure-kubebuilder-tools ./addon-integration.test -ginkgo.slow-spec-threshold=15s -ginkgo.v -ginkgo.fail-fast .PHONY: test-addon-integration +test-cloudevents-integration: ensure-kubebuilder-tools + go test -c ./test/integration/cloudevents -o ./cloudevents-integration.test + ./cloudevents-integration.test -ginkgo.slow-spec-threshold=15s -ginkgo.v -ginkgo.fail-fast +.PHONY: test-cloudevents-integration + test-integration: test-registration-operator-integration test-registration-integration test-placement-integration test-work-integration test-addon-integration .PHONY: test-integration diff --git a/test/integration/cloudevents/deleteoption_test.go b/test/integration/cloudevents/deleteoption_test.go new file mode 100644 index 000000000..2794f8575 --- /dev/null +++ b/test/integration/cloudevents/deleteoption_test.go @@ -0,0 +1,201 @@ +package cloudevents + +import ( + "context" + "fmt" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilrand "k8s.io/apimachinery/pkg/util/rand" + + workapiv1 "open-cluster-management.io/api/work/v1" + + commonoptions "open-cluster-management.io/ocm/pkg/common/options" + "open-cluster-management.io/ocm/pkg/work/spoke" + "open-cluster-management.io/ocm/test/integration/util" +) + +var _ = ginkgo.Describe("ManifestWork Delete Option", func() { + var o *spoke.WorkloadAgentOptions + var commOptions *commonoptions.AgentOptions + var cancel 
context.CancelFunc + + var work *workapiv1.ManifestWork + var manifests []workapiv1.Manifest + + var err error + + ginkgo.BeforeEach(func() { + o = spoke.NewWorkloadAgentOptions() + o.StatusSyncInterval = 3 * time.Second + o.WorkloadSourceDriver.Type = workSourceDriver + o.WorkloadSourceDriver.Config = workSourceConfigFileName + + commOptions = commonoptions.NewAgentOptions() + commOptions.SpokeClusterName = utilrand.String(5) + + ns := &corev1.Namespace{} + ns.Name = commOptions.SpokeClusterName + _, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + var ctx context.Context + ctx, cancel = context.WithCancel(context.Background()) + go startWorkAgent(ctx, o, commOptions) + + // reset manifests + manifests = nil + }) + + ginkgo.JustBeforeEach(func() { + work = util.NewManifestWork(commOptions.SpokeClusterName, "", manifests) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }) + + ginkgo.AfterEach(func() { + if cancel != nil { + cancel() + } + err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), commOptions.SpokeClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }) + + // TODO test multiple manifests after the manifestbundles is enabled + + ginkgo.Context("Delete options", func() { + ginkgo.BeforeEach(func() { + manifests = []workapiv1.Manifest{ + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, []string{})), + } + }) + + ginkgo.It("Orphan deletion of the whole manifestwork", func() { + work.Spec.DeleteOption = &workapiv1.DeleteOption{ + PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan, + } + + work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + util.AssertWorkCondition(work.Namespace, work.Name, 
workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + // Ensure configmap exists + util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) + + // Ensure ownership of configmap is updated + gomega.Eventually(func() error { + cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) + if err != nil { + return err + } + + if len(cm.OwnerReferences) != 0 { + return fmt.Errorf("owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Delete the work + err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // Wait for deletion of manifest work + gomega.Eventually(func() bool { + _, err := workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + return errors.IsNotFound(err) + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // Ensure configmap exists + util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) + }) + + ginkgo.It("Clean the resource when orphan deletion option is removed", func() { + work.Spec.DeleteOption = &workapiv1.DeleteOption{ + PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan, + SelectivelyOrphan: &workapiv1.SelectivelyOrphan{ + OrphaningRules: []workapiv1.OrphaningRule{ + { + Group: "", + 
Resource: "configmaps", + Namespace: commOptions.SpokeClusterName, + Name: cm1, + }, + }, + }, + } + + work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + // Ensure configmap exists + util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) + + // Ensure ownership of configmap is updated + gomega.Eventually(func() error { + cm, err := spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) + if err != nil { + return err + } + + if len(cm.OwnerReferences) != 0 { + return fmt.Errorf("owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Remove the delete option + gomega.Eventually(func() error { + work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + work.Spec.DeleteOption = nil + _, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Ensure ownership of configmap is updated + gomega.Eventually(func() error { + cm, err := 
spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) + if err != nil { + return err + } + + if len(cm.OwnerReferences) != 1 { + return fmt.Errorf("owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Delete the work + err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // Wait for deletion of manifest work + gomega.Eventually(func() bool { + _, err := workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + return errors.IsNotFound(err) + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // All of the resource should be deleted. + _, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) + gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) + }) + }) +}) diff --git a/test/integration/cloudevents/source/codec.go b/test/integration/cloudevents/source/codec.go new file mode 100644 index 000000000..1249f9a69 --- /dev/null +++ b/test/integration/cloudevents/source/codec.go @@ -0,0 +1,217 @@ +package source + +import ( + "fmt" + "strconv" + + cloudevents "github.com/cloudevents/sdk-go/v2" + cloudeventstypes "github.com/cloudevents/sdk-go/v2/types" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + kubetypes "k8s.io/apimachinery/pkg/types" + + "open-cluster-management.io/api/cloudevents/generic/types" + "open-cluster-management.io/api/cloudevents/work/payload" + workv1 "open-cluster-management.io/api/work/v1" +) + +type ManifestCodec struct{} + +func (c *ManifestCodec) 
EventDataType() types.CloudEventsDataType { + return payload.ManifestEventDataType +} + +func (d *ManifestCodec) Encode(source string, eventType types.CloudEventsType, work *workv1.ManifestWork) (*cloudevents.Event, error) { + if eventType.CloudEventsDataType != payload.ManifestEventDataType { + return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) + } + + if len(work.Spec.Workload.Manifests) != 1 { + return nil, fmt.Errorf("too many manifests in the work") + } + + eventBuilder := types.NewEventBuilder(source, eventType). + WithResourceID(string(work.UID)). + WithResourceVersion(work.Generation). + WithClusterName(work.Namespace) + + if !work.GetDeletionTimestamp().IsZero() { + evt := eventBuilder.WithDeletionTimestamp(work.GetDeletionTimestamp().Time).NewEvent() + return &evt, nil + } + + evt := eventBuilder.NewEvent() + + manifest := work.Spec.Workload.Manifests[0] + unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&manifest) + if err != nil { + return nil, fmt.Errorf("failed to convert manifest to unstructured object: %v", err) + } + + evtPayload := &payload.Manifest{ + Manifest: unstructured.Unstructured{Object: unstructuredObj}, + DeleteOption: work.Spec.DeleteOption, + } + + if len(work.Spec.ManifestConfigs) == 1 { + evtPayload.ConfigOption = &payload.ManifestConfigOption{ + FeedbackRules: work.Spec.ManifestConfigs[0].FeedbackRules, + UpdateStrategy: work.Spec.ManifestConfigs[0].UpdateStrategy, + } + } + + if err := evt.SetData(cloudevents.ApplicationJSON, evtPayload); err != nil { + return nil, fmt.Errorf("failed to encode manifests to cloud event: %v", err) + } + + return &evt, nil +} + +func (c *ManifestCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWork, error) { + eventType, err := types.ParseCloudEventsType(evt.Type()) + if err != nil { + return nil, fmt.Errorf("failed to parse cloud event type %s, %v", evt.Type(), err) + } + + if eventType.CloudEventsDataType != 
payload.ManifestEventDataType { + return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) + } + + evtExtensions := evt.Context.GetExtensions() + + resourceID, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceID]) + if err != nil { + return nil, fmt.Errorf("failed to get resourceid extension: %v", err) + } + + resourceVersion, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceVersion]) + if err != nil { + return nil, fmt.Errorf("failed to get resourceversion extension: %v", err) + } + + resourceVersionInt, err := strconv.ParseInt(resourceVersion, 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to convert resourceversion - %v to int64", resourceVersion) + } + + clusterName, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionClusterName]) + if err != nil { + return nil, fmt.Errorf("failed to get clustername extension: %v", err) + } + + manifestStatus := &payload.ManifestStatus{} + if err := evt.DataAs(manifestStatus); err != nil { + return nil, fmt.Errorf("failed to unmarshal event data %s, %v", string(evt.Data()), err) + } + + work := &workv1.ManifestWork{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + UID: kubetypes.UID(resourceID), + ResourceVersion: resourceVersion, + Generation: resourceVersionInt, + Namespace: clusterName, + }, + Status: workv1.ManifestWorkStatus{ + Conditions: manifestStatus.Conditions, + ResourceStatus: workv1.ManifestResourceStatus{ + Manifests: []workv1.ManifestCondition{ + { + Conditions: manifestStatus.Status.Conditions, + StatusFeedbacks: manifestStatus.Status.StatusFeedbacks, + ResourceMeta: manifestStatus.Status.ResourceMeta, + }, + }, + }, + }, + } + + return work, nil +} + +type ManifestBundleCodec struct{} + +func (c *ManifestBundleCodec) EventDataType() types.CloudEventsDataType { + return payload.ManifestBundleEventDataType +} + +func (d *ManifestBundleCodec) Encode(source string, eventType types.CloudEventsType, 
work *workv1.ManifestWork) (*cloudevents.Event, error) { + if eventType.CloudEventsDataType != payload.ManifestBundleEventDataType { + return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) + } + + eventBuilder := types.NewEventBuilder(source, eventType). + WithResourceID(string(work.UID)). + WithResourceVersion(work.Generation). + WithClusterName(work.Namespace) + + if !work.GetDeletionTimestamp().IsZero() { + evt := eventBuilder.WithDeletionTimestamp(work.GetDeletionTimestamp().Time).NewEvent() + return &evt, nil + } + + evt := eventBuilder.NewEvent() + + if err := evt.SetData(cloudevents.ApplicationJSON, &payload.ManifestBundle{Manifests: work.Spec.Workload.Manifests}); err != nil { + return nil, fmt.Errorf("failed to encode manifests to cloud event: %v", err) + } + + return &evt, nil +} + +func (c *ManifestBundleCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWork, error) { + eventType, err := types.ParseCloudEventsType(evt.Type()) + if err != nil { + return nil, fmt.Errorf("failed to parse cloud event type %s, %v", evt.Type(), err) + } + + if eventType.CloudEventsDataType != payload.ManifestBundleEventDataType { + return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) + } + + evtExtensions := evt.Context.GetExtensions() + + resourceID, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceID]) + if err != nil { + return nil, fmt.Errorf("failed to get resourceid extension: %v", err) + } + + resourceVersion, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceVersion]) + if err != nil { + return nil, fmt.Errorf("failed to get resourceversion extension: %v", err) + } + + resourceVersionInt, err := strconv.ParseInt(resourceVersion, 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to convert resourceversion - %v to int64", resourceVersion) + } + + clusterName, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionClusterName]) + 
if err != nil { + return nil, fmt.Errorf("failed to get clustername extension: %v", err) + } + + manifestStatus := &payload.ManifestBundleStatus{} + if err := evt.DataAs(manifestStatus); err != nil { + return nil, fmt.Errorf("failed to unmarshal event data %s, %v", string(evt.Data()), err) + } + + work := &workv1.ManifestWork{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + UID: kubetypes.UID(resourceID), + ResourceVersion: resourceVersion, + Generation: resourceVersionInt, + Namespace: clusterName, + }, + Status: workv1.ManifestWorkStatus{ + Conditions: manifestStatus.Conditions, + ResourceStatus: workv1.ManifestResourceStatus{ + Manifests: manifestStatus.ResourceStatus, + }, + }, + } + + return work, nil +} diff --git a/test/integration/cloudevents/source/handler.go b/test/integration/cloudevents/source/handler.go new file mode 100644 index 000000000..c7bb4100e --- /dev/null +++ b/test/integration/cloudevents/source/handler.go @@ -0,0 +1,77 @@ +package source + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog/v2" + + workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" + "open-cluster-management.io/api/cloudevents/generic" + "open-cluster-management.io/api/cloudevents/generic/types" + "open-cluster-management.io/api/cloudevents/work/watcher" + workv1 "open-cluster-management.io/api/work/v1" +) + +const ManifestWorkFinalizer = "cluster.open-cluster-management.io/manifest-work-cleanup" + +func newManifestWorkStatusHandler(lister workv1lister.ManifestWorkLister, watcher *watcher.ManifestWorkWatcher) generic.ResourceHandler[*workv1.ManifestWork] { + return func(action types.ResourceAction, work *workv1.ManifestWork) error { + switch action { + case types.StatusModified: + works, err := lister.ManifestWorks(work.Namespace).List(labels.Everything()) + if err != nil { + return err + } + + var lastWork 
*workv1.ManifestWork + for _, w := range works { + if w.UID == work.UID { + lastWork = w + break + } + } + + if lastWork == nil { + return fmt.Errorf("failed to find last work with id %s", work.UID) + } + + if work.Generation < lastWork.Generation { + klog.Infof("The work %s generation %d is less than cached generation %d, ignore", + work.UID, work.Generation, lastWork.Generation) + return nil + } + + // no status change + if equality.Semantic.DeepEqual(lastWork.Status, work.Status) { + return nil + } + + // restore the fields that are maintained by local agent + work.Name = lastWork.Name + work.Namespace = lastWork.Namespace + work.Labels = lastWork.Labels + work.Annotations = lastWork.Annotations + work.DeletionTimestamp = lastWork.DeletionTimestamp + work.Spec = lastWork.Spec + + if meta.IsStatusConditionTrue(work.Status.Conditions, ManifestsDeleted) { + work.Finalizers = []string{} + klog.Infof("delete work %s/%s in the source", work.Namespace, work.Name) + watcher.Receive(watch.Event{Type: watch.Deleted, Object: work}) + return nil + } + + // the work is handled by agent, we make sure the finalizer here + work.Finalizers = []string{ManifestWorkFinalizer} + watcher.Receive(watch.Event{Type: watch.Modified, Object: work}) + default: + return fmt.Errorf("unsupported resource action %s", action) + } + + return nil + } +} diff --git a/test/integration/cloudevents/source/lister.go b/test/integration/cloudevents/source/lister.go new file mode 100644 index 000000000..538f1605f --- /dev/null +++ b/test/integration/cloudevents/source/lister.go @@ -0,0 +1,17 @@ +package source + +import ( + "k8s.io/apimachinery/pkg/labels" + + workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" + "open-cluster-management.io/api/cloudevents/generic/types" + workv1 "open-cluster-management.io/api/work/v1" +) + +type manifestWorkLister struct { + Lister workv1lister.ManifestWorkLister +} + +func (l *manifestWorkLister) List(options types.ListOptions) 
([]*workv1.ManifestWork, error) { + return l.Lister.ManifestWorks(options.ClusterName).List(labels.Everything()) +} diff --git a/test/integration/cloudevents/source/manifestwork.go b/test/integration/cloudevents/source/manifestwork.go new file mode 100644 index 000000000..3e357cd82 --- /dev/null +++ b/test/integration/cloudevents/source/manifestwork.go @@ -0,0 +1,184 @@ +package source + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + kubetypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog/v2" + + workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" + workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" + "open-cluster-management.io/api/cloudevents/generic" + "open-cluster-management.io/api/cloudevents/generic/types" + "open-cluster-management.io/api/cloudevents/work/payload" + "open-cluster-management.io/api/cloudevents/work/watcher" + workv1 "open-cluster-management.io/api/work/v1" +) + +const ManifestsDeleted = "Deleted" + +const ( + UpdateRequestAction = "update_request" + DeleteRequestAction = "delete_request" +) + +type manifestWorkSourceClient struct { + cloudEventsClient *generic.CloudEventSourceClient[*workv1.ManifestWork] + watcher *watcher.ManifestWorkWatcher + lister workv1lister.ManifestWorkLister + namespace string +} + +var manifestWorkGR = schema.GroupResource{Group: workv1.GroupName, Resource: "manifestworks"} + +var _ workv1client.ManifestWorkInterface = &manifestWorkSourceClient{} + +func newManifestWorkSourceClient(cloudEventsClient *generic.CloudEventSourceClient[*workv1.ManifestWork], + watcher *watcher.ManifestWorkWatcher) *manifestWorkSourceClient { + return &manifestWorkSourceClient{ + cloudEventsClient: cloudEventsClient, + 
watcher: watcher, + } +} + +func (c *manifestWorkSourceClient) SetNamespace(namespace string) *manifestWorkSourceClient { + c.namespace = namespace + return c +} + +func (c *manifestWorkSourceClient) SetLister(lister workv1lister.ManifestWorkLister) { + c.lister = lister +} + +func (c *manifestWorkSourceClient) Create(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.CreateOptions) (*workv1.ManifestWork, error) { + if manifestWork.Name == "" { + manifestWork.Name = manifestWork.GenerateName + rand.String(5) + } + + klog.Infof("create manifestwork %s/%s", c.namespace, manifestWork.Name) + _, err := c.lister.ManifestWorks(c.namespace).Get(manifestWork.Name) + if errors.IsNotFound(err) { + newObj := manifestWork.DeepCopy() + newObj.UID = kubetypes.UID(uuid.New().String()) + newObj.ResourceVersion = "1" + newObj.Generation = 1 + newObj.Namespace = c.namespace + + //TODO support manifestbundles + eventType := types.CloudEventsType{ + CloudEventsDataType: payload.ManifestEventDataType, + SubResource: types.SubResourceSpec, + Action: "create_request", + } + if err := c.cloudEventsClient.Publish(ctx, eventType, newObj); err != nil { + return nil, err + } + + // refresh cache + c.watcher.Receive(watch.Event{Type: watch.Added, Object: newObj}) + return newObj, nil + } + + if err != nil { + return nil, err + } + + return nil, errors.NewAlreadyExists(manifestWorkGR, manifestWork.Name) +} + +func (c *manifestWorkSourceClient) Update(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.UpdateOptions) (*workv1.ManifestWork, error) { + klog.Infof("update manifestwork %s/%s", c.namespace, manifestWork.Name) + lastWork, err := c.lister.ManifestWorks(c.namespace).Get(manifestWork.Name) + if err != nil { + return nil, err + } + + if equality.Semantic.DeepEqual(lastWork.Spec, manifestWork.Spec) { + return manifestWork, nil + } + + updatedObj := manifestWork.DeepCopy() + updatedObj.Generation = updatedObj.Generation + 1 + updatedObj.ResourceVersion = 
fmt.Sprintf("%d", updatedObj.Generation) + + //TODO support manifestbundles + eventType := types.CloudEventsType{ + CloudEventsDataType: payload.ManifestEventDataType, + SubResource: types.SubResourceSpec, + Action: "update_request", + } + if err := c.cloudEventsClient.Publish(ctx, eventType, updatedObj); err != nil { + return nil, err + } + + // refresh cache + c.watcher.Receive(watch.Event{Type: watch.Modified, Object: updatedObj}) + + return updatedObj, nil +} + +func (c *manifestWorkSourceClient) UpdateStatus(ctx context.Context, + manifestWork *workv1.ManifestWork, opts metav1.UpdateOptions) (*workv1.ManifestWork, error) { + return nil, errors.NewMethodNotSupported(manifestWorkGR, "updatestatus") +} + +func (c *manifestWorkSourceClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + klog.Infof("delete manifestwork %s/%s", c.namespace, name) + manifestWork, err := c.lister.ManifestWorks(c.namespace).Get(name) + if err != nil { + return err + } + + // actual deletion should be done after hub receive delete status + deletedObj := manifestWork.DeepCopy() + now := metav1.Now() + deletedObj.DeletionTimestamp = &now + + //TODO support manifestbundles + eventType := types.CloudEventsType{ + CloudEventsDataType: payload.ManifestEventDataType, + SubResource: types.SubResourceSpec, + Action: "delete_request", + } + + if err := c.cloudEventsClient.Publish(ctx, eventType, deletedObj); err != nil { + return err + } + + // refresh cache + c.watcher.Receive(watch.Event{Type: watch.Modified, Object: deletedObj}) + return nil +} + +func (c *manifestWorkSourceClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + return errors.NewMethodNotSupported(manifestWorkGR, "deletecollection") +} + +func (c *manifestWorkSourceClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*workv1.ManifestWork, error) { + work, err := c.lister.ManifestWorks(c.namespace).Get(name) + if err != nil 
{ + return nil, err + } + return work.DeepCopy(), nil +} + +func (c *manifestWorkSourceClient) List(ctx context.Context, opts metav1.ListOptions) (*workv1.ManifestWorkList, error) { + return &workv1.ManifestWorkList{}, nil +} + +func (c *manifestWorkSourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.watcher, nil +} + +func (c *manifestWorkSourceClient) Patch(ctx context.Context, name string, pt kubetypes.PatchType, data []byte, + opts metav1.PatchOptions, subresources ...string) (result *workv1.ManifestWork, err error) { + return nil, errors.NewMethodNotSupported(manifestWorkGR, "patch") +} diff --git a/test/integration/cloudevents/source/source.go b/test/integration/cloudevents/source/source.go new file mode 100644 index 000000000..20a5fe940 --- /dev/null +++ b/test/integration/cloudevents/source/source.go @@ -0,0 +1,126 @@ +package source + +import ( + "context" + "log" + "os" + "time" + + "github.com/ghodss/yaml" + mochimqtt "github.com/mochi-mqtt/server/v2" + "github.com/mochi-mqtt/server/v2/hooks/auth" + "github.com/mochi-mqtt/server/v2/listeners" + + workclientset "open-cluster-management.io/api/client/work/clientset/versioned" + workinformers "open-cluster-management.io/api/client/work/informers/externalversions" + "open-cluster-management.io/api/cloudevents/generic" + "open-cluster-management.io/api/cloudevents/generic/options/mqtt" + "open-cluster-management.io/api/cloudevents/work" + "open-cluster-management.io/api/cloudevents/work/watcher" + workv1 "open-cluster-management.io/api/work/v1" +) + +const ( + sourceID = "cloudevents-mqtt-integration-test" + mqttBrokerHost = "127.0.0.1:1883" +) + +var mqttBroker *mochimqtt.Server + +type Source interface { + Host() string + Start(ctx context.Context) error + Stop() error + Workclientset() workclientset.Interface +} + +type MQTTSource struct { + configFile string + workClientSet workclientset.Interface +} + +func NewMQTTSource(configFile string) *MQTTSource { 
+ return &MQTTSource{ + configFile: configFile, + } +} + +func (m *MQTTSource) Host() string { + return mqttBrokerHost +} + +func (m *MQTTSource) Start(ctx context.Context) error { + // start a MQTT broker + mqttBroker = mochimqtt.New(nil) + + // allow all connections + if err := mqttBroker.AddHook(new(auth.AllowHook), nil); err != nil { + return err + } + + if err := mqttBroker.AddListener(listeners.NewTCP("mqtt-test-broker", mqttBrokerHost, nil)); err != nil { + return err + } + + go func() { + if err := mqttBroker.Serve(); err != nil { + log.Fatal(err) + } + }() + + // write the mqtt broker config to a file + config := mqtt.MQTTConfig{ + BrokerHost: mqttBrokerHost, + } + + configData, err := yaml.Marshal(config) + if err != nil { + return err + } + if err := os.WriteFile(m.configFile, configData, 0600); err != nil { + return err + } + + // build a source client + workLister := &manifestWorkLister{} + watcher := watcher.NewManifestWorkWatcher() + mqttOptions, err := mqtt.BuildMQTTOptionsFromFlags(m.configFile) + if err != nil { + return err + } + cloudEventsClient, err := generic.NewCloudEventSourceClient[*workv1.ManifestWork]( + ctx, + mqtt.NewSourceOptions(mqttOptions, sourceID), + workLister, + work.ManifestWorkStatusHash, + &ManifestCodec{}, + ) + if err != nil { + return err + } + + manifestWorkClient := newManifestWorkSourceClient(cloudEventsClient, watcher) + workClient := &workV1ClientWrapper{ManifestWorkClient: manifestWorkClient} + workClientSet := &workClientSetWrapper{WorkV1ClientWrapper: workClient} + factory := workinformers.NewSharedInformerFactoryWithOptions(workClientSet, 1*time.Hour) + informers := factory.Work().V1().ManifestWorks() + manifestWorkLister := informers.Lister() + workLister.Lister = manifestWorkLister + manifestWorkClient.SetLister(manifestWorkLister) + + // start the source client + cloudEventsClient.Subscribe(ctx, newManifestWorkStatusHandler(manifestWorkLister, watcher)) + m.workClientSet = workClientSet + + go 
informers.Informer().Run(ctx.Done()) + + return nil +} + +func (m *MQTTSource) Stop() error { + return mqttBroker.Close() +} + +func (m *MQTTSource) Workclientset() workclientset.Interface { + return m.workClientSet +} diff --git a/test/integration/cloudevents/source/workclientset.go b/test/integration/cloudevents/source/workclientset.go new file mode 100644 index 000000000..68f1aa8d0 --- /dev/null +++ b/test/integration/cloudevents/source/workclientset.go @@ -0,0 +1,46 @@ +package source + +import ( + discovery "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + + workclientset "open-cluster-management.io/api/client/work/clientset/versioned" + workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" + workv1alpha1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1alpha1" +) + +type workClientSetWrapper struct { + WorkV1ClientWrapper *workV1ClientWrapper +} + +var _ workclientset.Interface = &workClientSetWrapper{} + +func (c *workClientSetWrapper) WorkV1() workv1client.WorkV1Interface { + return c.WorkV1ClientWrapper +} + +func (c *workClientSetWrapper) WorkV1alpha1() workv1alpha1client.WorkV1alpha1Interface { + return nil +} + +func (c *workClientSetWrapper) Discovery() discovery.DiscoveryInterface { + return nil +} + +type workV1ClientWrapper struct { + ManifestWorkClient *manifestWorkSourceClient +} + +var _ workv1client.WorkV1Interface = &workV1ClientWrapper{} + +func (c *workV1ClientWrapper) ManifestWorks(namespace string) workv1client.ManifestWorkInterface { + return c.ManifestWorkClient.SetNamespace(namespace) +} + +func (c *workV1ClientWrapper) AppliedManifestWorks() workv1client.AppliedManifestWorkInterface { + return nil +} + +func (c *workV1ClientWrapper) RESTClient() rest.Interface { + return nil +} diff --git a/test/integration/cloudevents/statusfeedback_test.go b/test/integration/cloudevents/statusfeedback_test.go new file mode 100644 index 000000000..21f6282f0 --- /dev/null +++ 
b/test/integration/cloudevents/statusfeedback_test.go @@ -0,0 +1,424 @@ +package cloudevents + +import ( + "context" + "fmt" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilrand "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/utils/ptr" + + ocmfeature "open-cluster-management.io/api/feature" + workapiv1 "open-cluster-management.io/api/work/v1" + + commonoptions "open-cluster-management.io/ocm/pkg/common/options" + "open-cluster-management.io/ocm/pkg/features" + "open-cluster-management.io/ocm/pkg/work/spoke" + "open-cluster-management.io/ocm/test/integration/util" +) + +var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { + var o *spoke.WorkloadAgentOptions + var commOptions *commonoptions.AgentOptions + var cancel context.CancelFunc + + var work *workapiv1.ManifestWork + var manifests []workapiv1.Manifest + + var err error + + ginkgo.BeforeEach(func() { + o = spoke.NewWorkloadAgentOptions() + o.StatusSyncInterval = 3 * time.Second + o.WorkloadSourceDriver.Type = workSourceDriver + o.WorkloadSourceDriver.Config = workSourceConfigFileName + + commOptions = commonoptions.NewAgentOptions() + commOptions.SpokeClusterName = utilrand.String(5) + + ns := &corev1.Namespace{} + ns.Name = commOptions.SpokeClusterName + _, err = spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // reset manifests + manifests = nil + }) + + ginkgo.JustBeforeEach(func() { + work = util.NewManifestWork(commOptions.SpokeClusterName, "", manifests) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }) + + ginkgo.AfterEach(func() { + err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), commOptions.SpokeClusterName, metav1.DeleteOptions{}) + 
gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }) + + ginkgo.Context("Deployment Status feedback", func() { + ginkgo.BeforeEach(func() { + u, _, err := util.NewDeployment(commOptions.SpokeClusterName, "deploy1", "sa") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + manifests = append(manifests, util.ToManifest(u)) + + var ctx context.Context + ctx, cancel = context.WithCancel(context.Background()) + go startWorkAgent(ctx, o, commOptions) + }) + + ginkgo.AfterEach(func() { + if cancel != nil { + cancel() + } + }) + + ginkgo.It("should return well known statuses", func() { + work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ + { + ResourceIdentifier: workapiv1.ResourceIdentifier{ + Group: "apps", + Resource: "deployments", + Namespace: commOptions.SpokeClusterName, + Name: "deploy1", + }, + FeedbackRules: []workapiv1.FeedbackRule{ + { + Type: workapiv1.WellKnownStatusType, + }, + }, + }, + } + + work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + // Update Deployment status on spoke + gomega.Eventually(func() error { + deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) + if err != nil { + return err + } + + deploy.Status.AvailableReplicas = 2 + deploy.Status.Replicas = 3 + deploy.Status.ReadyReplicas = 2 + + _, err = 
spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Check if we get status of deployment on work api + gomega.Eventually(func() error { + work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + if len(work.Status.ResourceStatus.Manifests) != 1 { + return fmt.Errorf("the size of resource status is not correct, expect to be 1 but got %d", len(work.Status.ResourceStatus.Manifests)) + } + + values := work.Status.ResourceStatus.Manifests[0].StatusFeedbacks.Values + + expectedValues := []workapiv1.FeedbackValue{ + { + Name: "ReadyReplicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: ptr.To[int64](2), + }, + }, + { + Name: "Replicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: ptr.To[int64](3), + }, + }, + { + Name: "AvailableReplicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: ptr.To[int64](2), + }, + }, + } + if !apiequality.Semantic.DeepEqual(values, expectedValues) { + return fmt.Errorf("status feedback values are not correct, we got %v", values) + } + + if !util.HaveManifestCondition(work.Status.ResourceStatus.Manifests, "StatusFeedbackSynced", []metav1.ConditionStatus{metav1.ConditionTrue}) { + return fmt.Errorf("status sync condition should be True") + } + + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Update replica of deployment + gomega.Eventually(func() error { + deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) + if err != nil { + return err + } + + deploy.Status.AvailableReplicas = 3 + deploy.Status.Replicas = 3 + deploy.Status.ReadyReplicas = 
3 + + _, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Check if the status of deployment is synced on work api + gomega.Eventually(func() error { + work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + if len(work.Status.ResourceStatus.Manifests) != 1 { + return fmt.Errorf("the size of resource status is not correct, expect to be 1 but got %d", len(work.Status.ResourceStatus.Manifests)) + } + + values := work.Status.ResourceStatus.Manifests[0].StatusFeedbacks.Values + + expectedValues := []workapiv1.FeedbackValue{ + { + Name: "ReadyReplicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: ptr.To[int64](3), + }, + }, + { + Name: "Replicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: ptr.To[int64](3), + }, + }, + { + Name: "AvailableReplicas", + Value: workapiv1.FieldValue{ + Type: workapiv1.Integer, + Integer: ptr.To[int64](3), + }, + }, + } + if !apiequality.Semantic.DeepEqual(values, expectedValues) { + return fmt.Errorf("status feedback values are not correct, we got %v", values) + } + + if !util.HaveManifestCondition(work.Status.ResourceStatus.Manifests, "StatusFeedbackSynced", []metav1.ConditionStatus{metav1.ConditionTrue}) { + return fmt.Errorf("status sync condition should be True") + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) + + ginkgo.It("should return statuses by JSONPaths", func() { + work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ + { + ResourceIdentifier: workapiv1.ResourceIdentifier{ + Group: "apps", + Resource: "deployments", + Namespace: commOptions.SpokeClusterName, + Name: "deploy1", + }, + FeedbackRules: 
[]workapiv1.FeedbackRule{ + { + Type: workapiv1.JSONPathsType, + JsonPaths: []workapiv1.JsonPath{ + { + Name: "Available", + Path: ".status.conditions[?(@.type==\"Available\")].status", + }, + { + Name: "wrong json path", + Path: ".status.conditions", + }, + }, + }, + }, + }, + } + + work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + gomega.Eventually(func() error { + deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) + if err != nil { + return err + } + + deploy.Status.Conditions = []appsv1.DeploymentCondition{ + { + Type: "Available", + Status: "True", + }, + } + + _, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Check if we get status of deployment on work api + gomega.Eventually(func() error { + work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + if len(work.Status.ResourceStatus.Manifests) != 1 { + return fmt.Errorf("the size of resource status is not correct, expect to be 1 but got %d", len(work.Status.ResourceStatus.Manifests)) + } + + values := 
work.Status.ResourceStatus.Manifests[0].StatusFeedbacks.Values + + expectedValues := []workapiv1.FeedbackValue{ + { + Name: "Available", + Value: workapiv1.FieldValue{ + Type: workapiv1.String, + String: ptr.To[string]("True"), + }, + }, + } + if !apiequality.Semantic.DeepEqual(values, expectedValues) { + return fmt.Errorf("status feedback values are not correct, we got %v", values) + } + + if !util.HaveManifestCondition(work.Status.ResourceStatus.Manifests, "StatusFeedbackSynced", []metav1.ConditionStatus{metav1.ConditionFalse}) { + return fmt.Errorf("status sync condition should be False") + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) + }) + + // TODO should return none for resources with no wellknown status + + ginkgo.Context("Deployment Status feedback with RawJsonString enabled", func() { + ginkgo.BeforeEach(func() { + u, _, err := util.NewDeployment(commOptions.SpokeClusterName, "deploy1", "sa") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + manifests = append(manifests, util.ToManifest(u)) + + err = features.SpokeMutableFeatureGate.Set(fmt.Sprintf("%s=true", ocmfeature.RawFeedbackJsonString)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + var ctx context.Context + ctx, cancel = context.WithCancel(context.Background()) + go startWorkAgent(ctx, o, commOptions) + }) + + ginkgo.AfterEach(func() { + if cancel != nil { + cancel() + } + }) + + ginkgo.It("Should return raw json string if the result is a structure", func() { + work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ + { + ResourceIdentifier: workapiv1.ResourceIdentifier{ + Group: "apps", + Resource: "deployments", + Namespace: commOptions.SpokeClusterName, + Name: "deploy1", + }, + FeedbackRules: []workapiv1.FeedbackRule{ + { + Type: workapiv1.JSONPathsType, + JsonPaths: []workapiv1.JsonPath{ + { + Name: "conditions", + Path: ".status.conditions", + }, + }, + }, + }, + }, + } + + work, err = 
workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + gomega.Eventually(func() error { + deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) + if err != nil { + return err + } + + deploy.Status.Conditions = []appsv1.DeploymentCondition{ + { + Type: "Available", + Status: "True", + }, + } + + _, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).UpdateStatus(context.Background(), deploy, metav1.UpdateOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Check if we get status of deployment on work api + gomega.Eventually(func() error { + work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + if len(work.Status.ResourceStatus.Manifests) != 1 { + return fmt.Errorf("the size of resource status is not correct, expect to be 1 but got %d", len(work.Status.ResourceStatus.Manifests)) + } + + values := work.Status.ResourceStatus.Manifests[0].StatusFeedbacks.Values + + expectedValues := []workapiv1.FeedbackValue{ + { + Name: "conditions", + Value: workapiv1.FieldValue{ + Type: workapiv1.JsonRaw, + JsonRaw: ptr.To[string](`[{"lastTransitionTime":null,"lastUpdateTime":null,"status":"True","type":"Available"}]`), + }, + }, + } + if 
!apiequality.Semantic.DeepEqual(values, expectedValues) { + if len(values) > 0 { + return fmt.Errorf("status feedback values are not correct, we got %v", *values[0].Value.JsonRaw) + } + return fmt.Errorf("status feedback values are not correct, we got %v", values) + } + + if !util.HaveManifestCondition(work.Status.ResourceStatus.Manifests, "StatusFeedbackSynced", []metav1.ConditionStatus{metav1.ConditionTrue}) { + return fmt.Errorf("status sync condition should be True") + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) + }) +}) diff --git a/test/integration/cloudevents/suite_test.go b/test/integration/cloudevents/suite_test.go new file mode 100644 index 000000000..78b8146d9 --- /dev/null +++ b/test/integration/cloudevents/suite_test.go @@ -0,0 +1,131 @@ +package cloudevents + +import ( + "context" + "fmt" + "os" + "path" + "testing" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + "go.uber.org/zap/zapcore" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + workclientset "open-cluster-management.io/api/client/work/clientset/versioned" + ocmfeature "open-cluster-management.io/api/feature" + workapiv1 "open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/ocm/pkg/features" + "open-cluster-management.io/ocm/pkg/work/helper" + "open-cluster-management.io/ocm/test/integration/cloudevents/source" +) + +const ( + eventuallyTimeout = 30 // seconds + eventuallyInterval = 1 // seconds + cm1, cm2 = "cm1", "cm2" +) + +// TODO consider to use one integration with part +// focus on source is a MQTT broker +const workSourceDriver = "mqtt" + +var tempDir string + +var testEnv *envtest.Environment +var envCtx context.Context +var envCancel context.CancelFunc + +var workSource source.Source +var 
workSourceConfigFileName string +var workSourceWorkClient workclientset.Interface +var workSourceHash string + +var spokeRestConfig *rest.Config +var spokeKubeClient kubernetes.Interface +var spokeWorkClient workclientset.Interface + +var CRDPaths = []string{ + // hub + "./vendor/open-cluster-management.io/api/work/v1/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml", + "./vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml", + "./vendor/open-cluster-management.io/api/cluster/v1beta1/0000_02_clusters.open-cluster-management.io_placements.crd.yaml", + "./vendor/open-cluster-management.io/api/cluster/v1beta1/0000_03_clusters.open-cluster-management.io_placementdecisions.crd.yaml", + // spoke + "./vendor/open-cluster-management.io/api/work/v1/0000_01_work.open-cluster-management.io_appliedmanifestworks.crd.yaml", +} + +func TestIntegration(t *testing.T) { + gomega.RegisterFailHandler(ginkgo.Fail) + ginkgo.RunSpecs(t, "Integration Suite") +} + +var _ = ginkgo.BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(ginkgo.GinkgoWriter), zap.UseDevMode(true), zap.Level(zapcore.DebugLevel))) + ginkgo.By("bootstrapping test environment") + + // start a kube-apiserver + testEnv = &envtest.Environment{ + ErrorIfCRDPathMissing: true, + CRDDirectoryPaths: CRDPaths, + } + envCtx, envCancel = context.WithCancel(context.TODO()) + cfg, err := testEnv.Start() + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(cfg).ToNot(gomega.BeNil()) + + tempDir, err = os.MkdirTemp("", "test") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(tempDir).ToNot(gomega.BeEmpty()) + + err = workapiv1.Install(scheme.Scheme) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + features.SpokeMutableFeatureGate.Add(ocmfeature.DefaultSpokeWorkFeatureGates) + + spokeRestConfig = cfg + spokeKubeClient, err = kubernetes.NewForConfig(cfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + 
spokeWorkClient, err = workclientset.NewForConfig(cfg) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + switch workSourceDriver { + case "mqtt": + // create kubeconfig file for hub in a tmp dir + workSourceConfigFileName = path.Join(tempDir, "mqttconfig") + + workSource = source.NewMQTTSource(workSourceConfigFileName) + err := workSource.Start(envCtx) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + workSourceHash = helper.HubHash(workSource.Host()) + + workSourceWorkClient = workSource.Workclientset() + gomega.Expect(workSourceWorkClient).ToNot(gomega.BeNil()) + default: + ginkgo.AbortSuite(fmt.Sprintf("unsupported source driver: %s", workSourceDriver)) + } +}) + +var _ = ginkgo.AfterSuite(func() { + ginkgo.By("tearing down the test environment") + + envCancel() + + err := workSource.Stop() + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + err = testEnv.Stop() + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + if tempDir != "" { + os.RemoveAll(tempDir) + } +}) diff --git a/test/integration/cloudevents/updatestrategy_test.go b/test/integration/cloudevents/updatestrategy_test.go new file mode 100644 index 000000000..fea0f5937 --- /dev/null +++ b/test/integration/cloudevents/updatestrategy_test.go @@ -0,0 +1,441 @@ +package cloudevents + +import ( + "context" + "fmt" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + utilrand "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/utils/ptr" + + workapiv1 "open-cluster-management.io/api/work/v1" + + commonoptions "open-cluster-management.io/ocm/pkg/common/options" + "open-cluster-management.io/ocm/pkg/work/spoke" + "open-cluster-management.io/ocm/test/integration/util" +) + +var _ = ginkgo.Describe("ManifestWork Update Strategy", func() { + var o *spoke.WorkloadAgentOptions + var commOptions *commonoptions.AgentOptions + var 
cancel context.CancelFunc + + var work *workapiv1.ManifestWork + var manifests []workapiv1.Manifest + + var err error + + ginkgo.BeforeEach(func() { + o = spoke.NewWorkloadAgentOptions() + o.StatusSyncInterval = 3 * time.Second + o.WorkloadSourceDriver.Type = workSourceDriver + o.WorkloadSourceDriver.Config = workSourceConfigFileName + + commOptions = commonoptions.NewAgentOptions() + commOptions.SpokeClusterName = utilrand.String(5) + + ns := &corev1.Namespace{} + ns.Name = commOptions.SpokeClusterName + _, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + var ctx context.Context + ctx, cancel = context.WithCancel(context.Background()) + go startWorkAgent(ctx, o, commOptions) + + // reset manifests + manifests = nil + }) + + ginkgo.JustBeforeEach(func() { + work = util.NewManifestWork(commOptions.SpokeClusterName, "", manifests) + }) + + ginkgo.AfterEach(func() { + if cancel != nil { + cancel() + } + err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), commOptions.SpokeClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }) + + ginkgo.Context("Create only strategy", func() { + var object *unstructured.Unstructured + + ginkgo.BeforeEach(func() { + object, _, err = util.NewDeployment(commOptions.SpokeClusterName, "deploy1", "sa") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + manifests = append(manifests, util.ToManifest(object)) + }) + + ginkgo.It("deployed resource should not be updated when work is updated", func() { + work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ + { + ResourceIdentifier: workapiv1.ResourceIdentifier{ + Group: "apps", + Resource: "deployments", + Namespace: commOptions.SpokeClusterName, + Name: "deploy1", + }, + UpdateStrategy: &workapiv1.UpdateStrategy{ + Type: workapiv1.UpdateStrategyTypeCreateOnly, + }, + }, + } + + work, err = 
workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + // update work + err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Eventually(func() error { + work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + work.Spec.Workload.Manifests[0] = util.ToManifest(object) + _, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + gomega.Eventually(func() error { + deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) + if err != nil { + return err + } + + if *deploy.Spec.Replicas != 1 { + return fmt.Errorf("replicas should not be changed") + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) + }) + + ginkgo.Context("Server side apply strategy", func() { + var object *unstructured.Unstructured + + ginkgo.BeforeEach(func() { + object, _, err = util.NewDeployment(commOptions.SpokeClusterName, "deploy1", "sa") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + manifests = append(manifests, util.ToManifest(object)) 
+ }) + + ginkgo.It("deployed resource should be applied when work is updated", func() { + work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ + { + ResourceIdentifier: workapiv1.ResourceIdentifier{ + Group: "apps", + Resource: "deployments", + Namespace: commOptions.SpokeClusterName, + Name: "deploy1", + }, + UpdateStrategy: &workapiv1.UpdateStrategy{ + Type: workapiv1.UpdateStrategyTypeServerSideApply, + }, + }, + } + + work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + // update work + err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas") + gomega.Eventually(func() error { + work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + work.Spec.Workload.Manifests[0] = util.ToManifest(object) + _, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) + if err != nil { + return err + } + + if *deploy.Spec.Replicas != 3 { + return fmt.Errorf("replicas should be updated to 3 but got %d", *deploy.Spec.Replicas) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) + + ginkgo.It("should get conflict if a field is taken by another manager", func() { + 
work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ + { + ResourceIdentifier: workapiv1.ResourceIdentifier{ + Group: "apps", + Resource: "deployments", + Namespace: commOptions.SpokeClusterName, + Name: "deploy1", + }, + UpdateStrategy: &workapiv1.UpdateStrategy{ + Type: workapiv1.UpdateStrategyTypeServerSideApply, + }, + }, + } + + work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + // update deployment with another field manager + err = unstructured.SetNestedField(object.Object, int64(2), "spec", "replicas") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + patch, err := object.MarshalJSON() + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + _, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Patch( + context.Background(), "deploy1", types.ApplyPatchType, patch, metav1.PatchOptions{Force: ptr.To[bool](true), FieldManager: "test-integration"}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // Update deployment by work + err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Eventually(func() error { + work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + work.Spec.Workload.Manifests[0] = util.ToManifest(object) + _, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Failed to 
apply due to conflict + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionFalse, + []metav1.ConditionStatus{metav1.ConditionFalse}, eventuallyTimeout, eventuallyInterval) + + // remove the replica field and apply should work + unstructured.RemoveNestedField(object.Object, "spec", "replicas") + gomega.Eventually(func() error { + work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + work.Spec.Workload.Manifests[0] = util.ToManifest(object) + _, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + }) + + ginkgo.It("two manifest works with different field manager", func() { + work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ + { + ResourceIdentifier: workapiv1.ResourceIdentifier{ + Group: "apps", + Resource: "deployments", + Namespace: commOptions.SpokeClusterName, + Name: "deploy1", + }, + UpdateStrategy: &workapiv1.UpdateStrategy{ + Type: workapiv1.UpdateStrategyTypeServerSideApply, + }, + }, + } + + work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + // Create another work with different fieldmanager + objCopy := object.DeepCopy() 
+ // work1 does not want to own replica field + unstructured.RemoveNestedField(objCopy.Object, "spec", "replicas") + work1 := util.NewManifestWork(commOptions.SpokeClusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)}) + work1.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ + { + ResourceIdentifier: workapiv1.ResourceIdentifier{ + Group: "apps", + Resource: "deployments", + Namespace: commOptions.SpokeClusterName, + Name: "deploy1", + }, + UpdateStrategy: &workapiv1.UpdateStrategy{ + Type: workapiv1.UpdateStrategyTypeServerSideApply, + ServerSideApply: &workapiv1.ServerSideApplyConfig{ + Force: true, + FieldManager: "work-agent-another", + }, + }, + }, + } + + _, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work1, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + util.AssertWorkCondition(work1.Namespace, work1.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + // Update deployment replica by work should work since this work still owns the replicas field + err = unstructured.SetNestedField(object.Object, int64(3), "spec", "replicas") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Eventually(func() error { + work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + work.Spec.Workload.Manifests[0] = util.ToManifest(object) + _, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // This should work since this work still own replicas + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, 
workapiv1.WorkApplied, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + gomega.Eventually(func() error { + deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) + if err != nil { + return err + } + + if *deploy.Spec.Replicas != 3 { + return fmt.Errorf("expected replica is not correct, got %d", *deploy.Spec.Replicas) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Update sa field will not work + err = unstructured.SetNestedField(object.Object, "another-sa", "spec", "template", "spec", "serviceAccountName") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Eventually(func() error { + work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + work.Spec.Workload.Manifests[0] = util.ToManifest(object) + _, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // This should work since this work still own replicas + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionFalse, + []metav1.ConditionStatus{metav1.ConditionFalse}, eventuallyTimeout, eventuallyInterval) + }) + + ginkgo.It("with delete options", func() { + work.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ + { + ResourceIdentifier: workapiv1.ResourceIdentifier{ + Group: "apps", + Resource: "deployments", + Namespace: commOptions.SpokeClusterName, + Name: "deploy1", + }, + UpdateStrategy: &workapiv1.UpdateStrategy{ + Type: workapiv1.UpdateStrategyTypeServerSideApply, + }, + }, + } + + work, err = 
workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + // Create another work with different fieldmanager + objCopy := object.DeepCopy() + // work1 does not want to own replica field + unstructured.RemoveNestedField(objCopy.Object, "spec", "replicas") + work1 := util.NewManifestWork(commOptions.SpokeClusterName, "another", []workapiv1.Manifest{util.ToManifest(objCopy)}) + work1.Spec.ManifestConfigs = []workapiv1.ManifestConfigOption{ + { + ResourceIdentifier: workapiv1.ResourceIdentifier{ + Group: "apps", + Resource: "deployments", + Namespace: commOptions.SpokeClusterName, + Name: "deploy1", + }, + UpdateStrategy: &workapiv1.UpdateStrategy{ + Type: workapiv1.UpdateStrategyTypeServerSideApply, + ServerSideApply: &workapiv1.ServerSideApplyConfig{ + Force: true, + FieldManager: "work-agent-another", + }, + }, + }, + } + + _, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work1, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + util.AssertWorkCondition(work1.Namespace, work1.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + gomega.Eventually(func() error { + deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) + if err != nil { + return err + } + + if len(deploy.OwnerReferences) != 2 { + return fmt.Errorf("expected ownerrefs is not correct, got %v", deploy.OwnerReferences) + } + + return nil + }, eventuallyTimeout, 
eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // update deleteOption of the first work + gomega.Eventually(func() error { + work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + work.Spec.DeleteOption = &workapiv1.DeleteOption{PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan} + _, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + deploy, err := spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Get(context.Background(), "deploy1", metav1.GetOptions{}) + if err != nil { + return err + } + + if len(deploy.OwnerReferences) != 1 { + return fmt.Errorf("expected ownerrefs is not correct, got %v", deploy.OwnerReferences) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) + }) +}) diff --git a/test/integration/cloudevents/work_test.go b/test/integration/cloudevents/work_test.go new file mode 100644 index 000000000..fc7edb808 --- /dev/null +++ b/test/integration/cloudevents/work_test.go @@ -0,0 +1,165 @@ +package cloudevents + +import ( + "context" + "fmt" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + "github.com/openshift/library-go/pkg/controller/controllercmd" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilrand "k8s.io/apimachinery/pkg/util/rand" + + workapiv1 "open-cluster-management.io/api/work/v1" + + commonoptions "open-cluster-management.io/ocm/pkg/common/options" + "open-cluster-management.io/ocm/pkg/work/spoke" + "open-cluster-management.io/ocm/test/integration/util" +) + +func startWorkAgent(ctx context.Context, o 
*spoke.WorkloadAgentOptions, commOption *commonoptions.AgentOptions) { + agentConfig := spoke.NewWorkAgentConfig(commOption, o) + err := agentConfig.RunWorkloadAgent(ctx, &controllercmd.ControllerContext{ + KubeConfig: spokeRestConfig, + EventRecorder: util.NewIntegrationTestEventRecorder("integration"), + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) +} + +var _ = ginkgo.Describe("ManifestWork", func() { + var o *spoke.WorkloadAgentOptions + var commOptions *commonoptions.AgentOptions + var cancel context.CancelFunc + + var work *workapiv1.ManifestWork + var manifests []workapiv1.Manifest + var appliedManifestWorkName string + + var err error + + ginkgo.BeforeEach(func() { + o = spoke.NewWorkloadAgentOptions() + o.StatusSyncInterval = 3 * time.Second + o.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second + o.WorkloadSourceDriver.Type = workSourceDriver + o.WorkloadSourceDriver.Config = workSourceConfigFileName + + commOptions = commonoptions.NewAgentOptions() + commOptions.SpokeClusterName = utilrand.String(5) + + ns := &corev1.Namespace{} + ns.Name = commOptions.SpokeClusterName + _, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + var ctx context.Context + ctx, cancel = context.WithCancel(context.Background()) + go startWorkAgent(ctx, o, commOptions) + + // reset manifests + manifests = nil + }) + + ginkgo.JustBeforeEach(func() { + work = util.NewManifestWork(commOptions.SpokeClusterName, "", manifests) + work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + // if the source is not kube, the uid will be used as the manifestwork name + appliedManifestWorkName = fmt.Sprintf("%s-%s", workSourceHash, work.UID) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }) + + ginkgo.AfterEach(func() { + err = 
workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) + if !errors.IsNotFound(err) { + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + } + + gomega.Eventually(func() error { + _, err := workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + return fmt.Errorf("work %s in namespace %s still exists", work.Name, commOptions.SpokeClusterName) + }, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) + + err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), commOptions.SpokeClusterName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + if cancel != nil { + cancel() + } + }) + + ginkgo.Context("With a single manifest", func() { + ginkgo.BeforeEach(func() { + manifests = []workapiv1.Manifest{ + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm1, map[string]string{"a": "b"}, nil)), + } + }) + + ginkgo.It("should create work and then apply it successfully", func() { + util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) + + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + }) + + ginkgo.It("should update work and then apply it successfully", func() { + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, 
eventuallyInterval) + util.AssertWorkCondition(work.Namespace, work.Name, workSourceWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + newManifests := []workapiv1.Manifest{ + util.ToManifest(util.NewConfigmap(commOptions.SpokeClusterName, cm2, map[string]string{"x": "y"}, nil)), + } + work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + work.Spec.Workload.Manifests = newManifests + + work, err = workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + util.AssertExistenceOfConfigMaps(newManifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) + + // check if resource created by stale manifest is deleted once it is removed from applied resource list + gomega.Eventually(func() error { + appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{}) + if err != nil { + return err + } + + for _, appliedResource := range appliedManifestWork.Status.AppliedResources { + if appliedResource.Name == cm1 { + return fmt.Errorf("found applied resource cm1") + } + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + _, err = spokeKubeClient.CoreV1().ConfigMaps(commOptions.SpokeClusterName).Get(context.Background(), cm1, metav1.GetOptions{}) + gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) + }) + + ginkgo.It("should delete work successfully", func() { + util.AssertFinalizerAdded(work.Namespace, work.Name, workSourceWorkClient, eventuallyTimeout, eventuallyInterval) + + err = 
workSourceWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + util.AssertWorkDeleted(work.Namespace, work.Name, fmt.Sprintf("%s-%s", workSourceHash, work.UID), manifests, + workSourceWorkClient, spokeWorkClient, spokeKubeClient, eventuallyTimeout, eventuallyInterval) + }) + }) + + // TODO test multiple manifests after the manifestbundles is enabled +}) diff --git a/test/integration/operator/klusterlet_test.go b/test/integration/operator/klusterlet_test.go index f93d3ec0b..12b31f0ff 100644 --- a/test/integration/operator/klusterlet_test.go +++ b/test/integration/operator/klusterlet_test.go @@ -495,7 +495,7 @@ var _ = ginkgo.Describe("Klusterlet", func() { gomega.Expect(len(actual.Spec.Template.Spec.Containers)).Should(gomega.Equal(1)) // klusterlet has no condition, replica is 0 gomega.Expect(actual.Status.Replicas).Should(gomega.Equal(int32(0))) - gomega.Expect(len(actual.Spec.Template.Spec.Containers[0].Args)).Should(gomega.Equal(8)) + gomega.Expect(len(actual.Spec.Template.Spec.Containers[0].Args)).Should(gomega.Equal(9)) return actual.Spec.Template.Spec.Containers[0].Args[2] != "--spoke-cluster-name=cluster2" }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) diff --git a/test/integration/util/assertion.go b/test/integration/util/assertion.go index b94b3e427..fadaeecb3 100644 --- a/test/integration/util/assertion.go +++ b/test/integration/util/assertion.go @@ -103,8 +103,8 @@ func AssertWorkGeneration(namespace, name string, workClient workclientset.Inter } // AssertWorkDeleted check if work is deleted -func AssertWorkDeleted(namespace, name, hubhash string, manifests []workapiv1.Manifest, - workClient workclientset.Interface, kubeClient kubernetes.Interface, +func AssertWorkDeleted(namespace, name, appliedManifestWorkName string, manifests []workapiv1.Manifest, + workClient, spokeWorkClient workclientset.Interface, 
spokeKubeClient kubernetes.Interface, eventuallyTimeout, eventuallyInterval int) { // wait for deletion of manifest work gomega.Eventually(func() error { @@ -119,13 +119,12 @@ func AssertWorkDeleted(namespace, name, hubhash string, manifests []workapiv1.Ma }, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) // wait for deletion of appliedmanifestwork - appliedManifestWorkName := fmt.Sprintf("%s-%s", hubhash, name) - AssertAppliedManifestWorkDeleted(appliedManifestWorkName, workClient, eventuallyTimeout, eventuallyInterval) + AssertAppliedManifestWorkDeleted(appliedManifestWorkName, spokeWorkClient, eventuallyTimeout, eventuallyInterval) // Once manifest work is deleted, all applied resources should have already been deleted too for _, manifest := range manifests { expected := manifest.Object.(*corev1.ConfigMap) - _, err := kubeClient.CoreV1().ConfigMaps(expected.Namespace).Get(context.Background(), expected.Name, metav1.GetOptions{}) + _, err := spokeKubeClient.CoreV1().ConfigMaps(expected.Namespace).Get(context.Background(), expected.Name, metav1.GetOptions{}) gomega.Expect(apierrors.IsNotFound(err)).To(gomega.BeTrue()) } } diff --git a/test/integration/work/deleteoption_test.go b/test/integration/work/deleteoption_test.go index 9d7393c9c..a5bf119d3 100644 --- a/test/integration/work/deleteoption_test.go +++ b/test/integration/work/deleteoption_test.go @@ -19,7 +19,7 @@ import ( "open-cluster-management.io/ocm/test/integration/util" ) -var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { +var _ = ginkgo.Describe("ManifestWork Delete Option", func() { var o *spoke.WorkloadAgentOptions var commOptions *commonoptions.AgentOptions var cancel context.CancelFunc @@ -33,9 +33,10 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { ginkgo.BeforeEach(func() { o = spoke.NewWorkloadAgentOptions() o.StatusSyncInterval = 3 * time.Second + o.WorkloadSourceDriver.Type = sourceDriver + o.WorkloadSourceDriver.Config = sourceConfigFileName 
commOptions = commonoptions.NewAgentOptions() - commOptions.HubKubeconfigFile = hubKubeconfigFileName commOptions.SpokeClusterName = utilrand.String(5) ns := &corev1.Namespace{} @@ -295,8 +296,6 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name) - util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, @@ -364,8 +363,6 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name) - util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, @@ -425,8 +422,6 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name) - util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, 
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, @@ -492,8 +487,6 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { work, err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name) - util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkApplied, metav1.ConditionTrue, []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, workapiv1.WorkAvailable, metav1.ConditionTrue, diff --git a/test/integration/work/executor_test.go b/test/integration/work/executor_test.go index d2912f5c9..9b753e056 100644 --- a/test/integration/work/executor_test.go +++ b/test/integration/work/executor_test.go @@ -40,11 +40,13 @@ var _ = ginkgo.Describe("ManifestWork Executor Subject", func() { ginkgo.BeforeEach(func() { o = spoke.NewWorkloadAgentOptions() o.StatusSyncInterval = 3 * time.Second + o.WorkloadSourceDriver.Type = sourceDriver + o.WorkloadSourceDriver.Config = sourceConfigFileName + err := features.SpokeMutableFeatureGate.Set("ExecutorValidatingCaches=true") gomega.Expect(err).NotTo(gomega.HaveOccurred()) commOptions = commonoptions.NewAgentOptions() - commOptions.HubKubeconfigFile = hubKubeconfigFileName commOptions.SpokeClusterName = utilrand.String(5) ns := &corev1.Namespace{} diff --git a/test/integration/work/manifestworkreplicaset_test.go b/test/integration/work/manifestworkreplicaset_test.go index 303c3e39f..5cd8541ff 100644 --- a/test/integration/work/manifestworkreplicaset_test.go +++ b/test/integration/work/manifestworkreplicaset_test.go @@ -104,8 +104,7 
@@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() { clusterNames.Insert(clusterName) } - decision, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementDecision.Namespace).UpdateStatus( - context.TODO(), decision, metav1.UpdateOptions{}) + _, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementDecision.Namespace).UpdateStatus(context.TODO(), decision, metav1.UpdateOptions{}) return manifestWorkReplicaSet, clusterNames, err } }) @@ -132,8 +131,7 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() { gomega.Expect(err).ToNot(gomega.HaveOccurred()) removedCluster := decision.Status.Decisions[2].ClusterName decision.Status.Decisions = decision.Status.Decisions[:2] - decision, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementDecision.Namespace).UpdateStatus( - context.TODO(), decision, metav1.UpdateOptions{}) + _, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementDecision.Namespace).UpdateStatus(context.TODO(), decision, metav1.UpdateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) clusterNames.Delete(removedCluster) gomega.Eventually(assertWorksByReplicaSet(clusterNames, manifestWorkReplicaSet), eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) diff --git a/test/integration/work/statusfeedback_test.go b/test/integration/work/statusfeedback_test.go index 1dbc48fb9..dc973496b 100644 --- a/test/integration/work/statusfeedback_test.go +++ b/test/integration/work/statusfeedback_test.go @@ -12,7 +12,7 @@ import ( apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilrand "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ocmfeature "open-cluster-management.io/api/feature" workapiv1 "open-cluster-management.io/api/work/v1" @@ -36,9 +36,10 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { ginkgo.BeforeEach(func() { o = spoke.NewWorkloadAgentOptions() o.StatusSyncInterval = 3 
* time.Second + o.WorkloadSourceDriver.Type = sourceDriver + o.WorkloadSourceDriver.Config = sourceConfigFileName commOptions = commonoptions.NewAgentOptions() - commOptions.HubKubeconfigFile = hubKubeconfigFileName commOptions.SpokeClusterName = utilrand.String(5) ns := &corev1.Namespace{} @@ -135,21 +136,21 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { Name: "ReadyReplicas", Value: workapiv1.FieldValue{ Type: workapiv1.Integer, - Integer: pointer.Int64(2), + Integer: ptr.To[int64](2), }, }, { Name: "Replicas", Value: workapiv1.FieldValue{ Type: workapiv1.Integer, - Integer: pointer.Int64(3), + Integer: ptr.To[int64](3), }, }, { Name: "AvailableReplicas", Value: workapiv1.FieldValue{ Type: workapiv1.Integer, - Integer: pointer.Int64(2), + Integer: ptr.To[int64](2), }, }, } @@ -197,21 +198,21 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { Name: "ReadyReplicas", Value: workapiv1.FieldValue{ Type: workapiv1.Integer, - Integer: pointer.Int64(3), + Integer: ptr.To[int64](3), }, }, { Name: "Replicas", Value: workapiv1.FieldValue{ Type: workapiv1.Integer, - Integer: pointer.Int64(3), + Integer: ptr.To[int64](3), }, }, { Name: "AvailableReplicas", Value: workapiv1.FieldValue{ Type: workapiv1.Integer, - Integer: pointer.Int64(3), + Integer: ptr.To[int64](3), }, }, } @@ -297,7 +298,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { Name: "Available", Value: workapiv1.FieldValue{ Type: workapiv1.String, - String: pointer.String("True"), + String: ptr.To[string]("True"), }, }, } @@ -313,7 +314,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) }) - ginkgo.It("should return none for resources with no wellKnowne status", func() { + ginkgo.It("should return none for resources with no wellknown status", func() { sa, _ := util.NewServiceAccount(commOptions.SpokeClusterName, "sa") work.Spec.Workload.Manifests = 
append(work.Spec.Workload.Manifests, util.ToManifest(sa)) @@ -387,21 +388,21 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { Name: "ReadyReplicas", Value: workapiv1.FieldValue{ Type: workapiv1.Integer, - Integer: pointer.Int64(2), + Integer: ptr.To[int64](2), }, }, { Name: "Replicas", Value: workapiv1.FieldValue{ Type: workapiv1.Integer, - Integer: pointer.Int64(3), + Integer: ptr.To[int64](3), }, }, { Name: "AvailableReplicas", Value: workapiv1.FieldValue{ Type: workapiv1.Integer, - Integer: pointer.Int64(2), + Integer: ptr.To[int64](2), }, }, } @@ -541,7 +542,7 @@ var _ = ginkgo.Describe("ManifestWork Status Feedback", func() { Name: "conditions", Value: workapiv1.FieldValue{ Type: workapiv1.JsonRaw, - JsonRaw: pointer.String(`[{"lastTransitionTime":null,"lastUpdateTime":null,"status":"True","type":"Available"}]`), + JsonRaw: ptr.To[string](`[{"lastTransitionTime":null,"lastUpdateTime":null,"status":"True","type":"Available"}]`), }, }, } diff --git a/test/integration/work/suite_test.go b/test/integration/work/suite_test.go index b114783ae..2c9477626 100644 --- a/test/integration/work/suite_test.go +++ b/test/integration/work/suite_test.go @@ -33,17 +33,24 @@ const ( cm1, cm2 = "cm1", "cm2" ) +// focus on hub is a kube cluster +const sourceDriver = "kube" + var tempDir string -var hubKubeconfigFileName string -var spokeRestConfig *rest.Config + var testEnv *envtest.Environment -var spokeKubeClient kubernetes.Interface -var spokeWorkClient workclientset.Interface +var envCtx context.Context +var envCancel context.CancelFunc + +var sourceConfigFileName string var hubWorkClient workclientset.Interface + var hubClusterClient clusterclientset.Interface var hubHash string -var envCtx context.Context -var envCancel context.CancelFunc + +var spokeRestConfig *rest.Config +var spokeKubeClient kubernetes.Interface +var spokeWorkClient workclientset.Interface var CRDPaths = []string{ // hub @@ -78,8 +85,9 @@ var _ = ginkgo.BeforeSuite(func() { tempDir, err 
= os.MkdirTemp("", "test") gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Expect(tempDir).ToNot(gomega.BeEmpty()) - hubKubeconfigFileName = path.Join(tempDir, "kubeconfig") - err = util.CreateKubeconfigFile(cfg, hubKubeconfigFileName) + + sourceConfigFileName = path.Join(tempDir, "kubeconfig") + err = util.CreateKubeconfigFile(cfg, sourceConfigFileName) gomega.Expect(err).ToNot(gomega.HaveOccurred()) err = workapiv1.Install(scheme.Scheme) diff --git a/test/integration/work/unmanaged_appliedwork_test.go b/test/integration/work/unmanaged_appliedwork_test.go index c6d8c5901..3e76fbee7 100644 --- a/test/integration/work/unmanaged_appliedwork_test.go +++ b/test/integration/work/unmanaged_appliedwork_test.go @@ -38,9 +38,10 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() { o = spoke.NewWorkloadAgentOptions() o.StatusSyncInterval = 3 * time.Second o.AppliedManifestWorkEvictionGracePeriod = 10 * time.Second + o.WorkloadSourceDriver.Type = sourceDriver + o.WorkloadSourceDriver.Config = sourceConfigFileName commOptions = commonoptions.NewAgentOptions() - commOptions.HubKubeconfigFile = hubKubeconfigFileName commOptions.SpokeClusterName = utilrand.String(5) commOptions.AgentID = utilrand.String(5) @@ -133,9 +134,10 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() { newOption := spoke.NewWorkloadAgentOptions() newOption.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second + newOption.WorkloadSourceDriver.Type = sourceDriver + newOption.WorkloadSourceDriver.Config = newHubKubeConfigFile newCommonOptions := commonoptions.NewAgentOptions() - newCommonOptions.HubKubeconfigFile = newHubKubeConfigFile newCommonOptions.SpokeClusterName = commOptions.SpokeClusterName newCommonOptions.AgentID = utilrand.String(5) @@ -180,9 +182,10 @@ var _ = ginkgo.Describe("Unmanaged ApplieManifestWork", func() { newOption := spoke.NewWorkloadAgentOptions() newOption.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second + 
newOption.WorkloadSourceDriver.Type = sourceDriver + newOption.WorkloadSourceDriver.Config = newHubKubeConfigFile newCommonOptions := commonoptions.NewAgentOptions() - newCommonOptions.HubKubeconfigFile = newHubKubeConfigFile newCommonOptions.SpokeClusterName = commOptions.SpokeClusterName newCommonOptions.AgentID = commOptions.AgentID diff --git a/test/integration/work/updatestrategy_test.go b/test/integration/work/updatestrategy_test.go index 1327e60cd..2d8a770fb 100644 --- a/test/integration/work/updatestrategy_test.go +++ b/test/integration/work/updatestrategy_test.go @@ -12,7 +12,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" utilrand "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" workapiv1 "open-cluster-management.io/api/work/v1" @@ -34,9 +34,10 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() { ginkgo.BeforeEach(func() { o = spoke.NewWorkloadAgentOptions() o.StatusSyncInterval = 3 * time.Second + o.WorkloadSourceDriver.Type = sourceDriver + o.WorkloadSourceDriver.Config = sourceConfigFileName commOptions = commonoptions.NewAgentOptions() - commOptions.HubKubeconfigFile = hubKubeconfigFileName commOptions.SpokeClusterName = utilrand.String(5) ns := &corev1.Namespace{} @@ -210,7 +211,7 @@ var _ = ginkgo.Describe("ManifestWork Update Strategy", func() { patch, err := object.MarshalJSON() gomega.Expect(err).ToNot(gomega.HaveOccurred()) _, err = spokeKubeClient.AppsV1().Deployments(commOptions.SpokeClusterName).Patch( - context.Background(), "deploy1", types.ApplyPatchType, patch, metav1.PatchOptions{Force: pointer.Bool(true), FieldManager: "test-integration"}) + context.Background(), "deploy1", types.ApplyPatchType, patch, metav1.PatchOptions{Force: ptr.To[bool](true), FieldManager: "test-integration"}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // Update deployment by work diff --git a/test/integration/work/work_test.go 
b/test/integration/work/work_test.go index 3bbc4a538..0397184c5 100644 --- a/test/integration/work/work_test.go +++ b/test/integration/work/work_test.go @@ -48,9 +48,10 @@ var _ = ginkgo.Describe("ManifestWork", func() { o = spoke.NewWorkloadAgentOptions() o.StatusSyncInterval = 3 * time.Second o.AppliedManifestWorkEvictionGracePeriod = 5 * time.Second + o.WorkloadSourceDriver.Type = sourceDriver + o.WorkloadSourceDriver.Config = sourceConfigFileName commOptions = commonoptions.NewAgentOptions() - commOptions.HubKubeconfigFile = hubKubeconfigFileName commOptions.SpokeClusterName = utilrand.String(5) ns := &corev1.Namespace{} @@ -159,7 +160,8 @@ var _ = ginkgo.Describe("ManifestWork", func() { err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - util.AssertWorkDeleted(work.Namespace, work.Name, hubHash, manifests, hubWorkClient, spokeKubeClient, eventuallyTimeout, eventuallyInterval) + util.AssertWorkDeleted(work.Namespace, work.Name, fmt.Sprintf("%s-%s", hubHash, work.Name), + manifests, hubWorkClient, spokeWorkClient, spokeKubeClient, eventuallyTimeout, eventuallyInterval) }) }) @@ -233,7 +235,8 @@ var _ = ginkgo.Describe("ManifestWork", func() { err = hubWorkClient.WorkV1().ManifestWorks(commOptions.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - util.AssertWorkDeleted(work.Namespace, work.Name, hubHash, manifests, hubWorkClient, spokeKubeClient, eventuallyTimeout, eventuallyInterval) + util.AssertWorkDeleted(work.Namespace, work.Name, fmt.Sprintf("%s-%s", hubHash, work.Name), + manifests, hubWorkClient, spokeWorkClient, spokeKubeClient, eventuallyTimeout, eventuallyInterval) }) }) @@ -701,7 +704,8 @@ var _ = ginkgo.Describe("ManifestWork", func() { } }() - util.AssertWorkDeleted(work.Namespace, work.Name, hubHash, manifests, hubWorkClient, 
spokeKubeClient, eventuallyTimeout, eventuallyInterval) + util.AssertWorkDeleted(work.Namespace, work.Name, fmt.Sprintf("%s-%s", hubHash, work.Name), manifests, + hubWorkClient, spokeWorkClient, spokeKubeClient, eventuallyTimeout, eventuallyInterval) }) ginkgo.It("should delete applied manifest work if it is orphan", func() { diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/LICENSE b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/message.go b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/message.go new file mode 100644 index 000000000..8dd938545 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/message.go @@ -0,0 +1,119 @@ +/* + Copyright 2023 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package mqtt_paho + +import ( + "bytes" + "context" + "strings" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/eclipse/paho.golang/paho" +) + +const ( + prefix = "ce-" + contentType = "Content-Type" +) + +var specs = spec.WithPrefix(prefix) + +// Message represents a MQTT message. 
+// This message *can* be read several times safely +type Message struct { + internal *paho.Publish + version spec.Version + format format.Format +} + +// Check if Message implements binding.Message +var ( + _ binding.Message = (*Message)(nil) + _ binding.MessageMetadataReader = (*Message)(nil) +) + +func NewMessage(msg *paho.Publish) *Message { + var f format.Format + var v spec.Version + if msg.Properties != nil { + // Use properties.User["Content-type"] to determine if message is structured + if s := msg.Properties.User.Get(contentType); format.IsFormat(s) { + f = format.Lookup(s) + } else if s := msg.Properties.User.Get(specs.PrefixedSpecVersionName()); s != "" { + v = specs.Version(s) + } + } + return &Message{ + internal: msg, + version: v, + format: f, + } +} + +func (m *Message) ReadEncoding() binding.Encoding { + if m.version != nil { + return binding.EncodingBinary + } + if m.format != nil { + return binding.EncodingStructured + } + return binding.EncodingUnknown +} + +func (m *Message) ReadStructured(ctx context.Context, encoder binding.StructuredWriter) error { + if m.version != nil { + return binding.ErrNotStructured + } + if m.format == nil { + return binding.ErrNotStructured + } + return encoder.SetStructuredEvent(ctx, m.format, bytes.NewReader(m.internal.Payload)) +} + +func (m *Message) ReadBinary(ctx context.Context, encoder binding.BinaryWriter) (err error) { + if m.format != nil { + return binding.ErrNotBinary + } + + for _, userProperty := range m.internal.Properties.User { + if strings.HasPrefix(userProperty.Key, prefix) { + attr := m.version.Attribute(userProperty.Key) + if attr != nil { + err = encoder.SetAttribute(attr, userProperty.Value) + } else { + err = encoder.SetExtension(strings.TrimPrefix(userProperty.Key, prefix), userProperty.Value) + } + } else if userProperty.Key == contentType { + err = encoder.SetAttribute(m.version.AttributeFromKind(spec.DataContentType), string(userProperty.Value)) + } + if err != nil { + return + } + } + + 
if m.internal.Payload != nil { + return encoder.SetData(bytes.NewBuffer(m.internal.Payload)) + } + return nil +} + +func (m *Message) Finish(error) error { + return nil +} + +func (m *Message) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) { + attr := m.version.AttributeFromKind(k) + if attr != nil { + return attr, m.internal.Properties.User.Get(prefix + attr.Name()) + } + return nil, nil +} + +func (m *Message) GetExtension(name string) interface{} { + return m.internal.Properties.User.Get(prefix + name) +} diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/option.go b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/option.go new file mode 100644 index 000000000..955a16219 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/option.go @@ -0,0 +1,48 @@ +/* + Copyright 2023 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package mqtt_paho + +import ( + "fmt" + + "github.com/eclipse/paho.golang/paho" +) + +// Option is the function signature required to be considered an mqtt_paho.Option. +type Option func(*Protocol) error + +// WithConnect sets the paho.Connect configuration for the client. This option is not required. +func WithConnect(connOpt *paho.Connect) Option { + return func(p *Protocol) error { + if connOpt == nil { + return fmt.Errorf("the paho.Connect option must not be nil") + } + p.connOption = connOpt + return nil + } +} + +// WithPublish sets the paho.Publish configuration for the client. This option is required if you want to send messages. +func WithPublish(publishOpt *paho.Publish) Option { + return func(p *Protocol) error { + if publishOpt == nil { + return fmt.Errorf("the paho.Publish option must not be nil") + } + p.publishOption = publishOpt + return nil + } +} + +// WithSubscribe sets the paho.Subscribe configuration for the client. This option is required if you want to receive messages. 
+func WithSubscribe(subscribeOpt *paho.Subscribe) Option { + return func(p *Protocol) error { + if subscribeOpt == nil { + return fmt.Errorf("the paho.Subscribe option must not be nil") + } + p.subscribeOption = subscribeOpt + return nil + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/protocol.go b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/protocol.go new file mode 100644 index 000000000..261fc6c37 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/protocol.go @@ -0,0 +1,155 @@ +/* + Copyright 2023 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package mqtt_paho + +import ( + "context" + "fmt" + "io" + "sync" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/protocol" + "github.com/eclipse/paho.golang/paho" + + cecontext "github.com/cloudevents/sdk-go/v2/context" +) + +type Protocol struct { + client *paho.Client + config *paho.ClientConfig + connOption *paho.Connect + publishOption *paho.Publish + subscribeOption *paho.Subscribe + + // receiver + incoming chan *paho.Publish + // inOpen + openerMutex sync.Mutex + + closeChan chan struct{} +} + +var ( + _ protocol.Sender = (*Protocol)(nil) + _ protocol.Opener = (*Protocol)(nil) + _ protocol.Receiver = (*Protocol)(nil) + _ protocol.Closer = (*Protocol)(nil) +) + +func New(ctx context.Context, config *paho.ClientConfig, opts ...Option) (*Protocol, error) { + if config == nil { + return nil, fmt.Errorf("the paho.ClientConfig must not be nil") + } + + p := &Protocol{ + client: paho.NewClient(*config), + // default connect option + connOption: &paho.Connect{ + KeepAlive: 30, + CleanStart: true, + }, + incoming: make(chan *paho.Publish), + closeChan: make(chan struct{}), + } + if err := p.applyOptions(opts...); err != nil { + return nil, err + } + + // Connect to the MQTT broker + connAck, err := p.client.Connect(ctx, p.connOption) + if err != nil { + return nil, err + } + if 
connAck.ReasonCode != 0 { + return nil, fmt.Errorf("failed to connect to %q : %d - %q", p.client.Conn.RemoteAddr(), connAck.ReasonCode, + connAck.Properties.ReasonString) + } + + return p, nil +} + +func (p *Protocol) applyOptions(opts ...Option) error { + for _, fn := range opts { + if err := fn(p); err != nil { + return err + } + } + return nil +} + +func (p *Protocol) Send(ctx context.Context, m binding.Message, transformers ...binding.Transformer) error { + if p.publishOption == nil { + return fmt.Errorf("the paho.Publish option must not be nil") + } + + var err error + defer m.Finish(err) + + msg := p.publishOption + if cecontext.TopicFrom(ctx) != "" { + msg.Topic = cecontext.TopicFrom(ctx) + cecontext.WithTopic(ctx, "") + } + + err = WritePubMessage(ctx, m, msg, transformers...) + if err != nil { + return err + } + + _, err = p.client.Publish(ctx, msg) + if err != nil { + return err + } + return err +} + +func (p *Protocol) OpenInbound(ctx context.Context) error { + if p.subscribeOption == nil { + return fmt.Errorf("the paho.Subscribe option must not be nil") + } + + p.openerMutex.Lock() + defer p.openerMutex.Unlock() + + logger := cecontext.LoggerFrom(ctx) + + p.client.Router = paho.NewSingleHandlerRouter(func(m *paho.Publish) { + p.incoming <- m + }) + + logger.Infof("subscribing to topics: %v", p.subscribeOption.Subscriptions) + _, err := p.client.Subscribe(ctx, p.subscribeOption) + if err != nil { + return err + } + + // Wait until external or internal context done + select { + case <-ctx.Done(): + case <-p.closeChan: + } + return p.client.Disconnect(&paho.Disconnect{ReasonCode: 0}) +} + +// Receive implements Receiver.Receive +func (p *Protocol) Receive(ctx context.Context) (binding.Message, error) { + select { + case m, ok := <-p.incoming: + if !ok { + return nil, io.EOF + } + msg := NewMessage(m) + return msg, nil + case <-ctx.Done(): + return nil, io.EOF + } +} + +func (p *Protocol) Close(ctx context.Context) error { + close(p.closeChan) + return nil 
+} diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/write_message.go b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/write_message.go new file mode 100644 index 000000000..a4b87f4aa --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/write_message.go @@ -0,0 +1,133 @@ +/* +Copyright 2023 The CloudEvents Authors +SPDX-License-Identifier: Apache-2.0 +*/ + +package mqtt_paho + +import ( + "bytes" + "context" + "io" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/types" + "github.com/eclipse/paho.golang/paho" +) + +// WritePubMessage fills the provided pubMessage with the message m. +// Using context you can tweak the encoding processing (more details on binding.Write documentation). +func WritePubMessage(ctx context.Context, m binding.Message, pubMessage *paho.Publish, transformers ...binding.Transformer) error { + structuredWriter := (*pubMessageWriter)(pubMessage) + binaryWriter := (*pubMessageWriter)(pubMessage) + + _, err := binding.Write( + ctx, + m, + structuredWriter, + binaryWriter, + transformers..., + ) + return err +} + +type pubMessageWriter paho.Publish + +var ( + _ binding.StructuredWriter = (*pubMessageWriter)(nil) + _ binding.BinaryWriter = (*pubMessageWriter)(nil) +) + +func (b *pubMessageWriter) SetStructuredEvent(ctx context.Context, f format.Format, event io.Reader) error { + if b.Properties == nil { + b.Properties = &paho.PublishProperties{ + User: make([]paho.UserProperty, 0), + } + } + b.Properties.User.Add(contentType, f.MediaType()) + var buf bytes.Buffer + _, err := io.Copy(&buf, event) + if err != nil { + return err + } + b.Payload = buf.Bytes() + return nil +} + +func (b *pubMessageWriter) Start(ctx context.Context) error { + if b.Properties == nil { + b.Properties = &paho.PublishProperties{} + } + // the UserProperties of publish message is 
used to load event extensions + b.Properties.User = make([]paho.UserProperty, 0) + return nil +} + +func (b *pubMessageWriter) End(ctx context.Context) error { + return nil +} + +func (b *pubMessageWriter) SetData(reader io.Reader) error { + buf, ok := reader.(*bytes.Buffer) + if !ok { + buf = new(bytes.Buffer) + _, err := io.Copy(buf, reader) + if err != nil { + return err + } + } + b.Payload = buf.Bytes() + return nil +} + +func (b *pubMessageWriter) SetAttribute(attribute spec.Attribute, value interface{}) error { + if attribute.Kind() == spec.DataContentType { + if value == nil { + b.removeProperty(contentType) + } + s, err := types.Format(value) + if err != nil { + return err + } + if err := b.addProperty(contentType, s); err != nil { + return err + } + } else { + if value == nil { + b.removeProperty(prefix + attribute.Name()) + } + return b.addProperty(prefix+attribute.Name(), value) + } + return nil +} + +func (b *pubMessageWriter) SetExtension(name string, value interface{}) error { + if value == nil { + b.removeProperty(prefix + name) + } + return b.addProperty(prefix+name, value) +} + +func (b *pubMessageWriter) removeProperty(key string) { + for i, v := range b.Properties.User { + if v.Key == key { + b.Properties.User = append(b.Properties.User[:i], b.Properties.User[i+1:]...) + break + } + } +} + +func (b *pubMessageWriter) addProperty(key string, value interface{}) error { + s, err := types.Format(value) + if err != nil { + return err + } + + b.Properties.User = append(b.Properties.User, paho.UserProperty{ + Key: key, + Value: s, + }) + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/LICENSE b/vendor/github.com/cloudevents/sdk-go/v2/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/cloudevents/sdk-go/v2/alias.go b/vendor/github.com/cloudevents/sdk-go/v2/alias.go new file mode 100644 index 000000000..2fbfaa9a7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/alias.go @@ -0,0 +1,187 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +// Package v2 reexports a subset of the SDK v2 API. +package v2 + +// Package cloudevents alias' common functions and types to improve discoverability and reduce +// the number of imports for simple HTTP clients. 
+ +import ( + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/client" + "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" + "github.com/cloudevents/sdk-go/v2/protocol/http" + "github.com/cloudevents/sdk-go/v2/types" +) + +// Client + +type ClientOption = client.Option +type Client = client.Client + +// Event + +type Event = event.Event +type Result = protocol.Result + +// Context + +type EventContext = event.EventContext +type EventContextV1 = event.EventContextV1 +type EventContextV03 = event.EventContextV03 + +// Custom Types + +type Timestamp = types.Timestamp +type URIRef = types.URIRef + +// HTTP Protocol + +type HTTPOption = http.Option + +type HTTPProtocol = http.Protocol + +// Encoding + +type Encoding = binding.Encoding + +// Message + +type Message = binding.Message + +const ( + // ReadEncoding + + ApplicationXML = event.ApplicationXML + ApplicationJSON = event.ApplicationJSON + TextPlain = event.TextPlain + ApplicationCloudEventsJSON = event.ApplicationCloudEventsJSON + ApplicationCloudEventsBatchJSON = event.ApplicationCloudEventsBatchJSON + Base64 = event.Base64 + + // Event Versions + + VersionV1 = event.CloudEventsVersionV1 + VersionV03 = event.CloudEventsVersionV03 + + // Encoding + + EncodingBinary = binding.EncodingBinary + EncodingStructured = binding.EncodingStructured +) + +var ( + + // ContentType Helpers + + StringOfApplicationJSON = event.StringOfApplicationJSON + StringOfApplicationXML = event.StringOfApplicationXML + StringOfTextPlain = event.StringOfTextPlain + StringOfApplicationCloudEventsJSON = event.StringOfApplicationCloudEventsJSON + StringOfApplicationCloudEventsBatchJSON = event.StringOfApplicationCloudEventsBatchJSON + StringOfBase64 = event.StringOfBase64 + + // Client Creation + + NewClient = client.New + NewClientHTTP = client.NewHTTP + // Deprecated: please use New with the observability options. 
+ NewClientObserved = client.NewObserved + // Deprecated: Please use NewClientHTTP with the observability options. + NewDefaultClient = client.NewDefault + NewHTTPReceiveHandler = client.NewHTTPReceiveHandler + + // Client Options + + WithEventDefaulter = client.WithEventDefaulter + WithUUIDs = client.WithUUIDs + WithTimeNow = client.WithTimeNow + // Deprecated: this is now noop and will be removed in future releases. + WithTracePropagation = client.WithTracePropagation() + + // Event Creation + + NewEvent = event.New + + // Results + + NewResult = protocol.NewResult + ResultIs = protocol.ResultIs + ResultAs = protocol.ResultAs + + // Receipt helpers + + NewReceipt = protocol.NewReceipt + + ResultACK = protocol.ResultACK + ResultNACK = protocol.ResultNACK + + IsACK = protocol.IsACK + IsNACK = protocol.IsNACK + IsUndelivered = protocol.IsUndelivered + + // HTTP Results + + NewHTTPResult = http.NewResult + NewHTTPRetriesResult = http.NewRetriesResult + + // Message Creation + + ToMessage = binding.ToMessage + + // Event Creation + + NewEventFromHTTPRequest = http.NewEventFromHTTPRequest + NewEventFromHTTPResponse = http.NewEventFromHTTPResponse + NewEventsFromHTTPRequest = http.NewEventsFromHTTPRequest + NewEventsFromHTTPResponse = http.NewEventsFromHTTPResponse + NewHTTPRequestFromEvent = http.NewHTTPRequestFromEvent + NewHTTPRequestFromEvents = http.NewHTTPRequestFromEvents + IsHTTPBatch = http.IsHTTPBatch + + // HTTP Messages + + WriteHTTPRequest = http.WriteRequest + + // Context + + ContextWithTarget = context.WithTarget + TargetFromContext = context.TargetFrom + ContextWithRetriesConstantBackoff = context.WithRetriesConstantBackoff + ContextWithRetriesLinearBackoff = context.WithRetriesLinearBackoff + ContextWithRetriesExponentialBackoff = context.WithRetriesExponentialBackoff + + WithEncodingBinary = binding.WithForceBinary + WithEncodingStructured = binding.WithForceStructured + + // Custom Types + + ParseTimestamp = types.ParseTimestamp + ParseURIRef = 
types.ParseURIRef + ParseURI = types.ParseURI + + // HTTP Protocol + + NewHTTP = http.New + + // HTTP Protocol Options + + WithTarget = http.WithTarget + WithHeader = http.WithHeader + WithShutdownTimeout = http.WithShutdownTimeout + //WithEncoding = http.WithEncoding + //WithStructuredEncoding = http.WithStructuredEncoding // TODO: expose new way + WithPort = http.WithPort + WithPath = http.WithPath + WithMiddleware = http.WithMiddleware + WithListener = http.WithListener + WithRoundTripper = http.WithRoundTripper + WithGetHandlerFunc = http.WithGetHandlerFunc + WithOptionsHandlerFunc = http.WithOptionsHandlerFunc + WithDefaultOptionsHandlerFunc = http.WithDefaultOptionsHandlerFunc +) diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go new file mode 100644 index 000000000..97f2c4dd7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go @@ -0,0 +1,52 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "context" + "io" + + "github.com/cloudevents/sdk-go/v2/binding/spec" +) + +// MessageMetadataWriter is used to set metadata when a binary Message is visited. +type MessageMetadataWriter interface { + // Set a standard attribute. + // + // The value can either be the correct golang type for the attribute, or a canonical + // string encoding, or nil. If value is nil, then the attribute should be deleted. + // See package types to perform the needed conversions. + SetAttribute(attribute spec.Attribute, value interface{}) error + + // Set an extension attribute. + // + // The value can either be the correct golang type for the attribute, or a canonical + // string encoding, or nil. If value is nil, then the extension should be deleted. + // See package types to perform the needed conversions. 
+ SetExtension(name string, value interface{}) error +} + +// BinaryWriter is used to visit a binary Message and generate a new representation. +// +// Protocols that supports binary encoding should implement this interface to implement direct +// binary to binary encoding and event to binary encoding. +// +// Start() and End() methods must be invoked by the caller of Message.ReadBinary() every time +// the BinaryWriter implementation is used to visit a Message. +type BinaryWriter interface { + MessageMetadataWriter + + // Method invoked at the beginning of the visit. Useful to perform initial memory allocations + Start(ctx context.Context) error + + // SetData receives an io.Reader for the data attribute. + // io.Reader is not invoked when the data attribute is empty + SetData(data io.Reader) error + + // End method is invoked only after the whole encoding process ends successfully. + // If it fails, it's never invoked. It can be used to finalize the message. + End(ctx context.Context) error +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go new file mode 100644 index 000000000..8fa999789 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go @@ -0,0 +1,68 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* + +Package binding defines interfaces for protocol bindings. + +NOTE: Most applications that emit or consume events should use the ../client +package, which provides a simpler API to the underlying binding. + +The interfaces in this package provide extra encoding and protocol information +to allow efficient forwarding and end-to-end reliable delivery between a +Receiver and a Sender belonging to different bindings. This is useful for +intermediary applications that route or forward events, but not necessary for +most "endpoint" applications that emit or consume events. 
+ +Protocol Bindings + +A protocol binding usually implements a Message, a Sender and Receiver, a StructuredWriter and a BinaryWriter (depending on the supported encodings of the protocol) and an Write[ProtocolMessage] method. + +Read and write events + +The core of this package is the binding.Message interface. +Through binding.MessageReader It defines how to read a protocol specific message for an +encoded event in structured mode or binary mode. +The entity who receives a protocol specific data structure representing a message +(e.g. an HttpRequest) encapsulates it in a binding.Message implementation using a NewMessage method (e.g. http.NewMessage). +Then the entity that wants to send the binding.Message back on the wire, +translates it back to the protocol specific data structure (e.g. a Kafka ConsumerMessage), using +the writers BinaryWriter and StructuredWriter specific to that protocol. +Binding implementations exposes their writers +through a specific Write[ProtocolMessage] function (e.g. kafka.EncodeProducerMessage), +in order to simplify the encoding process. + +The encoding process can be customized in order to mutate the final result with binding.TransformerFactory. +A bunch of these are provided directly by the binding/transformer module. + +Usually binding.Message implementations can be encoded only one time, because the encoding process drain the message itself. +In order to consume a message several times, the binding/buffering package provides several APIs to buffer the Message. + +A message can be converted to an event.Event using binding.ToEvent() method. +An event.Event can be used as Message casting it to binding.EventMessage. + +In order to simplify the encoding process for each protocol, this package provide several utility methods like binding.Write and binding.DirectWrite. +The binding.Write method tries to preserve the structured/binary encoding, in order to be as much efficient as possible. 
+ +Messages can be eventually wrapped to change their behaviours and binding their lifecycle, like the binding.FinishMessage. +Every Message wrapper implements the MessageWrapper interface + +Sender and Receiver + +A Receiver receives protocol specific messages and wraps them to into binding.Message implementations. + +A Sender converts arbitrary Message implementations to a protocol-specific form using the protocol specific Write method +and sends them. + +Message and ExactlyOnceMessage provide methods to allow acknowledgments to +propagate when a reliable messages is forwarded from a Receiver to a Sender. +QoS 0 (unreliable), 1 (at-least-once) and 2 (exactly-once) are supported. + +Transport + +A binding implementation providing Sender and Receiver implementations can be used as a Transport through the BindingTransport adapter. + +*/ +package binding diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go new file mode 100644 index 000000000..5070b7295 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go @@ -0,0 +1,50 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import "errors" + +// Encoding enum specifies the type of encodings supported by binding interfaces +type Encoding int + +const ( + // Binary encoding as specified in https://github.com/cloudevents/spec/blob/master/spec.md#message + EncodingBinary Encoding = iota + // Structured encoding as specified in https://github.com/cloudevents/spec/blob/master/spec.md#message + EncodingStructured + // Message is an instance of EventMessage or it contains EventMessage nested (through MessageWrapper) + EncodingEvent + // When the encoding is unknown (which means that the message is a non-event) + EncodingUnknown + + // EncodingBatch is an instance of JSON Batched Events + EncodingBatch +) + +func (e Encoding) String() string { + switch e { + case 
EncodingBinary: + return "binary" + case EncodingStructured: + return "structured" + case EncodingEvent: + return "event" + case EncodingBatch: + return "batch" + case EncodingUnknown: + return "unknown" + } + return "" +} + +// ErrUnknownEncoding specifies that the Message is not an event or it is encoded with an unknown encoding +var ErrUnknownEncoding = errors.New("unknown Message encoding") + +// ErrNotStructured returned by Message.Structured for non-structured messages. +var ErrNotStructured = errors.New("message is not in structured mode") + +// ErrNotBinary returned by Message.Binary for non-binary messages. +var ErrNotBinary = errors.New("message is not in binary mode") diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go new file mode 100644 index 000000000..f82c729c4 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go @@ -0,0 +1,108 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "bytes" + "context" + + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/event" +) + +type eventFormatKey int + +const ( + formatEventStructured eventFormatKey = iota +) + +// EventMessage type-converts a event.Event object to implement Message. +// This allows local event.Event objects to be sent directly via Sender.Send() +// s.Send(ctx, binding.EventMessage(e)) +// When an event is wrapped into a EventMessage, the original event could be +// potentially mutated. 
If you need to use the Event again, after wrapping it into +// an Event message, you should copy it before +type EventMessage event.Event + +func ToMessage(e *event.Event) Message { + return (*EventMessage)(e) +} + +func (m *EventMessage) ReadEncoding() Encoding { + return EncodingEvent +} + +func (m *EventMessage) ReadStructured(ctx context.Context, builder StructuredWriter) error { + f := GetOrDefaultFromCtx(ctx, formatEventStructured, format.JSON).(format.Format) + b, err := f.Marshal((*event.Event)(m)) + if err != nil { + return err + } + return builder.SetStructuredEvent(ctx, f, bytes.NewReader(b)) +} + +func (m *EventMessage) ReadBinary(ctx context.Context, b BinaryWriter) (err error) { + err = eventContextToBinaryWriter(m.Context, b) + if err != nil { + return err + } + // Pass the body + body := (*event.Event)(m).Data() + if len(body) > 0 { + err = b.SetData(bytes.NewBuffer(body)) + if err != nil { + return err + } + } + return nil +} + +func (m *EventMessage) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) { + sv := spec.VS.Version(m.Context.GetSpecVersion()) + a := sv.AttributeFromKind(k) + if a != nil { + return a, a.Get(m.Context) + } + return nil, nil +} + +func (m *EventMessage) GetExtension(name string) interface{} { + ext, _ := m.Context.GetExtension(name) + return ext +} + +func eventContextToBinaryWriter(c event.EventContext, b BinaryWriter) (err error) { + // Pass all attributes + sv := spec.VS.Version(c.GetSpecVersion()) + for _, a := range sv.Attributes() { + value := a.Get(c) + if value != nil { + err = b.SetAttribute(a, value) + } + if err != nil { + return err + } + } + // Pass all extensions + for k, v := range c.GetExtensions() { + err = b.SetExtension(k, v) + if err != nil { + return err + } + } + return nil +} + +func (*EventMessage) Finish(error) error { return nil } + +var _ Message = (*EventMessage)(nil) // Test it conforms to the interface +var _ MessageMetadataReader = (*EventMessage)(nil) // Test it conforms to the 
interface + +// UseFormatForEvent configures which format to use when marshalling the event to structured mode +func UseFormatForEvent(ctx context.Context, f format.Format) context.Context { + return context.WithValue(ctx, formatEventStructured, f) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go new file mode 100644 index 000000000..8b51c4c61 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go @@ -0,0 +1,42 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import "github.com/cloudevents/sdk-go/v2/binding/spec" + +type finishMessage struct { + Message + finish func(error) +} + +func (m *finishMessage) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) { + return m.Message.(MessageMetadataReader).GetAttribute(k) +} + +func (m *finishMessage) GetExtension(s string) interface{} { + return m.Message.(MessageMetadataReader).GetExtension(s) +} + +func (m *finishMessage) GetWrappedMessage() Message { + return m.Message +} + +func (m *finishMessage) Finish(err error) error { + err2 := m.Message.Finish(err) // Finish original message first + if m.finish != nil { + m.finish(err) // Notify callback + } + return err2 +} + +var _ MessageWrapper = (*finishMessage)(nil) + +// WithFinish returns a wrapper for m that calls finish() and +// m.Finish() in its Finish(). +// Allows code to be notified when a message is Finished. 
+func WithFinish(m Message, finish func(error)) Message { + return &finishMessage{Message: m, finish: finish} +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go new file mode 100644 index 000000000..54c3f1a8c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go @@ -0,0 +1,12 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package format formats structured events. + +The "application/cloudevents+json" format is built-in and always +available. Other formats may be added. +*/ +package format diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go new file mode 100644 index 000000000..6bdd1842b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go @@ -0,0 +1,105 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package format + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + + "github.com/cloudevents/sdk-go/v2/event" +) + +// Format marshals and unmarshals structured events to bytes. +type Format interface { + // MediaType identifies the format + MediaType() string + // Marshal event to bytes + Marshal(*event.Event) ([]byte, error) + // Unmarshal bytes to event + Unmarshal([]byte, *event.Event) error +} + +// Prefix for event-format media types. +const Prefix = "application/cloudevents" + +// IsFormat returns true if mediaType begins with "application/cloudevents" +func IsFormat(mediaType string) bool { return strings.HasPrefix(mediaType, Prefix) } + +// JSON is the built-in "application/cloudevents+json" format. 
+var JSON = jsonFmt{} + +type jsonFmt struct{} + +func (jsonFmt) MediaType() string { return event.ApplicationCloudEventsJSON } + +func (jsonFmt) Marshal(e *event.Event) ([]byte, error) { return json.Marshal(e) } +func (jsonFmt) Unmarshal(b []byte, e *event.Event) error { + return json.Unmarshal(b, e) +} + +// JSONBatch is the built-in "application/cloudevents-batch+json" format. +var JSONBatch = jsonBatchFmt{} + +type jsonBatchFmt struct{} + +func (jb jsonBatchFmt) MediaType() string { + return event.ApplicationCloudEventsBatchJSON +} + +// Marshal will return an error for jsonBatchFmt since the Format interface doesn't support batch Marshalling, and we +// know it's structured batch json, we'll go direct to the json.UnMarshall() (see `ToEvents()`) since that is the best +// way to support batch operations for now. +func (jb jsonBatchFmt) Marshal(e *event.Event) ([]byte, error) { + return nil, errors.New("not supported for batch events") +} + +func (jb jsonBatchFmt) Unmarshal(b []byte, e *event.Event) error { + return errors.New("not supported for batch events") +} + +// built-in formats +var formats map[string]Format + +func init() { + formats = map[string]Format{} + Add(JSON) + Add(JSONBatch) +} + +// Lookup returns the format for contentType, or nil if not found. +func Lookup(contentType string) Format { + i := strings.IndexRune(contentType, ';') + if i == -1 { + i = len(contentType) + } + contentType = strings.TrimSpace(strings.ToLower(contentType[0:i])) + return formats[contentType] +} + +func unknown(mediaType string) error { + return fmt.Errorf("unknown event format media-type %#v", mediaType) +} + +// Add a new Format. It can be retrieved by Lookup(f.MediaType()) +func Add(f Format) { formats[f.MediaType()] = f } + +// Marshal an event to bytes using the mediaType event format. 
+func Marshal(mediaType string, e *event.Event) ([]byte, error) { + if f := formats[mediaType]; f != nil { + return f.Marshal(e) + } + return nil, unknown(mediaType) +} + +// Unmarshal bytes to an event using the mediaType event format. +func Unmarshal(mediaType string, b []byte, e *event.Event) error { + if f := formats[mediaType]; f != nil { + return f.Unmarshal(b, e) + } + return unknown(mediaType) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go new file mode 100644 index 000000000..e30e150c0 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go @@ -0,0 +1,153 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/binding/spec" +) + +// MessageReader defines the read-related portion of the Message interface. +// +// The ReadStructured and ReadBinary methods allows to perform an optimized encoding of a Message to a specific data structure. +// +// If MessageReader.ReadEncoding() can be equal to EncodingBinary, then the implementation of MessageReader +// MUST also implement MessageMetadataReader. +// +// A Sender should try each method of interest and fall back to binding.ToEvent() if none are supported. +// An out of the box algorithm is provided for writing a message: binding.Write(). +type MessageReader interface { + // Return the type of the message Encoding. + // The encoding should be preferably computed when the message is constructed. + ReadEncoding() Encoding + + // ReadStructured transfers a structured-mode event to a StructuredWriter. + // It must return ErrNotStructured if message is not in structured mode. + // + // Returns a different err if something wrong happened while trying to read the structured event. + // In this case, the caller must Finish the message with appropriate error. 
+ // + // This allows Senders to avoid re-encoding messages that are + // already in suitable structured form. + ReadStructured(context.Context, StructuredWriter) error + + // ReadBinary transfers a binary-mode event to an BinaryWriter. + // It must return ErrNotBinary if message is not in binary mode. + // + // The implementation of ReadBinary must not control the lifecycle with BinaryWriter.Start() and BinaryWriter.End(), + // because the caller must control the lifecycle. + // + // Returns a different err if something wrong happened while trying to read the binary event + // In this case, the caller must Finish the message with appropriate error + // + // This allows Senders to avoid re-encoding messages that are + // already in suitable binary form. + ReadBinary(context.Context, BinaryWriter) error +} + +// MessageMetadataReader defines how to read metadata from a binary/event message +// +// If a message implementing MessageReader is encoded as binary (MessageReader.ReadEncoding() == EncodingBinary) +// or it's an EventMessage, then it's safe to assume that it also implements this interface +type MessageMetadataReader interface { + // GetAttribute returns: + // + // * attribute, value: if the message contains an attribute of that attribute kind + // * attribute, nil: if the message spec version supports the attribute kind, but doesn't have any value + // * nil, nil: if the message spec version doesn't support the attribute kind + GetAttribute(attributeKind spec.Kind) (spec.Attribute, interface{}) + // GetExtension returns the value of that extension, if any. + GetExtension(name string) interface{} +} + +// Message is the interface to a binding-specific message containing an event. +// +// Reliable Delivery +// +// There are 3 reliable qualities of service for messages: +// +// 0/at-most-once/unreliable: messages can be dropped silently. 
+// +// 1/at-least-once: messages are not dropped without signaling an error +// to the sender, but they may be duplicated in the event of a re-send. +// +// 2/exactly-once: messages are never dropped (without error) or +// duplicated, as long as both sending and receiving ends maintain +// some binding-specific delivery state. Whether this is persisted +// depends on the configuration of the binding implementations. +// +// The Message interface supports QoS 0 and 1, the ExactlyOnceMessage interface +// supports QoS 2 +// +// Message includes the MessageReader interface to read messages. Every binding.Message implementation *must* specify if the message can be accessed one or more times. +// +// When a Message can be forgotten by the entity who produced the message, Message.Finish() *must* be invoked. +type Message interface { + MessageReader + + // Finish *must* be called when message from a Receiver can be forgotten by + // the receiver. A QoS 1 sender should not call Finish() until it gets an acknowledgment of + // receipt on the underlying transport. For QoS 2 see ExactlyOnceMessage. + // + // Note that, depending on the Message implementation, forgetting to Finish the message + // could produce memory/resources leaks! + // + // Passing a non-nil err indicates sending or processing failed. + // A non-nil return indicates that the message was not accepted + // by the receivers peer. + Finish(error) error +} + +// ExactlyOnceMessage is implemented by received Messages +// that support QoS 2. Only transports that support QoS 2 need to +// implement or use this interface. +type ExactlyOnceMessage interface { + Message + + // Received is called by a forwarding QoS2 Sender when it gets + // acknowledgment of receipt (e.g. AMQP 'accept' or MQTT PUBREC) + // + // The receiver must call settle(nil) when it get's the ack-of-ack + // (e.g. AMQP 'settle' or MQTT PUBCOMP) or settle(err) if the + // transfer fails. 
+ // + // Finally the Sender calls Finish() to indicate the message can be + // discarded. + // + // If sending fails, or if the sender does not support QoS 2, then + // Finish() may be called without any call to Received() + Received(settle func(error)) +} + +// MessageContext interface exposes the internal context that a message might contain +// Only some Message implementations implement this interface. +type MessageContext interface { + // Get the context associated with this message + Context() context.Context +} + +// MessageWrapper interface is used to walk through a decorated Message and unwrap it. +type MessageWrapper interface { + Message + MessageMetadataReader + + // Method to get the wrapped message + GetWrappedMessage() Message +} + +func UnwrapMessage(message Message) Message { + m := message + for m != nil { + switch mt := m.(type) { + case MessageWrapper: + m = mt.GetWrappedMessage() + default: + return m + } + } + return m +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go new file mode 100644 index 000000000..3c3021d46 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go @@ -0,0 +1,141 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package spec + +import ( + "fmt" + "time" + + "github.com/cloudevents/sdk-go/v2/event" + + "github.com/cloudevents/sdk-go/v2/types" +) + +// Kind is a version-independent identifier for a CloudEvent context attribute. 
+type Kind uint8 + +const ( + // Required cloudevents attributes + ID Kind = iota + Source + SpecVersion + Type + // Optional cloudevents attributes + DataContentType + DataSchema + Subject + Time +) +const nAttrs = int(Time) + 1 + +var kindNames = [nAttrs]string{ + "id", + "source", + "specversion", + "type", + "datacontenttype", + "dataschema", + "subject", + "time", +} + +// String is a human-readable string, for a valid attribute name use Attribute.Name +func (k Kind) String() string { return kindNames[k] } + +// IsRequired returns true for attributes defined as "required" by the CE spec. +func (k Kind) IsRequired() bool { return k < DataContentType } + +// Attribute is a named attribute accessor. +// The attribute name is specific to a Version. +type Attribute interface { + Kind() Kind + // Name of the attribute with respect to the current spec Version() with prefix + PrefixedName() string + // Name of the attribute with respect to the current spec Version() + Name() string + // Version of the spec that this attribute belongs to + Version() Version + // Get the value of this attribute from an event context + Get(event.EventContextReader) interface{} + // Set the value of this attribute on an event context + Set(event.EventContextWriter, interface{}) error + // Delete this attribute from and event context, when possible + Delete(event.EventContextWriter) error +} + +// accessor provides Kind, Get, Set. 
+type accessor interface { + Kind() Kind + Get(event.EventContextReader) interface{} + Set(event.EventContextWriter, interface{}) error + Delete(event.EventContextWriter) error +} + +var acc = [nAttrs]accessor{ + &aStr{aKind(ID), event.EventContextReader.GetID, event.EventContextWriter.SetID}, + &aStr{aKind(Source), event.EventContextReader.GetSource, event.EventContextWriter.SetSource}, + &aStr{aKind(SpecVersion), event.EventContextReader.GetSpecVersion, func(writer event.EventContextWriter, s string) error { return nil }}, + &aStr{aKind(Type), event.EventContextReader.GetType, event.EventContextWriter.SetType}, + &aStr{aKind(DataContentType), event.EventContextReader.GetDataContentType, event.EventContextWriter.SetDataContentType}, + &aStr{aKind(DataSchema), event.EventContextReader.GetDataSchema, event.EventContextWriter.SetDataSchema}, + &aStr{aKind(Subject), event.EventContextReader.GetSubject, event.EventContextWriter.SetSubject}, + &aTime{aKind(Time), event.EventContextReader.GetTime, event.EventContextWriter.SetTime}, +} + +// aKind implements Kind() +type aKind Kind + +func (kind aKind) Kind() Kind { return Kind(kind) } + +type aStr struct { + aKind + get func(event.EventContextReader) string + set func(event.EventContextWriter, string) error +} + +func (a *aStr) Get(c event.EventContextReader) interface{} { + if s := a.get(c); s != "" { + return s + } + return nil // Treat blank as missing +} + +func (a *aStr) Set(c event.EventContextWriter, v interface{}) error { + s, err := types.ToString(v) + if err != nil { + return fmt.Errorf("invalid value for %s: %#v", a.Kind(), v) + } + return a.set(c, s) +} + +func (a *aStr) Delete(c event.EventContextWriter) error { + return a.set(c, "") +} + +type aTime struct { + aKind + get func(event.EventContextReader) time.Time + set func(event.EventContextWriter, time.Time) error +} + +func (a *aTime) Get(c event.EventContextReader) interface{} { + if v := a.get(c); !v.IsZero() { + return v + } + return nil // Treat zero 
time as missing. +} + +func (a *aTime) Set(c event.EventContextWriter, v interface{}) error { + t, err := types.ToTime(v) + if err != nil { + return fmt.Errorf("invalid value for %s: %#v", a.Kind(), v) + } + return a.set(c, t) +} + +func (a *aTime) Delete(c event.EventContextWriter) error { + return a.set(c, time.Time{}) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go new file mode 100644 index 000000000..44c0b3145 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go @@ -0,0 +1,13 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package spec provides spec-version metadata. + +For use by code that maps events using (prefixed) attribute name strings. +Supports handling multiple spec versions uniformly. + +*/ +package spec diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go new file mode 100644 index 000000000..110787ddc --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go @@ -0,0 +1,81 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package spec + +import ( + "github.com/cloudevents/sdk-go/v2/event" +) + +type matchExactVersion struct { + version +} + +func (v *matchExactVersion) Attribute(name string) Attribute { return v.attrMap[name] } + +var _ Version = (*matchExactVersion)(nil) + +func newMatchExactVersionVersion( + prefix string, + attributeNameMatchMapper func(string) string, + context event.EventContext, + convert func(event.EventContextConverter) event.EventContext, + attrs ...*attribute, +) *matchExactVersion { + v := &matchExactVersion{ + version: version{ + prefix: prefix, + context: context, + convert: convert, + attrMap: map[string]Attribute{}, + attrs: make([]Attribute, len(attrs)), + }, + } 
+ for i, a := range attrs { + a.version = v + v.attrs[i] = a + v.attrMap[attributeNameMatchMapper(a.name)] = a + } + return v +} + +// WithPrefixMatchExact returns a set of versions with prefix added to all attribute names. +func WithPrefixMatchExact(attributeNameMatchMapper func(string) string, prefix string) *Versions { + attr := func(name string, kind Kind) *attribute { + return &attribute{accessor: acc[kind], name: name} + } + vs := &Versions{ + m: map[string]Version{}, + prefix: prefix, + all: []Version{ + newMatchExactVersionVersion(prefix, attributeNameMatchMapper, event.EventContextV1{}.AsV1(), + func(c event.EventContextConverter) event.EventContext { return c.AsV1() }, + attr("id", ID), + attr("source", Source), + attr("specversion", SpecVersion), + attr("type", Type), + attr("datacontenttype", DataContentType), + attr("dataschema", DataSchema), + attr("subject", Subject), + attr("time", Time), + ), + newMatchExactVersionVersion(prefix, attributeNameMatchMapper, event.EventContextV03{}.AsV03(), + func(c event.EventContextConverter) event.EventContext { return c.AsV03() }, + attr("specversion", SpecVersion), + attr("type", Type), + attr("source", Source), + attr("schemaurl", DataSchema), + attr("subject", Subject), + attr("id", ID), + attr("time", Time), + attr("datacontenttype", DataContentType), + ), + }, + } + for _, v := range vs.all { + vs.m[v.String()] = v + } + return vs +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go new file mode 100644 index 000000000..7fa0f5840 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go @@ -0,0 +1,189 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package spec + +import ( + "strings" + + "github.com/cloudevents/sdk-go/v2/event" +) + +// Version provides meta-data for a single spec-version. +type Version interface { + // String name of the version, e.g. 
"1.0" + String() string + // Prefix for attribute names. + Prefix() string + // Attribute looks up a prefixed attribute name (case insensitive). + // Returns nil if not found. + Attribute(prefixedName string) Attribute + // Attribute looks up the attribute from kind. + // Returns nil if not found. + AttributeFromKind(kind Kind) Attribute + // Attributes returns all the context attributes for this version. + Attributes() []Attribute + // Convert translates a context to this version. + Convert(event.EventContextConverter) event.EventContext + // NewContext returns a new context for this version. + NewContext() event.EventContext + // SetAttribute sets named attribute to value. + // + // Name is case insensitive. + // Does nothing if name does not start with prefix. + SetAttribute(context event.EventContextWriter, name string, value interface{}) error +} + +// Versions contains all known versions with the same attribute prefix. +type Versions struct { + prefix string + all []Version + m map[string]Version +} + +// Versions returns the list of all known versions, most recent first. +func (vs *Versions) Versions() []Version { return vs.all } + +// Version returns the named version. +func (vs *Versions) Version(name string) Version { + return vs.m[name] +} + +// Latest returns the latest Version +func (vs *Versions) Latest() Version { return vs.all[0] } + +// PrefixedSpecVersionName returns the specversion attribute PrefixedName +func (vs *Versions) PrefixedSpecVersionName() string { return vs.prefix + "specversion" } + +// Prefix is the lowercase attribute name prefix. 
+func (vs *Versions) Prefix() string { return vs.prefix } + +type attribute struct { + accessor + name string + version Version +} + +func (a *attribute) PrefixedName() string { return a.version.Prefix() + a.name } +func (a *attribute) Name() string { return a.name } +func (a *attribute) Version() Version { return a.version } + +type version struct { + prefix string + context event.EventContext + convert func(event.EventContextConverter) event.EventContext + attrMap map[string]Attribute + attrs []Attribute +} + +func (v *version) Attribute(name string) Attribute { return v.attrMap[strings.ToLower(name)] } +func (v *version) Attributes() []Attribute { return v.attrs } +func (v *version) String() string { return v.context.GetSpecVersion() } +func (v *version) Prefix() string { return v.prefix } +func (v *version) NewContext() event.EventContext { return v.context.Clone() } + +// HasPrefix is a case-insensitive prefix check. +func (v *version) HasPrefix(name string) bool { + return strings.HasPrefix(strings.ToLower(name), v.prefix) +} + +func (v *version) Convert(c event.EventContextConverter) event.EventContext { return v.convert(c) } + +func (v *version) SetAttribute(c event.EventContextWriter, name string, value interface{}) error { + if a := v.Attribute(name); a != nil { // Standard attribute + return a.Set(c, value) + } + name = strings.ToLower(name) + var err error + if v.HasPrefix(name) { // Extension attribute + return c.SetExtension(strings.TrimPrefix(name, v.prefix), value) + } + return err +} + +func (v *version) AttributeFromKind(kind Kind) Attribute { + for _, a := range v.Attributes() { + if a.Kind() == kind { + return a + } + } + return nil +} + +func newVersion( + prefix string, + context event.EventContext, + convert func(event.EventContextConverter) event.EventContext, + attrs ...*attribute, +) *version { + v := &version{ + prefix: strings.ToLower(prefix), + context: context, + convert: convert, + attrMap: map[string]Attribute{}, + attrs: 
make([]Attribute, len(attrs)), + } + for i, a := range attrs { + a.version = v + v.attrs[i] = a + v.attrMap[strings.ToLower(a.PrefixedName())] = a + } + return v +} + +// WithPrefix returns a set of versions with prefix added to all attribute names. +func WithPrefix(prefix string) *Versions { + attr := func(name string, kind Kind) *attribute { + return &attribute{accessor: acc[kind], name: name} + } + vs := &Versions{ + m: map[string]Version{}, + prefix: prefix, + all: []Version{ + newVersion(prefix, event.EventContextV1{}.AsV1(), + func(c event.EventContextConverter) event.EventContext { return c.AsV1() }, + attr("id", ID), + attr("source", Source), + attr("specversion", SpecVersion), + attr("type", Type), + attr("datacontenttype", DataContentType), + attr("dataschema", DataSchema), + attr("subject", Subject), + attr("time", Time), + ), + newVersion(prefix, event.EventContextV03{}.AsV03(), + func(c event.EventContextConverter) event.EventContext { return c.AsV03() }, + attr("specversion", SpecVersion), + attr("type", Type), + attr("source", Source), + attr("schemaurl", DataSchema), + attr("subject", Subject), + attr("id", ID), + attr("time", Time), + attr("datacontenttype", DataContentType), + ), + }, + } + for _, v := range vs.all { + vs.m[v.String()] = v + } + return vs +} + +// New returns a set of versions +func New() *Versions { return WithPrefix("") } + +// Built-in un-prefixed versions. 
+var ( + VS *Versions + V03 Version + V1 Version +) + +func init() { + VS = New() + V03 = VS.Version("0.3") + V1 = VS.Version("1.0") +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go new file mode 100644 index 000000000..60256f2b3 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go @@ -0,0 +1,22 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "context" + "io" + + "github.com/cloudevents/sdk-go/v2/binding/format" +) + +// StructuredWriter is used to visit a structured Message and generate a new representation. +// +// Protocols that supports structured encoding should implement this interface to implement direct +// structured to structured encoding and event to structured encoding. +type StructuredWriter interface { + // Event receives an io.Reader for the whole event. + SetStructuredEvent(ctx context.Context, format format.Format, event io.Reader) error +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go new file mode 100644 index 000000000..d3332c158 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go @@ -0,0 +1,153 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/types" +) + +// ErrCannotConvertToEvent is a generic error when a conversion of a Message to an Event fails +var ErrCannotConvertToEvent = errors.New("cannot convert message to event") + +// ErrCannotConvertToEvents is a generic error when a conversion of a 
Message to a Batched Event fails +var ErrCannotConvertToEvents = errors.New("cannot convert message to batched events") + +// ToEvent translates a Message with a valid Structured or Binary representation to an Event. +// This function returns the Event generated from the Message and the original encoding of the message or +// an error that points the conversion error. +// transformers can be nil and this function guarantees that they are invoked only once during the encoding process. +func ToEvent(ctx context.Context, message MessageReader, transformers ...Transformer) (*event.Event, error) { + if message == nil { + return nil, nil + } + + messageEncoding := message.ReadEncoding() + if messageEncoding == EncodingEvent { + m := message + for m != nil { + switch mt := m.(type) { + case *EventMessage: + e := (*event.Event)(mt) + return e, Transformers(transformers).Transform(mt, (*messageToEventBuilder)(e)) + case MessageWrapper: + m = mt.GetWrappedMessage() + default: + break + } + } + return nil, ErrCannotConvertToEvent + } + + e := event.New() + encoder := (*messageToEventBuilder)(&e) + _, err := DirectWrite( + context.Background(), + message, + encoder, + encoder, + ) + if err != nil { + return nil, err + } + return &e, Transformers(transformers).Transform((*EventMessage)(&e), encoder) +} + +// ToEvents translates a Batch Message and corresponding Reader data to a slice of Events. +// This function returns the Events generated from the body data, or an error that points +// to the conversion issue. +func ToEvents(ctx context.Context, message MessageReader, body io.Reader) ([]event.Event, error) { + messageEncoding := message.ReadEncoding() + if messageEncoding != EncodingBatch { + return nil, ErrCannotConvertToEvents + } + + // Since Format doesn't support batch Marshalling, and we know it's structured batch json, we'll go direct to the + // json.UnMarshall(), since that is the best way to support batch operations for now. 
+ var events []event.Event + return events, json.NewDecoder(body).Decode(&events) +} + +type messageToEventBuilder event.Event + +var _ StructuredWriter = (*messageToEventBuilder)(nil) +var _ BinaryWriter = (*messageToEventBuilder)(nil) + +func (b *messageToEventBuilder) SetStructuredEvent(ctx context.Context, format format.Format, ev io.Reader) error { + var buf bytes.Buffer + _, err := io.Copy(&buf, ev) + if err != nil { + return err + } + return format.Unmarshal(buf.Bytes(), (*event.Event)(b)) +} + +func (b *messageToEventBuilder) Start(ctx context.Context) error { + return nil +} + +func (b *messageToEventBuilder) End(ctx context.Context) error { + return nil +} + +func (b *messageToEventBuilder) SetData(data io.Reader) error { + buf, ok := data.(*bytes.Buffer) + if !ok { + buf = new(bytes.Buffer) + _, err := io.Copy(buf, data) + if err != nil { + return err + } + } + if buf.Len() > 0 { + b.DataEncoded = buf.Bytes() + } + return nil +} + +func (b *messageToEventBuilder) SetAttribute(attribute spec.Attribute, value interface{}) error { + if value == nil { + _ = attribute.Delete(b.Context) + return nil + } + // If spec version we need to change to right context struct + if attribute.Kind() == spec.SpecVersion { + str, err := types.ToString(value) + if err != nil { + return err + } + switch str { + case event.CloudEventsVersionV03: + b.Context = b.Context.AsV03() + case event.CloudEventsVersionV1: + b.Context = b.Context.AsV1() + default: + return fmt.Errorf("unrecognized event version %s", str) + } + return nil + } + return attribute.Set(b.Context, value) +} + +func (b *messageToEventBuilder) SetExtension(name string, value interface{}) error { + if value == nil { + return b.Context.SetExtension(name, nil) + } + value, err := types.Validate(value) + if err != nil { + return err + } + return b.Context.SetExtension(name, value) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go 
b/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go new file mode 100644 index 000000000..de3bec44f --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go @@ -0,0 +1,42 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +// Transformer is an interface that implements a transformation +// process while transferring the event from the Message +// implementation to the provided encoder +// +// When a write function (binding.Write, binding.ToEvent, buffering.CopyMessage, etc.) +// takes Transformer(s) as parameter, it eventually converts the message to a form +// which correctly implements MessageMetadataReader, in order to guarantee that transformation +// is applied +type Transformer interface { + Transform(MessageMetadataReader, MessageMetadataWriter) error +} + +// TransformerFunc is a type alias to implement a Transformer through a function pointer +type TransformerFunc func(MessageMetadataReader, MessageMetadataWriter) error + +func (t TransformerFunc) Transform(r MessageMetadataReader, w MessageMetadataWriter) error { + return t(r, w) +} + +var _ Transformer = (TransformerFunc)(nil) + +// Transformers is a utility alias to run several Transformer +type Transformers []Transformer + +func (t Transformers) Transform(r MessageMetadataReader, w MessageMetadataWriter) error { + for _, transformer := range t { + err := transformer.Transform(r, w) + if err != nil { + return err + } + } + return nil +} + +var _ Transformer = (Transformers)(nil) diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go new file mode 100644 index 000000000..cb498e62d --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go @@ -0,0 +1,179 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "context" + + 
"github.com/cloudevents/sdk-go/v2/event" +) + +type eventEncodingKey int + +const ( + skipDirectStructuredEncoding eventEncodingKey = iota + skipDirectBinaryEncoding + preferredEventEncoding +) + +// DirectWrite invokes the encoders. structuredWriter and binaryWriter could be nil if the protocol doesn't support it. +// transformers can be nil and this function guarantees that they are invoked only once during the encoding process. +// This function MUST be invoked only if message.ReadEncoding() == EncodingBinary or message.ReadEncoding() == EncodingStructured +// +// Returns: +// * EncodingStructured, nil if message is correctly encoded in structured encoding +// * EncodingBinary, nil if message is correctly encoded in binary encoding +// * EncodingStructured, err if message was structured but error happened during the encoding +// * EncodingBinary, err if message was binary but error happened during the encoding +// * EncodingUnknown, ErrUnknownEncoding if message is not a structured or a binary Message +func DirectWrite( + ctx context.Context, + message MessageReader, + structuredWriter StructuredWriter, + binaryWriter BinaryWriter, + transformers ...Transformer, +) (Encoding, error) { + if structuredWriter != nil && len(transformers) == 0 && !GetOrDefaultFromCtx(ctx, skipDirectStructuredEncoding, false).(bool) { + if err := message.ReadStructured(ctx, structuredWriter); err == nil { + return EncodingStructured, nil + } else if err != ErrNotStructured { + return EncodingStructured, err + } + } + + if binaryWriter != nil && !GetOrDefaultFromCtx(ctx, skipDirectBinaryEncoding, false).(bool) && message.ReadEncoding() == EncodingBinary { + return EncodingBinary, writeBinaryWithTransformer(ctx, message, binaryWriter, transformers) + } + + return EncodingUnknown, ErrUnknownEncoding +} + +// Write executes the full algorithm to encode a Message using transformers: +// 1. It first tries direct encoding using DirectWrite +// 2. 
If no direct encoding is possible, it uses ToEvent to generate an Event representation +// 3. From the Event, the message is encoded back to the provided structured or binary encoders +// You can tweak the encoding process using the context decorators WithForceStructured, WithForceStructured, etc. +// transformers can be nil and this function guarantees that they are invoked only once during the encoding process. +// Returns: +// * EncodingStructured, nil if message is correctly encoded in structured encoding +// * EncodingBinary, nil if message is correctly encoded in binary encoding +// * EncodingUnknown, ErrUnknownEncoding if message.ReadEncoding() == EncodingUnknown +// * _, err if error happened during the encoding +func Write( + ctx context.Context, + message MessageReader, + structuredWriter StructuredWriter, + binaryWriter BinaryWriter, + transformers ...Transformer, +) (Encoding, error) { + enc := message.ReadEncoding() + var err error + // Skip direct encoding if the event is an event message + if enc != EncodingEvent { + enc, err = DirectWrite(ctx, message, structuredWriter, binaryWriter, transformers...) + if enc != EncodingUnknown { + // Message directly encoded, nothing else to do here + return enc, err + } + } + + var e *event.Event + e, err = ToEvent(ctx, message, transformers...) 
+ if err != nil { + return enc, err + } + + message = (*EventMessage)(e) + + if GetOrDefaultFromCtx(ctx, preferredEventEncoding, EncodingBinary).(Encoding) == EncodingStructured { + if structuredWriter != nil { + return EncodingStructured, message.ReadStructured(ctx, structuredWriter) + } + if binaryWriter != nil { + return EncodingBinary, writeBinary(ctx, message, binaryWriter) + } + } else { + if binaryWriter != nil { + return EncodingBinary, writeBinary(ctx, message, binaryWriter) + } + if structuredWriter != nil { + return EncodingStructured, message.ReadStructured(ctx, structuredWriter) + } + } + + return EncodingUnknown, ErrUnknownEncoding +} + +// WithSkipDirectStructuredEncoding skips direct structured to structured encoding during the encoding process +func WithSkipDirectStructuredEncoding(ctx context.Context, skip bool) context.Context { + return context.WithValue(ctx, skipDirectStructuredEncoding, skip) +} + +// WithSkipDirectBinaryEncoding skips direct binary to binary encoding during the encoding process +func WithSkipDirectBinaryEncoding(ctx context.Context, skip bool) context.Context { + return context.WithValue(ctx, skipDirectBinaryEncoding, skip) +} + +// WithPreferredEventEncoding defines the preferred encoding from event to message during the encoding process +func WithPreferredEventEncoding(ctx context.Context, enc Encoding) context.Context { + return context.WithValue(ctx, preferredEventEncoding, enc) +} + +// WithForceStructured forces structured encoding during the encoding process +func WithForceStructured(ctx context.Context) context.Context { + return context.WithValue(context.WithValue(ctx, preferredEventEncoding, EncodingStructured), skipDirectBinaryEncoding, true) +} + +// WithForceBinary forces binary encoding during the encoding process +func WithForceBinary(ctx context.Context) context.Context { + return context.WithValue(context.WithValue(ctx, preferredEventEncoding, EncodingBinary), skipDirectStructuredEncoding, true) +} + +// 
GetOrDefaultFromCtx gets a configuration value from the provided context +func GetOrDefaultFromCtx(ctx context.Context, key interface{}, def interface{}) interface{} { + if val := ctx.Value(key); val != nil { + return val + } else { + return def + } +} + +func writeBinaryWithTransformer( + ctx context.Context, + message MessageReader, + binaryWriter BinaryWriter, + transformers Transformers, +) error { + err := binaryWriter.Start(ctx) + if err != nil { + return err + } + err = message.ReadBinary(ctx, binaryWriter) + if err != nil { + return err + } + err = transformers.Transform(message.(MessageMetadataReader), binaryWriter) + if err != nil { + return err + } + return binaryWriter.End(ctx) +} + +func writeBinary( + ctx context.Context, + message MessageReader, + binaryWriter BinaryWriter, +) error { + err := binaryWriter.Start(ctx) + if err != nil { + return err + } + err = message.ReadBinary(ctx, binaryWriter) + if err != nil { + return err + } + return binaryWriter.End(ctx) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client.go new file mode 100644 index 000000000..ea8fbfbb4 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client.go @@ -0,0 +1,288 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "errors" + "fmt" + "io" + "runtime" + "sync" + + "go.uber.org/zap" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// Client interface defines the runtime contract the CloudEvents client supports. +type Client interface { + // Send will transmit the given event over the client's configured transport. 
+ Send(ctx context.Context, event event.Event) protocol.Result + + // Request will transmit the given event over the client's configured + // transport and return any response event. + Request(ctx context.Context, event event.Event) (*event.Event, protocol.Result) + + // StartReceiver will register the provided function for callback on receipt + // of a cloudevent. It will also start the underlying protocol as it has + // been configured. + // This call is blocking. + // Valid fn signatures are: + // * func() + // * func() error + // * func(context.Context) + // * func(context.Context) protocol.Result + // * func(event.Event) + // * func(event.Event) protocol.Result + // * func(context.Context, event.Event) + // * func(context.Context, event.Event) protocol.Result + // * func(event.Event) *event.Event + // * func(event.Event) (*event.Event, protocol.Result) + // * func(context.Context, event.Event) *event.Event + // * func(context.Context, event.Event) (*event.Event, protocol.Result) + StartReceiver(ctx context.Context, fn interface{}) error +} + +// New produces a new client with the provided transport object and applied +// client options. +func New(obj interface{}, opts ...Option) (Client, error) { + c := &ceClient{ + // Running runtime.GOMAXPROCS(0) doesn't update the value, just returns the current one + pollGoroutines: runtime.GOMAXPROCS(0), + observabilityService: noopObservabilityService{}, + } + + if p, ok := obj.(protocol.Sender); ok { + c.sender = p + } + if p, ok := obj.(protocol.Requester); ok { + c.requester = p + } + if p, ok := obj.(protocol.Responder); ok { + c.responder = p + } + if p, ok := obj.(protocol.Receiver); ok { + c.receiver = p + } + if p, ok := obj.(protocol.Opener); ok { + c.opener = p + } + + if err := c.applyOptions(opts...); err != nil { + return nil, err + } + return c, nil +} + +type ceClient struct { + sender protocol.Sender + requester protocol.Requester + receiver protocol.Receiver + responder protocol.Responder + // Optional. 
+ opener protocol.Opener + + observabilityService ObservabilityService + + inboundContextDecorators []func(context.Context, binding.Message) context.Context + outboundContextDecorators []func(context.Context) context.Context + invoker Invoker + receiverMu sync.Mutex + eventDefaulterFns []EventDefaulter + pollGoroutines int + blockingCallback bool +} + +func (c *ceClient) applyOptions(opts ...Option) error { + for _, fn := range opts { + if err := fn(c); err != nil { + return err + } + } + return nil +} + +func (c *ceClient) Send(ctx context.Context, e event.Event) protocol.Result { + var err error + if c.sender == nil { + err = errors.New("sender not set") + return err + } + + for _, f := range c.outboundContextDecorators { + ctx = f(ctx) + } + + if len(c.eventDefaulterFns) > 0 { + for _, fn := range c.eventDefaulterFns { + e = fn(ctx, e) + } + } + if err = e.Validate(); err != nil { + return err + } + + // Event has been defaulted and validated, record we are going to perform send. + ctx, cb := c.observabilityService.RecordSendingEvent(ctx, e) + err = c.sender.Send(ctx, (*binding.EventMessage)(&e)) + defer cb(err) + return err +} + +func (c *ceClient) Request(ctx context.Context, e event.Event) (*event.Event, protocol.Result) { + var resp *event.Event + var err error + + if c.requester == nil { + err = errors.New("requester not set") + return nil, err + } + for _, f := range c.outboundContextDecorators { + ctx = f(ctx) + } + + if len(c.eventDefaulterFns) > 0 { + for _, fn := range c.eventDefaulterFns { + e = fn(ctx, e) + } + } + + if err = e.Validate(); err != nil { + return nil, err + } + + // Event has been defaulted and validated, record we are going to perform request. + ctx, cb := c.observabilityService.RecordRequestEvent(ctx, e) + + // If provided a requester, use it to do request/response. 
+ var msg binding.Message + msg, err = c.requester.Request(ctx, (*binding.EventMessage)(&e)) + if msg != nil { + defer func() { + if err := msg.Finish(err); err != nil { + cecontext.LoggerFrom(ctx).Warnw("failed calling message.Finish", zap.Error(err)) + } + }() + } + if protocol.IsUndelivered(err) { + return nil, err + } + + // try to turn msg into an event, it might not work and that is ok. + if rs, rserr := binding.ToEvent(ctx, msg); rserr != nil { + cecontext.LoggerFrom(ctx).Debugw("response: failed calling ToEvent", zap.Error(rserr), zap.Any("resp", msg)) + // If the protocol returns no error, it is an ACK on the request, but we had + // issues turning the response into an event, so make an ACK Result and pass + // down the ToEvent error as well. + err = protocol.NewReceipt(true, "failed to convert response into event: %v\n%w", rserr, err) + } else { + resp = rs + } + defer cb(err, resp) + return resp, err +} + +// StartReceiver sets up the given fn to handle Receive. +// See Client.StartReceiver for details. This is a blocking call. +func (c *ceClient) StartReceiver(ctx context.Context, fn interface{}) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + c.receiverMu.Lock() + defer c.receiverMu.Unlock() + + if c.invoker != nil { + return fmt.Errorf("client already has a receiver") + } + + invoker, err := newReceiveInvoker(fn, c.observabilityService, c.inboundContextDecorators, c.eventDefaulterFns...) + if err != nil { + return err + } + if invoker.IsReceiver() && c.receiver == nil { + return fmt.Errorf("mismatched receiver callback without protocol.Receiver supported by protocol") + } + if invoker.IsResponder() && c.responder == nil { + return fmt.Errorf("mismatched receiver callback without protocol.Responder supported by protocol") + } + c.invoker = invoker + + if c.responder == nil && c.receiver == nil { + return errors.New("responder nor receiver set") + } + + defer func() { + c.invoker = nil + }() + + // Start Polling. 
+ wg := sync.WaitGroup{} + for i := 0; i < c.pollGoroutines; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for { + var msg binding.Message + var respFn protocol.ResponseFn + var err error + + if c.responder != nil { + msg, respFn, err = c.responder.Respond(ctx) + } else if c.receiver != nil { + msg, err = c.receiver.Receive(ctx) + respFn = noRespFn + } + + if err == io.EOF { // Normal close + return + } + + if err != nil { + cecontext.LoggerFrom(ctx).Warn("Error while receiving a message: ", err) + continue + } + + callback := func() { + if err := c.invoker.Invoke(ctx, msg, respFn); err != nil { + cecontext.LoggerFrom(ctx).Warn("Error while handling a message: ", err) + } + } + + if c.blockingCallback { + callback() + } else { + // Do not block on the invoker. + wg.Add(1) + go func() { + defer wg.Done() + callback() + }() + } + } + }() + } + + // Start the opener, if set. + if c.opener != nil { + if err = c.opener.OpenInbound(ctx); err != nil { + err = fmt.Errorf("error while opening the inbound connection: %w", err) + cancel() + } + } + + wg.Wait() + + return err +} + +// noRespFn is used to simply forward the protocol.Result for receivers that aren't responders +func noRespFn(_ context.Context, _ binding.Message, r protocol.Result, _ ...binding.Transformer) error { + return r +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go new file mode 100644 index 000000000..d48cc2042 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go @@ -0,0 +1,35 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "github.com/cloudevents/sdk-go/v2/protocol/http" +) + +// NewHTTP provides the good defaults for the common case using an HTTP +// Protocol client. 
+// The WithTimeNow, and WithUUIDs client options are also applied to the +// client, all outbound events will have a time and id set if not already +// present. +func NewHTTP(opts ...http.Option) (Client, error) { + p, err := http.New(opts...) + if err != nil { + return nil, err + } + + c, err := New(p, WithTimeNow(), WithUUIDs()) + if err != nil { + return nil, err + } + + return c, nil +} + +// NewDefault has been replaced by NewHTTP +// Deprecated. To get the same as NewDefault provided, please use NewHTTP with +// the observability service passed as an option, or client.NewClientHTTP from +// package github.com/cloudevents/sdk-go/observability/opencensus/v2/client +var NewDefault = NewHTTP diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go new file mode 100644 index 000000000..82985b8a7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go @@ -0,0 +1,12 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +// NewObserved produces a new client with the provided transport object and applied +// client options. +// Deprecated: This now has the same behaviour of New, and will be removed in future releases. +// As New, you must provide the observability service to use. 
+var NewObserved = New diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go b/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go new file mode 100644 index 000000000..7bfebf35c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go @@ -0,0 +1,57 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "time" + + "github.com/cloudevents/sdk-go/v2/event" + + "github.com/google/uuid" +) + +// EventDefaulter is the function signature for extensions that are able +// to perform event defaulting. +type EventDefaulter func(ctx context.Context, event event.Event) event.Event + +// DefaultIDToUUIDIfNotSet will inspect the provided event and assign a UUID to +// context.ID if it is found to be empty. +func DefaultIDToUUIDIfNotSet(ctx context.Context, event event.Event) event.Event { + if event.Context != nil { + if event.ID() == "" { + event.Context = event.Context.Clone() + event.SetID(uuid.New().String()) + } + } + return event +} + +// DefaultTimeToNowIfNotSet will inspect the provided event and assign a new +// Timestamp to context.Time if it is found to be nil or zero. +func DefaultTimeToNowIfNotSet(ctx context.Context, event event.Event) event.Event { + if event.Context != nil { + if event.Time().IsZero() { + event.Context = event.Context.Clone() + event.SetTime(time.Now()) + } + } + return event +} + +// NewDefaultDataContentTypeIfNotSet returns a defaulter that will inspect the +// provided event and set the provided content type if content type is found +// to be empty. 
+func NewDefaultDataContentTypeIfNotSet(contentType string) EventDefaulter { + return func(ctx context.Context, event event.Event) event.Event { + if event.Context != nil { + if event.DataContentType() == "" { + event.SetDataContentType(contentType) + } + } + return event + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go new file mode 100644 index 000000000..e09962ce6 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go @@ -0,0 +1,11 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package client holds the recommended entry points for interacting with the CloudEvents Golang SDK. The client wraps +a selected transport. The client adds validation and defaulting for sending events, and flexible receiver method +registration. For full details, read the `client.Client` documentation. +*/ +package client diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go b/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go new file mode 100644 index 000000000..94a4b4e65 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go @@ -0,0 +1,45 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + cecontext "github.com/cloudevents/sdk-go/v2/context" + thttp "github.com/cloudevents/sdk-go/v2/protocol/http" + "go.uber.org/zap" + "net/http" +) + +func NewHTTPReceiveHandler(ctx context.Context, p *thttp.Protocol, fn interface{}) (*EventReceiver, error) { + invoker, err := newReceiveInvoker(fn, noopObservabilityService{}, nil) //TODO(slinkydeveloper) maybe not nil? 
+ if err != nil { + return nil, err + } + + return &EventReceiver{ + p: p, + invoker: invoker, + }, nil +} + +type EventReceiver struct { + p *thttp.Protocol + invoker Invoker +} + +func (r *EventReceiver) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + // Prepare to handle the message if there's one (context cancellation will ensure this closes) + go func() { + ctx := req.Context() + msg, respFn, err := r.p.Respond(ctx) + if err != nil { + cecontext.LoggerFrom(context.TODO()).Debugw("failed to call Respond", zap.Error(err)) + } else if err := r.invoker.Invoke(ctx, msg, respFn); err != nil { + cecontext.LoggerFrom(context.TODO()).Debugw("failed to call Invoke", zap.Error(err)) + } + }() + r.p.ServeHTTP(rw, req) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go b/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go new file mode 100644 index 000000000..403fb0f55 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go @@ -0,0 +1,137 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "fmt" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +type Invoker interface { + Invoke(context.Context, binding.Message, protocol.ResponseFn) error + IsReceiver() bool + IsResponder() bool +} + +var _ Invoker = (*receiveInvoker)(nil) + +func newReceiveInvoker(fn interface{}, observabilityService ObservabilityService, inboundContextDecorators []func(context.Context, binding.Message) context.Context, fns ...EventDefaulter) (Invoker, error) { + r := &receiveInvoker{ + eventDefaulterFns: fns, + observabilityService: observabilityService, + inboundContextDecorators: inboundContextDecorators, + } + + if fn, err := receiver(fn); err != nil { + return nil, err + } else { + r.fn = fn + } + + return r, nil +} + 
+type receiveInvoker struct { + fn *receiverFn + observabilityService ObservabilityService + eventDefaulterFns []EventDefaulter + inboundContextDecorators []func(context.Context, binding.Message) context.Context +} + +func (r *receiveInvoker) Invoke(ctx context.Context, m binding.Message, respFn protocol.ResponseFn) (err error) { + defer func() { + err = m.Finish(err) + }() + + var respMsg binding.Message + var result protocol.Result + + e, eventErr := binding.ToEvent(ctx, m) + switch { + case eventErr != nil && r.fn.hasEventIn: + r.observabilityService.RecordReceivedMalformedEvent(ctx, eventErr) + return respFn(ctx, nil, protocol.NewReceipt(false, "failed to convert Message to Event: %w", eventErr)) + case r.fn != nil: + // Check if event is valid before invoking the receiver function + if e != nil { + if validationErr := e.Validate(); validationErr != nil { + r.observabilityService.RecordReceivedMalformedEvent(ctx, validationErr) + return respFn(ctx, nil, protocol.NewReceipt(false, "validation error in incoming event: %w", validationErr)) + } + } + + // Let's invoke the receiver fn + var resp *event.Event + resp, result = func() (resp *event.Event, result protocol.Result) { + defer func() { + if r := recover(); r != nil { + result = fmt.Errorf("call to Invoker.Invoke(...) has panicked: %v", r) + cecontext.LoggerFrom(ctx).Error(result) + } + }() + ctx = computeInboundContext(m, ctx, r.inboundContextDecorators) + + var cb func(error) + ctx, cb = r.observabilityService.RecordCallingInvoker(ctx, e) + + resp, result = r.fn.invoke(ctx, e) + defer cb(result) + return + }() + + if respFn == nil { + break + } + + // Apply the defaulter chain to the outgoing event. + if resp != nil && len(r.eventDefaulterFns) > 0 { + for _, fn := range r.eventDefaulterFns { + *resp = fn(ctx, *resp) + } + // Validate the event conforms to the CloudEvents Spec. 
+ if vErr := resp.Validate(); vErr != nil { + cecontext.LoggerFrom(ctx).Errorf("cloudevent validation failed on response event: %v", vErr) + } + } + + // because binding.Message is an interface, casting a nil resp + // here would make future comparisons to nil false + if resp != nil { + respMsg = (*binding.EventMessage)(resp) + } + } + + if respFn == nil { + // let the protocol ACK based on the result + return result + } + + return respFn(ctx, respMsg, result) +} + +func (r *receiveInvoker) IsReceiver() bool { + return !r.fn.hasEventOut +} + +func (r *receiveInvoker) IsResponder() bool { + return r.fn.hasEventOut +} + +func computeInboundContext(message binding.Message, fallback context.Context, inboundContextDecorators []func(context.Context, binding.Message) context.Context) context.Context { + result := fallback + if mctx, ok := message.(binding.MessageContext); ok { + result = cecontext.ValuesDelegating(mctx.Context(), fallback) + } + for _, f := range inboundContextDecorators { + result = f(result, message) + } + return result +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go b/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go new file mode 100644 index 000000000..75005d3bb --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go @@ -0,0 +1,54 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/event" +) + +// ObservabilityService is an interface users can implement to record metrics, create tracing spans, and plug other observability tools in the Client +type ObservabilityService interface { + // InboundContextDecorators is a method that returns the InboundContextDecorators that must be mounted in the Client to properly propagate some tracing informations. 
+ InboundContextDecorators() []func(context.Context, binding.Message) context.Context + + // RecordReceivedMalformedEvent is invoked when an event was received but it's malformed or invalid. + RecordReceivedMalformedEvent(ctx context.Context, err error) + // RecordCallingInvoker is invoked before the user function is invoked. + // The returned callback will be invoked after the user finishes to process the event with the eventual processing error + // The error provided to the callback could be both a processing error, or a result + RecordCallingInvoker(ctx context.Context, event *event.Event) (context.Context, func(errOrResult error)) + // RecordSendingEvent is invoked before the event is sent. + // The returned callback will be invoked when the response is received + // The error provided to the callback could be both a processing error, or a result + RecordSendingEvent(ctx context.Context, event event.Event) (context.Context, func(errOrResult error)) + + // RecordRequestEvent is invoked before the event is requested. 
+ // The returned callback will be invoked when the response is received + RecordRequestEvent(ctx context.Context, event event.Event) (context.Context, func(errOrResult error, event *event.Event)) +} + +type noopObservabilityService struct{} + +func (n noopObservabilityService) InboundContextDecorators() []func(context.Context, binding.Message) context.Context { + return nil +} + +func (n noopObservabilityService) RecordReceivedMalformedEvent(ctx context.Context, err error) {} + +func (n noopObservabilityService) RecordCallingInvoker(ctx context.Context, event *event.Event) (context.Context, func(errOrResult error)) { + return ctx, func(errOrResult error) {} +} + +func (n noopObservabilityService) RecordSendingEvent(ctx context.Context, event event.Event) (context.Context, func(errOrResult error)) { + return ctx, func(errOrResult error) {} +} + +func (n noopObservabilityService) RecordRequestEvent(ctx context.Context, e event.Event) (context.Context, func(errOrResult error, event *event.Event)) { + return ctx, func(errOrResult error, event *event.Event) {} +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/options.go b/vendor/github.com/cloudevents/sdk-go/v2/client/options.go new file mode 100644 index 000000000..938478162 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/options.go @@ -0,0 +1,128 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "fmt" + + "github.com/cloudevents/sdk-go/v2/binding" +) + +// Option is the function signature required to be considered an client.Option. +type Option func(interface{}) error + +// WithEventDefaulter adds an event defaulter to the end of the defaulter chain. 
+func WithEventDefaulter(fn EventDefaulter) Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + if fn == nil { + return fmt.Errorf("client option was given an nil event defaulter") + } + c.eventDefaulterFns = append(c.eventDefaulterFns, fn) + } + return nil + } +} + +func WithForceBinary() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.outboundContextDecorators = append(c.outboundContextDecorators, binding.WithForceBinary) + } + return nil + } +} + +func WithForceStructured() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.outboundContextDecorators = append(c.outboundContextDecorators, binding.WithForceStructured) + } + return nil + } +} + +// WithUUIDs adds DefaultIDToUUIDIfNotSet event defaulter to the end of the +// defaulter chain. +func WithUUIDs() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.eventDefaulterFns = append(c.eventDefaulterFns, DefaultIDToUUIDIfNotSet) + } + return nil + } +} + +// WithTimeNow adds DefaultTimeToNowIfNotSet event defaulter to the end of the +// defaulter chain. +func WithTimeNow() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.eventDefaulterFns = append(c.eventDefaulterFns, DefaultTimeToNowIfNotSet) + } + return nil + } +} + +// WithTracePropagation enables trace propagation via the distributed tracing +// extension. +// Deprecated: this is now noop and will be removed in future releases. +// Don't use distributed tracing extension to propagate traces: +// https://github.com/cloudevents/spec/blob/v1.0.1/extensions/distributed-tracing.md#using-the-distributed-tracing-extension +func WithTracePropagation() Option { + return func(i interface{}) error { + return nil + } +} + +// WithPollGoroutines configures how much goroutines should be used to +// poll the Receiver/Responder/Protocol implementations. 
+// Default value is GOMAXPROCS +func WithPollGoroutines(pollGoroutines int) Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.pollGoroutines = pollGoroutines + } + return nil + } +} + +// WithObservabilityService configures the observability service to use +// to record traces and metrics +func WithObservabilityService(service ObservabilityService) Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.observabilityService = service + c.inboundContextDecorators = append(c.inboundContextDecorators, service.InboundContextDecorators()...) + } + return nil + } +} + +// WithInboundContextDecorator configures a new inbound context decorator. +// Inbound context decorators are invoked to wrap additional informations from the binding.Message +// and propagate these informations in the context passed to the event receiver. +func WithInboundContextDecorator(dec func(context.Context, binding.Message) context.Context) Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.inboundContextDecorators = append(c.inboundContextDecorators, dec) + } + return nil + } +} + +// WithBlockingCallback makes the callback passed into StartReceiver is executed as a blocking call, +// i.e. in each poll go routine, the next event will not be received until the callback on current event completes. 
+// To make event processing serialized (no concurrency), use this option along with WithPollGoroutines(1) +func WithBlockingCallback() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.blockingCallback = true + } + return nil + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go b/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go new file mode 100644 index 000000000..b1ab532d7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go @@ -0,0 +1,194 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "errors" + "fmt" + "reflect" + + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// ReceiveFull is the signature of a fn to be invoked for incoming cloudevents. +type ReceiveFull func(context.Context, event.Event) protocol.Result + +type receiverFn struct { + numIn int + numOut int + fnValue reflect.Value + + hasContextIn bool + hasEventIn bool + + hasEventOut bool + hasResultOut bool +} + +const ( + inParamUsage = "expected a function taking either no parameters, one or more of (context.Context, event.Event) ordered" + outParamUsage = "expected a function returning one or mode of (*event.Event, protocol.Result) ordered" +) + +var ( + contextType = reflect.TypeOf((*context.Context)(nil)).Elem() + eventType = reflect.TypeOf((*event.Event)(nil)).Elem() + eventPtrType = reflect.TypeOf((*event.Event)(nil)) // want the ptr type + resultType = reflect.TypeOf((*protocol.Result)(nil)).Elem() +) + +// receiver creates a receiverFn wrapper class that is used by the client to +// validate and invoke the provided function. 
+// Valid fn signatures are: +// * func() +// * func() protocol.Result +// * func(context.Context) +// * func(context.Context) protocol.Result +// * func(event.Event) +// * func(event.Event) transport.Result +// * func(context.Context, event.Event) +// * func(context.Context, event.Event) protocol.Result +// * func(event.Event) *event.Event +// * func(event.Event) (*event.Event, protocol.Result) +// * func(context.Context, event.Event) *event.Event +// * func(context.Context, event.Event) (*event.Event, protocol.Result) +// +func receiver(fn interface{}) (*receiverFn, error) { + fnType := reflect.TypeOf(fn) + if fnType.Kind() != reflect.Func { + return nil, errors.New("must pass a function to handle events") + } + + r := &receiverFn{ + fnValue: reflect.ValueOf(fn), + numIn: fnType.NumIn(), + numOut: fnType.NumOut(), + } + + if err := r.validate(fnType); err != nil { + return nil, err + } + + return r, nil +} + +func (r *receiverFn) invoke(ctx context.Context, e *event.Event) (*event.Event, protocol.Result) { + args := make([]reflect.Value, 0, r.numIn) + + if r.numIn > 0 { + if r.hasContextIn { + args = append(args, reflect.ValueOf(ctx)) + } + if r.hasEventIn { + args = append(args, reflect.ValueOf(*e)) + } + } + v := r.fnValue.Call(args) + var respOut protocol.Result + var eOut *event.Event + if r.numOut > 0 { + i := 0 + if r.hasEventOut { + if eo, ok := v[i].Interface().(*event.Event); ok { + eOut = eo + } + i++ // <-- note, need to inc i. + } + if r.hasResultOut { + if resp, ok := v[i].Interface().(protocol.Result); ok { + respOut = resp + } + } + } + return eOut, respOut +} + +// Verifies that the inputs to a function have a valid signature +// Valid input is to be [0, all] of +// context.Context, event.Event in this order. 
+func (r *receiverFn) validateInParamSignature(fnType reflect.Type) error { + r.hasContextIn = false + r.hasEventIn = false + + switch fnType.NumIn() { + case 2: + // has to be (context.Context, event.Event) + if !eventType.ConvertibleTo(fnType.In(1)) { + return fmt.Errorf("%s; cannot convert parameter 2 to %s from event.Event", inParamUsage, fnType.In(1)) + } else { + r.hasEventIn = true + } + fallthrough + case 1: + if !contextType.ConvertibleTo(fnType.In(0)) { + if !eventType.ConvertibleTo(fnType.In(0)) { + return fmt.Errorf("%s; cannot convert parameter 1 to %s from context.Context or event.Event", inParamUsage, fnType.In(0)) + } else if r.hasEventIn { + return fmt.Errorf("%s; duplicate parameter of type event.Event", inParamUsage) + } else { + r.hasEventIn = true + } + } else { + r.hasContextIn = true + } + fallthrough + case 0: + return nil + + default: + return fmt.Errorf("%s; function has too many parameters (%d)", inParamUsage, fnType.NumIn()) + } +} + +// Verifies that the outputs of a function have a valid signature +// Valid output signatures to be [0, all] of +// *event.Event, transport.Result in this order +func (r *receiverFn) validateOutParamSignature(fnType reflect.Type) error { + r.hasEventOut = false + r.hasResultOut = false + + switch fnType.NumOut() { + case 2: + // has to be (*event.Event, transport.Result) + if !fnType.Out(1).ConvertibleTo(resultType) { + return fmt.Errorf("%s; cannot convert parameter 2 from %s to event.Response", outParamUsage, fnType.Out(1)) + } else { + r.hasResultOut = true + } + fallthrough + case 1: + if !fnType.Out(0).ConvertibleTo(resultType) { + if !fnType.Out(0).ConvertibleTo(eventPtrType) { + return fmt.Errorf("%s; cannot convert parameter 1 from %s to *event.Event or transport.Result", outParamUsage, fnType.Out(0)) + } else { + r.hasEventOut = true + } + } else if r.hasResultOut { + return fmt.Errorf("%s; duplicate parameter of type event.Response", outParamUsage) + } else { + r.hasResultOut = true + } + 
fallthrough + case 0: + return nil + default: + return fmt.Errorf("%s; function has too many return types (%d)", outParamUsage, fnType.NumOut()) + } +} + +// validateReceiverFn validates that a function has the right number of in and +// out params and that they are of allowed types. +func (r *receiverFn) validate(fnType reflect.Type) error { + if err := r.validateInParamSignature(fnType); err != nil { + return err + } + if err := r.validateOutParamSignature(fnType); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/context.go b/vendor/github.com/cloudevents/sdk-go/v2/context/context.go new file mode 100644 index 000000000..fc9ef0315 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/context.go @@ -0,0 +1,110 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package context + +import ( + "context" + "net/url" + "time" +) + +// Opaque key type used to store target +type targetKeyType struct{} + +var targetKey = targetKeyType{} + +// WithTarget returns back a new context with the given target. Target is intended to be transport dependent. +// For http transport, `target` should be a full URL and will be injected into the outbound http request. +func WithTarget(ctx context.Context, target string) context.Context { + return context.WithValue(ctx, targetKey, target) +} + +// TargetFrom looks in the given context and returns `target` as a parsed url if found and valid, otherwise nil. +func TargetFrom(ctx context.Context) *url.URL { + c := ctx.Value(targetKey) + if c != nil { + if s, ok := c.(string); ok && s != "" { + if target, err := url.Parse(s); err == nil { + return target + } + } + } + return nil +} + +// Opaque key type used to store topic +type topicKeyType struct{} + +var topicKey = topicKeyType{} + +// WithTopic returns back a new context with the given topic. Topic is intended to be transport dependent. 
+// For pubsub transport, `topic` should be a Pub/Sub Topic ID. +func WithTopic(ctx context.Context, topic string) context.Context { + return context.WithValue(ctx, topicKey, topic) +} + +// TopicFrom looks in the given context and returns `topic` as a string if found and valid, otherwise "". +func TopicFrom(ctx context.Context) string { + c := ctx.Value(topicKey) + if c != nil { + if s, ok := c.(string); ok { + return s + } + } + return "" +} + +// Opaque key type used to store retry parameters +type retriesKeyType struct{} + +var retriesKey = retriesKeyType{} + +// WithRetriesConstantBackoff returns back a new context with retries parameters using constant backoff strategy. +// MaxTries is the maximum number for retries and delay is the time interval between retries +func WithRetriesConstantBackoff(ctx context.Context, delay time.Duration, maxTries int) context.Context { + return WithRetryParams(ctx, &RetryParams{ + Strategy: BackoffStrategyConstant, + Period: delay, + MaxTries: maxTries, + }) +} + +// WithRetriesLinearBackoff returns back a new context with retries parameters using linear backoff strategy. +// MaxTries is the maximum number for retries and delay*tries is the time interval between retries +func WithRetriesLinearBackoff(ctx context.Context, delay time.Duration, maxTries int) context.Context { + return WithRetryParams(ctx, &RetryParams{ + Strategy: BackoffStrategyLinear, + Period: delay, + MaxTries: maxTries, + }) +} + +// WithRetriesExponentialBackoff returns back a new context with retries parameters using exponential backoff strategy. +// MaxTries is the maximum number for retries and period is the amount of time to wait, used as `period * 2^retries`. 
+func WithRetriesExponentialBackoff(ctx context.Context, period time.Duration, maxTries int) context.Context { + return WithRetryParams(ctx, &RetryParams{ + Strategy: BackoffStrategyExponential, + Period: period, + MaxTries: maxTries, + }) +} + +// WithRetryParams returns back a new context with retries parameters. +func WithRetryParams(ctx context.Context, rp *RetryParams) context.Context { + return context.WithValue(ctx, retriesKey, rp) +} + +// RetriesFrom looks in the given context and returns the retries parameters if found. +// Otherwise returns the default retries configuration (ie. no retries). +func RetriesFrom(ctx context.Context) *RetryParams { + c := ctx.Value(retriesKey) + if c != nil { + if s, ok := c.(*RetryParams); ok { + return s + } + } + return &DefaultRetryParams +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go b/vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go new file mode 100644 index 000000000..434a4da7a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go @@ -0,0 +1,25 @@ +package context + +import "context" + +type valuesDelegating struct { + context.Context + parent context.Context +} + +// ValuesDelegating wraps a child and parent context. It will perform Value() +// lookups first on the child, and then fall back to the child. All other calls +// go solely to the child context. 
+func ValuesDelegating(child, parent context.Context) context.Context { + return &valuesDelegating{ + Context: child, + parent: parent, + } +} + +func (c *valuesDelegating) Value(key interface{}) interface{} { + if val := c.Context.Value(key); val != nil { + return val + } + return c.parent.Value(key) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go new file mode 100644 index 000000000..0b2dcaf70 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go @@ -0,0 +1,10 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package context holds the last resort overrides and fyi objects that can be passed to clients and transports added to +context.Context objects. +*/ +package context diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go b/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go new file mode 100644 index 000000000..b3087a79f --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go @@ -0,0 +1,48 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package context + +import ( + "context" + + "go.uber.org/zap" +) + +// Opaque key type used to store logger +type loggerKeyType struct{} + +var loggerKey = loggerKeyType{} + +// fallbackLogger is the logger is used when there is no logger attached to the context. +var fallbackLogger *zap.SugaredLogger + +func init() { + if logger, err := zap.NewProduction(); err != nil { + // We failed to create a fallback logger. + fallbackLogger = zap.NewNop().Sugar() + } else { + fallbackLogger = logger.Named("fallback").Sugar() + } +} + +// WithLogger returns a new context with the logger injected into the given context. 
+func WithLogger(ctx context.Context, logger *zap.SugaredLogger) context.Context { + if logger == nil { + return context.WithValue(ctx, loggerKey, fallbackLogger) + } + return context.WithValue(ctx, loggerKey, logger) +} + +// LoggerFrom returns the logger stored in context. +func LoggerFrom(ctx context.Context) *zap.SugaredLogger { + l := ctx.Value(loggerKey) + if l != nil { + if logger, ok := l.(*zap.SugaredLogger); ok { + return logger + } + } + return fallbackLogger +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go b/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go new file mode 100644 index 000000000..ec17df72e --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go @@ -0,0 +1,76 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package context + +import ( + "context" + "errors" + "math" + "time" +) + +type BackoffStrategy string + +const ( + BackoffStrategyNone = "none" + BackoffStrategyConstant = "constant" + BackoffStrategyLinear = "linear" + BackoffStrategyExponential = "exponential" +) + +var DefaultRetryParams = RetryParams{Strategy: BackoffStrategyNone} + +// RetryParams holds parameters applied to retries +type RetryParams struct { + // Strategy is the backoff strategy to applies between retries + Strategy BackoffStrategy + + // MaxTries is the maximum number of times to retry request before giving up + MaxTries int + + // Period is + // - for none strategy: no delay + // - for constant strategy: the delay interval between retries + // - for linear strategy: interval between retries = Period * retries + // - for exponential strategy: interval between retries = Period * retries^2 + Period time.Duration +} + +// BackoffFor tries will return the time duration that should be used for this +// current try count. +// `tries` is assumed to be the number of times the caller has already retried. 
+func (r *RetryParams) BackoffFor(tries int) time.Duration { + switch r.Strategy { + case BackoffStrategyConstant: + return r.Period + case BackoffStrategyLinear: + return r.Period * time.Duration(tries) + case BackoffStrategyExponential: + exp := math.Exp2(float64(tries)) + return r.Period * time.Duration(exp) + case BackoffStrategyNone: + fallthrough // default + default: + return r.Period + } +} + +// Backoff is a blocking call to wait for the correct amount of time for the retry. +// `tries` is assumed to be the number of times the caller has already retried. +func (r *RetryParams) Backoff(ctx context.Context, tries int) error { + if tries > r.MaxTries { + return errors.New("too many retries") + } + ticker := time.NewTicker(r.BackoffFor(tries)) + select { + case <-ctx.Done(): + ticker.Stop() + return errors.New("context has been cancelled") + case <-ticker.C: + ticker.Stop() + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go b/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go new file mode 100644 index 000000000..a49522f82 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go @@ -0,0 +1,47 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +const ( + TextPlain = "text/plain" + TextJSON = "text/json" + ApplicationJSON = "application/json" + ApplicationXML = "application/xml" + ApplicationCloudEventsJSON = "application/cloudevents+json" + ApplicationCloudEventsBatchJSON = "application/cloudevents-batch+json" +) + +// StringOfApplicationJSON returns a string pointer to "application/json" +func StringOfApplicationJSON() *string { + a := ApplicationJSON + return &a +} + +// StringOfApplicationXML returns a string pointer to "application/xml" +func StringOfApplicationXML() *string { + a := ApplicationXML + return &a +} + +// StringOfTextPlain returns a string pointer to "text/plain" +func StringOfTextPlain() *string { + a := 
TextPlain + return &a +} + +// StringOfApplicationCloudEventsJSON returns a string pointer to +// "application/cloudevents+json" +func StringOfApplicationCloudEventsJSON() *string { + a := ApplicationCloudEventsJSON + return &a +} + +// StringOfApplicationCloudEventsBatchJSON returns a string pointer to +// "application/cloudevents-batch+json" +func StringOfApplicationCloudEventsBatchJSON() *string { + a := ApplicationCloudEventsBatchJSON + return &a +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go b/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go new file mode 100644 index 000000000..cf2152693 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go @@ -0,0 +1,16 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +const ( + Base64 = "base64" +) + +// StringOfBase64 returns a string pointer to "Base64" +func StringOfBase64() *string { + a := Base64 + return &a +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go new file mode 100644 index 000000000..3e077740b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go @@ -0,0 +1,78 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package datacodec + +import ( + "context" + "fmt" + + "github.com/cloudevents/sdk-go/v2/event/datacodec/json" + "github.com/cloudevents/sdk-go/v2/event/datacodec/text" + "github.com/cloudevents/sdk-go/v2/event/datacodec/xml" +) + +// Decoder is the expected function signature for decoding `in` to `out`. +// If Event sent the payload as base64, Decoder assumes that `in` is the +// decoded base64 byte array. +type Decoder func(ctx context.Context, in []byte, out interface{}) error + +// Encoder is the expected function signature for encoding `in` to bytes. 
+// Returns an error if the encoder has an issue encoding `in`. +type Encoder func(ctx context.Context, in interface{}) ([]byte, error) + +var decoder map[string]Decoder +var encoder map[string]Encoder + +func init() { + decoder = make(map[string]Decoder, 10) + encoder = make(map[string]Encoder, 10) + + AddDecoder("", json.Decode) + AddDecoder("application/json", json.Decode) + AddDecoder("text/json", json.Decode) + AddDecoder("application/xml", xml.Decode) + AddDecoder("text/xml", xml.Decode) + AddDecoder("text/plain", text.Decode) + + AddEncoder("", json.Encode) + AddEncoder("application/json", json.Encode) + AddEncoder("text/json", json.Encode) + AddEncoder("application/xml", xml.Encode) + AddEncoder("text/xml", xml.Encode) + AddEncoder("text/plain", text.Encode) +} + +// AddDecoder registers a decoder for a given content type. The codecs will use +// these to decode the data payload from a cloudevent.Event object. +func AddDecoder(contentType string, fn Decoder) { + decoder[contentType] = fn +} + +// AddEncoder registers an encoder for a given content type. The codecs will +// use these to encode the data payload for a cloudevent.Event object. +func AddEncoder(contentType string, fn Encoder) { + encoder[contentType] = fn +} + +// Decode looks up and invokes the decoder registered for the given content +// type. An error is returned if no decoder is registered for the given +// content type. +func Decode(ctx context.Context, contentType string, in []byte, out interface{}) error { + if fn, ok := decoder[contentType]; ok { + return fn(ctx, in, out) + } + return fmt.Errorf("[decode] unsupported content type: %q", contentType) +} + +// Encode looks up and invokes the encoder registered for the given content +// type. An error is returned if no encoder is registered for the given +// content type. 
+func Encode(ctx context.Context, contentType string, in interface{}) ([]byte, error) { + if fn, ok := encoder[contentType]; ok { + return fn(ctx, in) + } + return nil, fmt.Errorf("[encode] unsupported content type: %q", contentType) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go new file mode 100644 index 000000000..b681af887 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go @@ -0,0 +1,10 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package datacodec holds the data codec registry and adds known encoders and decoders supporting media types such as +`application/json` and `application/xml`. +*/ +package datacodec diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go new file mode 100644 index 000000000..734ade59f --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go @@ -0,0 +1,56 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package json + +import ( + "context" + "encoding/json" + "fmt" + "reflect" +) + +// Decode takes `in` as []byte. +// If Event sent the payload as base64, Decoder assumes that `in` is the +// decoded base64 byte array. +func Decode(ctx context.Context, in []byte, out interface{}) error { + if in == nil { + return nil + } + if out == nil { + return fmt.Errorf("out is nil") + } + + if err := json.Unmarshal(in, out); err != nil { + return fmt.Errorf("[json] found bytes \"%s\", but failed to unmarshal: %s", string(in), err.Error()) + } + return nil +} + +// Encode attempts to json.Marshal `in` into bytes. Encode will inspect `in` +// and returns `in` unmodified if it is detected that `in` is already a []byte; +// Or json.Marshal errors. 
+func Encode(ctx context.Context, in interface{}) ([]byte, error) { + if in == nil { + return nil, nil + } + + it := reflect.TypeOf(in) + switch it.Kind() { + case reflect.Slice: + if it.Elem().Kind() == reflect.Uint8 { + + if b, ok := in.([]byte); ok && len(b) > 0 { + // check to see if it is a pre-encoded byte string. + if b[0] == byte('"') || b[0] == byte('{') || b[0] == byte('[') { + return b, nil + } + } + + } + } + + return json.Marshal(in) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go new file mode 100644 index 000000000..33e1323c7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package json holds the encoder/decoder implementation for `application/json`. +*/ +package json diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go new file mode 100644 index 000000000..761a10113 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go @@ -0,0 +1,30 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package text + +import ( + "context" + "fmt" +) + +// Text codec converts []byte or string to string and vice-versa. 
+ +func Decode(_ context.Context, in []byte, out interface{}) error { + p, _ := out.(*string) + if p == nil { + return fmt.Errorf("text.Decode out: want *string, got %T", out) + } + *p = string(in) + return nil +} + +func Encode(_ context.Context, in interface{}) ([]byte, error) { + s, ok := in.(string) + if !ok { + return nil, fmt.Errorf("text.Encode in: want string, got %T", in) + } + return []byte(s), nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go new file mode 100644 index 000000000..af10577aa --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package text holds the encoder/decoder implementation for `text/plain`. +*/ +package text diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go new file mode 100644 index 000000000..de68ec3dc --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go @@ -0,0 +1,40 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package xml + +import ( + "context" + "encoding/xml" + "fmt" +) + +// Decode takes `in` as []byte. +// If Event sent the payload as base64, Decoder assumes that `in` is the +// decoded base64 byte array. +func Decode(ctx context.Context, in []byte, out interface{}) error { + if in == nil { + return nil + } + + if err := xml.Unmarshal(in, out); err != nil { + return fmt.Errorf("[xml] found bytes, but failed to unmarshal: %s %s", err.Error(), string(in)) + } + return nil +} + +// Encode attempts to xml.Marshal `in` into bytes. Encode will inspect `in` +// and returns `in` unmodified if it is detected that `in` is already a []byte; +// Or xml.Marshal errors. 
+func Encode(ctx context.Context, in interface{}) ([]byte, error) { + if b, ok := in.([]byte); ok { + // check to see if it is a pre-encoded byte string. + if len(b) > 0 && b[0] == byte('"') { + return b, nil + } + } + + return xml.Marshal(in) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go new file mode 100644 index 000000000..c8d73213f --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package xml holds the encoder/decoder implementation for `application/xml`. +*/ +package xml diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/doc.go new file mode 100644 index 000000000..31c22ce67 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package event provides primitives to work with CloudEvents specification: https://github.com/cloudevents/spec. +*/ +package event diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event.go new file mode 100644 index 000000000..94b5aa0ad --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event.go @@ -0,0 +1,126 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "bytes" + "encoding/json" + "strings" +) + +// Event represents the canonical representation of a CloudEvent. +type Event struct { + Context EventContext + DataEncoded []byte + // DataBase64 indicates if the event, when serialized, represents + // the data field using the base64 encoding. 
+ // In v0.3, this field is superseded by DataContentEncoding + DataBase64 bool + FieldErrors map[string]error +} + +const ( + defaultEventVersion = CloudEventsVersionV1 +) + +func (e *Event) fieldError(field string, err error) { + if e.FieldErrors == nil { + e.FieldErrors = make(map[string]error) + } + e.FieldErrors[field] = err +} + +func (e *Event) fieldOK(field string) { + if e.FieldErrors != nil { + delete(e.FieldErrors, field) + } +} + +// New returns a new Event, an optional version can be passed to change the +// default spec version from 1.0 to the provided version. +func New(version ...string) Event { + specVersion := defaultEventVersion + if len(version) >= 1 { + specVersion = version[0] + } + e := &Event{} + e.SetSpecVersion(specVersion) + return *e +} + +// ExtensionAs is deprecated: access extensions directly via the e.Extensions() map. +// Use functions in the types package to convert extension values. +// For example replace this: +// +// var i int +// err := e.ExtensionAs("foo", &i) +// +// With this: +// +// i, err := types.ToInteger(e.Extensions["foo"]) +// +func (e Event) ExtensionAs(name string, obj interface{}) error { + return e.Context.ExtensionAs(name, obj) +} + +// String returns a pretty-printed representation of the Event. 
+func (e Event) String() string { + b := strings.Builder{} + + b.WriteString(e.Context.String()) + + if e.DataEncoded != nil { + if e.DataBase64 { + b.WriteString("Data (binary),\n ") + } else { + b.WriteString("Data,\n ") + } + switch e.DataMediaType() { + case ApplicationJSON: + var prettyJSON bytes.Buffer + err := json.Indent(&prettyJSON, e.DataEncoded, " ", " ") + if err != nil { + b.Write(e.DataEncoded) + } else { + b.Write(prettyJSON.Bytes()) + } + default: + b.Write(e.DataEncoded) + } + b.WriteString("\n") + } + + return b.String() +} + +func (e Event) Clone() Event { + out := Event{} + out.Context = e.Context.Clone() + out.DataEncoded = cloneBytes(e.DataEncoded) + out.DataBase64 = e.DataBase64 + out.FieldErrors = e.cloneFieldErrors() + return out +} + +func cloneBytes(in []byte) []byte { + if in == nil { + return nil + } + out := make([]byte, len(in)) + copy(out, in) + return out +} + +func (e Event) cloneFieldErrors() map[string]error { + if e.FieldErrors == nil { + return nil + } + newFE := make(map[string]error, len(e.FieldErrors)) + for k, v := range e.FieldErrors { + newFE[k] = v + } + return newFE +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go new file mode 100644 index 000000000..8fc449ed9 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go @@ -0,0 +1,118 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "context" + "encoding/base64" + "fmt" + "strconv" + + "github.com/cloudevents/sdk-go/v2/event/datacodec" +) + +// SetData encodes the given payload with the given content type. +// If the provided payload is a byte array, when marshalled to json it will be encoded as base64. +// If the provided payload is different from byte array, datacodec.Encode is invoked to attempt a +// marshalling to byte array. 
+func (e *Event) SetData(contentType string, obj interface{}) error { + e.SetDataContentType(contentType) + + if e.SpecVersion() != CloudEventsVersionV1 { + return e.legacySetData(obj) + } + + // Version 1.0 and above. + switch obj := obj.(type) { + case []byte: + e.DataEncoded = obj + e.DataBase64 = true + default: + data, err := datacodec.Encode(context.Background(), e.DataMediaType(), obj) + if err != nil { + return err + } + e.DataEncoded = data + e.DataBase64 = false + } + + return nil +} + +// Deprecated: Delete when we do not have to support Spec v0.3. +func (e *Event) legacySetData(obj interface{}) error { + data, err := datacodec.Encode(context.Background(), e.DataMediaType(), obj) + if err != nil { + return err + } + if e.DeprecatedDataContentEncoding() == Base64 { + buf := make([]byte, base64.StdEncoding.EncodedLen(len(data))) + base64.StdEncoding.Encode(buf, data) + e.DataEncoded = buf + e.DataBase64 = false + } else { + data, err := datacodec.Encode(context.Background(), e.DataMediaType(), obj) + if err != nil { + return err + } + e.DataEncoded = data + e.DataBase64 = false + } + return nil +} + +const ( + quotes = `"'` +) + +func (e Event) Data() []byte { + return e.DataEncoded +} + +// DataAs attempts to populate the provided data object with the event payload. +// obj should be a pointer type. +func (e Event) DataAs(obj interface{}) error { + data := e.Data() + + if len(data) == 0 { + // No data. + return nil + } + + if e.SpecVersion() != CloudEventsVersionV1 { + var err error + if data, err = e.legacyConvertData(data); err != nil { + return err + } + } + + return datacodec.Decode(context.Background(), e.DataMediaType(), data, obj) +} + +func (e Event) legacyConvertData(data []byte) ([]byte, error) { + if e.Context.DeprecatedGetDataContentEncoding() == Base64 { + var bs []byte + // test to see if we need to unquote the data. 
+ if data[0] == quotes[0] || data[0] == quotes[1] { + str, err := strconv.Unquote(string(data)) + if err != nil { + return nil, err + } + bs = []byte(str) + } else { + bs = data + } + + buf := make([]byte, base64.StdEncoding.DecodedLen(len(bs))) + n, err := base64.StdEncoding.Decode(buf, bs) + if err != nil { + return nil, fmt.Errorf("failed to decode data from base64: %s", err.Error()) + } + data = buf[:n] + } + + return data, nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go new file mode 100644 index 000000000..2809fed57 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go @@ -0,0 +1,102 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "time" +) + +// EventReader is the interface for reading through an event from attributes. +type EventReader interface { + // SpecVersion returns event.Context.GetSpecVersion(). + SpecVersion() string + // Type returns event.Context.GetType(). + Type() string + // Source returns event.Context.GetSource(). + Source() string + // Subject returns event.Context.GetSubject(). + Subject() string + // ID returns event.Context.GetID(). + ID() string + // Time returns event.Context.GetTime(). + Time() time.Time + // DataSchema returns event.Context.GetDataSchema(). + DataSchema() string + // DataContentType returns event.Context.GetDataContentType(). + DataContentType() string + // DataMediaType returns event.Context.GetDataMediaType(). + DataMediaType() string + // DeprecatedDataContentEncoding returns event.Context.DeprecatedGetDataContentEncoding(). + DeprecatedDataContentEncoding() string + + // Extension Attributes + + // Extensions returns the event.Context.GetExtensions(). + // Extensions use the CloudEvents type system, details in package cloudevents/types. 
+ Extensions() map[string]interface{} + + // ExtensionAs returns event.Context.ExtensionAs(name, obj). + // + // DEPRECATED: Access extensions directly via the e.Extensions() map. + // Use functions in the types package to convert extension values. + // For example replace this: + // + // var i int + // err := e.ExtensionAs("foo", &i) + // + // With this: + // + // i, err := types.ToInteger(e.Extensions["foo"]) + // + ExtensionAs(string, interface{}) error + + // Data Attribute + + // Data returns the raw data buffer + // If the event was encoded with base64 encoding, Data returns the already decoded + // byte array + Data() []byte + + // DataAs attempts to populate the provided data object with the event payload. + DataAs(interface{}) error +} + +// EventWriter is the interface for writing through an event onto attributes. +// If an error is thrown by a sub-component, EventWriter caches the error +// internally and exposes errors with a call to event.Validate(). +type EventWriter interface { + // Context Attributes + + // SetSpecVersion performs event.Context.SetSpecVersion. + SetSpecVersion(string) + // SetType performs event.Context.SetType. + SetType(string) + // SetSource performs event.Context.SetSource. + SetSource(string) + // SetSubject( performs event.Context.SetSubject. + SetSubject(string) + // SetID performs event.Context.SetID. + SetID(string) + // SetTime performs event.Context.SetTime. + SetTime(time.Time) + // SetDataSchema performs event.Context.SetDataSchema. + SetDataSchema(string) + // SetDataContentType performs event.Context.SetDataContentType. + SetDataContentType(string) + // DeprecatedSetDataContentEncoding performs event.Context.DeprecatedSetDataContentEncoding. + SetDataContentEncoding(string) + + // Extension Attributes + + // SetExtension performs event.Context.SetExtension. + SetExtension(string, interface{}) + + // SetData encodes the given payload with the given content type. 
+ // If the provided payload is a byte array, when marshalled to json it will be encoded as base64. + // If the provided payload is different from byte array, datacodec.Encode is invoked to attempt a + // marshalling to byte array. + SetData(string, interface{}) error +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go new file mode 100644 index 000000000..c5f2dc03c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go @@ -0,0 +1,203 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "strings" + + jsoniter "github.com/json-iterator/go" +) + +// WriteJson writes the in event in the provided writer. +// Note: this function assumes the input event is valid. +func WriteJson(in *Event, writer io.Writer) error { + stream := jsoniter.ConfigFastest.BorrowStream(writer) + defer jsoniter.ConfigFastest.ReturnStream(stream) + stream.WriteObjectStart() + + var ext map[string]interface{} + var dct *string + var isBase64 bool + + // Write the context (without the extensions) + switch eventContext := in.Context.(type) { + case *EventContextV03: + // Set a bunch of variables we need later + ext = eventContext.Extensions + dct = eventContext.DataContentType + + stream.WriteObjectField("specversion") + stream.WriteString(CloudEventsVersionV03) + stream.WriteMore() + + stream.WriteObjectField("id") + stream.WriteString(eventContext.ID) + stream.WriteMore() + + stream.WriteObjectField("source") + stream.WriteString(eventContext.Source.String()) + stream.WriteMore() + + stream.WriteObjectField("type") + stream.WriteString(eventContext.Type) + + if eventContext.Subject != nil { + stream.WriteMore() + stream.WriteObjectField("subject") + stream.WriteString(*eventContext.Subject) + } + + if eventContext.DataContentEncoding != nil { + isBase64 = true + 
stream.WriteMore() + stream.WriteObjectField("datacontentencoding") + stream.WriteString(*eventContext.DataContentEncoding) + } + + if eventContext.DataContentType != nil { + stream.WriteMore() + stream.WriteObjectField("datacontenttype") + stream.WriteString(*eventContext.DataContentType) + } + + if eventContext.SchemaURL != nil { + stream.WriteMore() + stream.WriteObjectField("schemaurl") + stream.WriteString(eventContext.SchemaURL.String()) + } + + if eventContext.Time != nil { + stream.WriteMore() + stream.WriteObjectField("time") + stream.WriteString(eventContext.Time.String()) + } + case *EventContextV1: + // Set a bunch of variables we need later + ext = eventContext.Extensions + dct = eventContext.DataContentType + isBase64 = in.DataBase64 + + stream.WriteObjectField("specversion") + stream.WriteString(CloudEventsVersionV1) + stream.WriteMore() + + stream.WriteObjectField("id") + stream.WriteString(eventContext.ID) + stream.WriteMore() + + stream.WriteObjectField("source") + stream.WriteString(eventContext.Source.String()) + stream.WriteMore() + + stream.WriteObjectField("type") + stream.WriteString(eventContext.Type) + + if eventContext.Subject != nil { + stream.WriteMore() + stream.WriteObjectField("subject") + stream.WriteString(*eventContext.Subject) + } + + if eventContext.DataContentType != nil { + stream.WriteMore() + stream.WriteObjectField("datacontenttype") + stream.WriteString(*eventContext.DataContentType) + } + + if eventContext.DataSchema != nil { + stream.WriteMore() + stream.WriteObjectField("dataschema") + stream.WriteString(eventContext.DataSchema.String()) + } + + if eventContext.Time != nil { + stream.WriteMore() + stream.WriteObjectField("time") + stream.WriteString(eventContext.Time.String()) + } + default: + return fmt.Errorf("missing event context") + } + + // Let's do a check on the error + if stream.Error != nil { + return fmt.Errorf("error while writing the event attributes: %w", stream.Error) + } + + // Let's write the body + if 
in.DataEncoded != nil { + stream.WriteMore() + + // We need to figure out the media type first + var mediaType string + if dct == nil { + mediaType = ApplicationJSON + } else { + // This code is required to extract the media type from the full content type string (which might contain encoding and stuff) + contentType := *dct + i := strings.IndexRune(contentType, ';') + if i == -1 { + i = len(contentType) + } + mediaType = strings.TrimSpace(strings.ToLower(contentType[0:i])) + } + + isJson := mediaType == "" || mediaType == ApplicationJSON || mediaType == TextJSON + + // If isJson and no encoding to base64, we don't need to perform additional steps + if isJson && !isBase64 { + stream.WriteObjectField("data") + _, err := stream.Write(in.DataEncoded) + if err != nil { + return fmt.Errorf("error while writing data: %w", err) + } + } else { + if in.Context.GetSpecVersion() == CloudEventsVersionV1 && isBase64 { + stream.WriteObjectField("data_base64") + } else { + stream.WriteObjectField("data") + } + // At this point of we need to write to base 64 string, or we just need to write the plain string + if isBase64 { + stream.WriteString(base64.StdEncoding.EncodeToString(in.DataEncoded)) + } else { + stream.WriteString(string(in.DataEncoded)) + } + } + + } + + // Let's do a check on the error + if stream.Error != nil { + return fmt.Errorf("error while writing the event data: %w", stream.Error) + } + + for k, v := range ext { + stream.WriteMore() + stream.WriteObjectField(k) + stream.WriteVal(v) + } + + stream.WriteObjectEnd() + + // Let's do a check on the error + if stream.Error != nil { + return fmt.Errorf("error while writing the event extensions: %w", stream.Error) + } + return stream.Flush() +} + +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. 
+func (e Event) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + err := WriteJson(&e, &buf) + return buf.Bytes(), err +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go new file mode 100644 index 000000000..9d1aeeb65 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go @@ -0,0 +1,103 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "time" +) + +var _ EventReader = (*Event)(nil) + +// SpecVersion implements EventReader.SpecVersion +func (e Event) SpecVersion() string { + if e.Context != nil { + return e.Context.GetSpecVersion() + } + return "" +} + +// Type implements EventReader.Type +func (e Event) Type() string { + if e.Context != nil { + return e.Context.GetType() + } + return "" +} + +// Source implements EventReader.Source +func (e Event) Source() string { + if e.Context != nil { + return e.Context.GetSource() + } + return "" +} + +// Subject implements EventReader.Subject +func (e Event) Subject() string { + if e.Context != nil { + return e.Context.GetSubject() + } + return "" +} + +// ID implements EventReader.ID +func (e Event) ID() string { + if e.Context != nil { + return e.Context.GetID() + } + return "" +} + +// Time implements EventReader.Time +func (e Event) Time() time.Time { + if e.Context != nil { + return e.Context.GetTime() + } + return time.Time{} +} + +// DataSchema implements EventReader.DataSchema +func (e Event) DataSchema() string { + if e.Context != nil { + return e.Context.GetDataSchema() + } + return "" +} + +// DataContentType implements EventReader.DataContentType +func (e Event) DataContentType() string { + if e.Context != nil { + return e.Context.GetDataContentType() + } + return "" +} + +// DataMediaType returns the parsed DataMediaType of the event. If parsing +// fails, the empty string is returned. 
To retrieve the parsing error, use +// `Context.GetDataMediaType` instead. +func (e Event) DataMediaType() string { + if e.Context != nil { + mediaType, _ := e.Context.GetDataMediaType() + return mediaType + } + return "" +} + +// DeprecatedDataContentEncoding implements EventReader.DeprecatedDataContentEncoding +func (e Event) DeprecatedDataContentEncoding() string { + if e.Context != nil { + return e.Context.DeprecatedGetDataContentEncoding() + } + return "" +} + +// Extensions implements EventReader.Extensions +func (e Event) Extensions() map[string]interface{} { + if e.Context != nil { + return e.Context.GetExtensions() + } + return map[string]interface{}(nil) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go new file mode 100644 index 000000000..0dd88ae5a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go @@ -0,0 +1,480 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "encoding/base64" + "errors" + "fmt" + "io" + "sync" + + jsoniter "github.com/json-iterator/go" + + "github.com/cloudevents/sdk-go/v2/types" +) + +const specVersionV03Flag uint8 = 1 << 4 +const specVersionV1Flag uint8 = 1 << 5 +const dataBase64Flag uint8 = 1 << 6 +const dataContentTypeFlag uint8 = 1 << 7 + +func checkFlag(state uint8, flag uint8) bool { + return state&flag != 0 +} + +func appendFlag(state *uint8, flag uint8) { + *state = (*state) | flag +} + +var iterPool = sync.Pool{ + New: func() interface{} { + return jsoniter.Parse(jsoniter.ConfigFastest, nil, 1024) + }, +} + +func borrowIterator(reader io.Reader) *jsoniter.Iterator { + iter := iterPool.Get().(*jsoniter.Iterator) + iter.Reset(reader) + return iter +} + +func returnIterator(iter *jsoniter.Iterator) { + iter.Error = nil + iter.Attachment = nil + iterPool.Put(iter) +} + +func ReadJson(out *Event, reader io.Reader) error { + 
iterator := borrowIterator(reader) + defer returnIterator(iterator) + + return readJsonFromIterator(out, iterator) +} + +// ReadJson allows you to read the bytes reader as an event +func readJsonFromIterator(out *Event, iterator *jsoniter.Iterator) error { + // Parsing dependency graph: + // SpecVersion + // ^ ^ + // | +--------------+ + // + + + // All Attributes datacontenttype (and datacontentencoding for v0.3) + // (except datacontenttype) ^ + // | + // | + // + + // Data + + var state uint8 = 0 + var cachedData []byte + + var ( + // Universally parseable fields. + id string + typ string + source types.URIRef + subject *string + time *types.Timestamp + datacontenttype *string + extensions = make(map[string]interface{}) + + // These fields require knowledge about the specversion to be parsed. + schemaurl jsoniter.Any + datacontentencoding jsoniter.Any + dataschema jsoniter.Any + dataBase64 jsoniter.Any + ) + + for key := iterator.ReadObject(); key != ""; key = iterator.ReadObject() { + // Check if we have some error in our error cache + if iterator.Error != nil { + return iterator.Error + } + + // We have a key, now we need to figure out what to do + // depending on the parsing state + + // If it's a specversion, trigger state change + if key == "specversion" { + if checkFlag(state, specVersionV1Flag|specVersionV03Flag) { + return fmt.Errorf("specversion was already provided") + } + sv := iterator.ReadString() + + // Check proper specversion + switch sv { + case CloudEventsVersionV1: + con := &EventContextV1{ + ID: id, + Type: typ, + Source: source, + Subject: subject, + Time: time, + DataContentType: datacontenttype, + } + + // Add the fields relevant for the version ... 
+ if dataschema != nil { + var err error + con.DataSchema, err = toUriPtr(dataschema) + if err != nil { + return err + } + } + if dataBase64 != nil { + stream := jsoniter.ConfigFastest.BorrowStream(nil) + defer jsoniter.ConfigFastest.ReturnStream(stream) + dataBase64.WriteTo(stream) + cachedData = stream.Buffer() + if stream.Error != nil { + return stream.Error + } + appendFlag(&state, dataBase64Flag) + } + + // ... add all remaining fields as extensions. + if schemaurl != nil { + extensions["schemaurl"] = schemaurl.GetInterface() + } + if datacontentencoding != nil { + extensions["datacontentencoding"] = datacontentencoding.GetInterface() + } + + out.Context = con + appendFlag(&state, specVersionV1Flag) + case CloudEventsVersionV03: + con := &EventContextV03{ + ID: id, + Type: typ, + Source: source, + Subject: subject, + Time: time, + DataContentType: datacontenttype, + } + var err error + // Add the fields relevant for the version ... + if schemaurl != nil { + con.SchemaURL, err = toUriRefPtr(schemaurl) + if err != nil { + return err + } + } + if datacontentencoding != nil { + con.DataContentEncoding, err = toStrPtr(datacontentencoding) + if *con.DataContentEncoding != Base64 { + err = ValidationError{"datacontentencoding": errors.New("invalid datacontentencoding value, the only allowed value is 'base64'")} + } + if err != nil { + return err + } + appendFlag(&state, dataBase64Flag) + } + + // ... add all remaining fields as extensions. + if dataschema != nil { + extensions["dataschema"] = dataschema.GetInterface() + } + if dataBase64 != nil { + extensions["data_base64"] = dataBase64.GetInterface() + } + + out.Context = con + appendFlag(&state, specVersionV03Flag) + default: + return ValidationError{"specversion": errors.New("unknown value: " + sv)} + } + + // Apply all extensions to the context object. + for key, val := range extensions { + if err := out.Context.SetExtension(key, val); err != nil { + return err + } + } + continue + } + + // If no specversion ... 
+ if !checkFlag(state, specVersionV03Flag|specVersionV1Flag) { + switch key { + case "id": + id = iterator.ReadString() + case "type": + typ = iterator.ReadString() + case "source": + source = readUriRef(iterator) + case "subject": + subject = readStrPtr(iterator) + case "time": + time = readTimestamp(iterator) + case "datacontenttype": + datacontenttype = readStrPtr(iterator) + appendFlag(&state, dataContentTypeFlag) + case "data": + cachedData = iterator.SkipAndReturnBytes() + case "data_base64": + dataBase64 = iterator.ReadAny() + case "dataschema": + dataschema = iterator.ReadAny() + case "schemaurl": + schemaurl = iterator.ReadAny() + case "datacontentencoding": + datacontentencoding = iterator.ReadAny() + default: + extensions[key] = iterator.Read() + } + continue + } + + // From this point downward -> we can assume the event has a context pointer non nil + + // If it's a datacontenttype, trigger state change + if key == "datacontenttype" { + if checkFlag(state, dataContentTypeFlag) { + return fmt.Errorf("datacontenttype was already provided") + } + + dct := iterator.ReadString() + + switch ctx := out.Context.(type) { + case *EventContextV03: + ctx.DataContentType = &dct + case *EventContextV1: + ctx.DataContentType = &dct + } + appendFlag(&state, dataContentTypeFlag) + continue + } + + // If it's a datacontentencoding and it's v0.3, trigger state change + if checkFlag(state, specVersionV03Flag) && key == "datacontentencoding" { + if checkFlag(state, dataBase64Flag) { + return ValidationError{"datacontentencoding": errors.New("datacontentencoding was specified twice")} + } + + dce := iterator.ReadString() + + if dce != Base64 { + return ValidationError{"datacontentencoding": errors.New("invalid datacontentencoding value, the only allowed value is 'base64'")} + } + + out.Context.(*EventContextV03).DataContentEncoding = &dce + appendFlag(&state, dataBase64Flag) + continue + } + + // We can parse all attributes, except data. 
+ // If it's data or data_base64 and we don't have the attributes to process it, then we cache it + // The expanded form of this condition is: + // (checkFlag(state, specVersionV1Flag) && !checkFlag(state, dataContentTypeFlag) && (key == "data" || key == "data_base64")) || + // (checkFlag(state, specVersionV03Flag) && !(checkFlag(state, dataContentTypeFlag) && checkFlag(state, dataBase64Flag)) && key == "data") + if (state&(specVersionV1Flag|dataContentTypeFlag) == specVersionV1Flag && (key == "data" || key == "data_base64")) || + ((state&specVersionV03Flag == specVersionV03Flag) && (state&(dataContentTypeFlag|dataBase64Flag) != (dataContentTypeFlag | dataBase64Flag)) && key == "data") { + if key == "data_base64" { + appendFlag(&state, dataBase64Flag) + } + cachedData = iterator.SkipAndReturnBytes() + continue + } + + // At this point or this value is an attribute (excluding datacontenttype and datacontentencoding), or this value is data and this condition is valid: + // (specVersionV1Flag & dataContentTypeFlag) || (specVersionV03Flag & dataContentTypeFlag & dataBase64Flag) + switch eventContext := out.Context.(type) { + case *EventContextV03: + switch key { + case "id": + eventContext.ID = iterator.ReadString() + case "type": + eventContext.Type = iterator.ReadString() + case "source": + eventContext.Source = readUriRef(iterator) + case "subject": + eventContext.Subject = readStrPtr(iterator) + case "time": + eventContext.Time = readTimestamp(iterator) + case "schemaurl": + eventContext.SchemaURL = readUriRefPtr(iterator) + case "data": + iterator.Error = consumeData(out, checkFlag(state, dataBase64Flag), iterator) + default: + if eventContext.Extensions == nil { + eventContext.Extensions = make(map[string]interface{}, 1) + } + iterator.Error = eventContext.SetExtension(key, iterator.Read()) + } + case *EventContextV1: + switch key { + case "id": + eventContext.ID = iterator.ReadString() + case "type": + eventContext.Type = iterator.ReadString() + case "source": + 
eventContext.Source = readUriRef(iterator) + case "subject": + eventContext.Subject = readStrPtr(iterator) + case "time": + eventContext.Time = readTimestamp(iterator) + case "dataschema": + eventContext.DataSchema = readUriPtr(iterator) + case "data": + iterator.Error = consumeData(out, false, iterator) + case "data_base64": + iterator.Error = consumeData(out, true, iterator) + default: + if eventContext.Extensions == nil { + eventContext.Extensions = make(map[string]interface{}, 1) + } + iterator.Error = eventContext.SetExtension(key, iterator.Read()) + } + } + } + + if state&(specVersionV03Flag|specVersionV1Flag) == 0 { + return ValidationError{"specversion": errors.New("no specversion")} + } + + if iterator.Error != nil { + return iterator.Error + } + + // If there is a dataToken cached, we always defer at the end the processing + // because nor datacontenttype or datacontentencoding are mandatory. + if cachedData != nil { + return consumeDataAsBytes(out, checkFlag(state, dataBase64Flag), cachedData) + } + return nil +} + +func consumeDataAsBytes(e *Event, isBase64 bool, b []byte) error { + if isBase64 { + e.DataBase64 = true + + // Allocate payload byte buffer + base64Encoded := b[1 : len(b)-1] // remove quotes + e.DataEncoded = make([]byte, base64.StdEncoding.DecodedLen(len(base64Encoded))) + length, err := base64.StdEncoding.Decode(e.DataEncoded, base64Encoded) + if err != nil { + return err + } + e.DataEncoded = e.DataEncoded[0:length] + return nil + } + + mt, _ := e.Context.GetDataMediaType() + // Empty content type assumes json + if mt != "" && mt != ApplicationJSON && mt != TextJSON { + // If not json, then data is encoded as string + iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, b) + src := iter.ReadString() // handles escaping + e.DataEncoded = []byte(src) + if iter.Error != nil { + return fmt.Errorf("unexpected data payload for media type %q, expected a string: %w", mt, iter.Error) + } + return nil + } + + e.DataEncoded = b + return nil +} + 
// consumeData decodes the "data" (or "data_base64") member at the iterator's
// current position directly into the event. When isBase64 is set the payload
// is base64-decoded into DataEncoded; otherwise JSON payloads are kept as raw
// bytes and non-JSON media types are read as a (JSON-escaped) string.
func consumeData(e *Event, isBase64 bool, iter *jsoniter.Iterator) error {
	if isBase64 {
		e.DataBase64 = true

		// Allocate payload byte buffer
		base64Encoded := iter.ReadStringAsSlice()
		e.DataEncoded = make([]byte, base64.StdEncoding.DecodedLen(len(base64Encoded)))
		length, err := base64.StdEncoding.Decode(e.DataEncoded, base64Encoded)
		if err != nil {
			return err
		}
		// DecodedLen is an upper bound; trim to the actual decoded length.
		e.DataEncoded = e.DataEncoded[0:length]
		return nil
	}

	mt, _ := e.Context.GetDataMediaType()
	if mt != ApplicationJSON && mt != TextJSON {
		// If not json, then data is encoded as string
		src := iter.ReadString() // handles escaping
		e.DataEncoded = []byte(src)
		if iter.Error != nil {
			return fmt.Errorf("unexpected data payload for media type %q, expected a string: %w", mt, iter.Error)
		}
		return nil
	}

	// JSON media type: keep the raw (still JSON-encoded) bytes as-is.
	e.DataEncoded = iter.SkipAndReturnBytes()
	return nil
}

// readUriRef reads a string from the iterator and parses it as a URI-reference.
// A parse failure is recorded on iter.Error and the zero value is returned.
func readUriRef(iter *jsoniter.Iterator) types.URIRef {
	str := iter.ReadString()
	uriRef := types.ParseURIRef(str)
	if uriRef == nil {
		iter.Error = fmt.Errorf("cannot parse uri ref: %v", str)
		return types.URIRef{}
	}
	return *uriRef
}

// readStrPtr reads a string, mapping the empty string to nil so that optional
// attributes are omitted rather than stored as "".
func readStrPtr(iter *jsoniter.Iterator) *string {
	str := iter.ReadString()
	if str == "" {
		return nil
	}
	return &str
}

// readUriRefPtr reads a string and parses it as a *URI-reference (nil on parse failure).
func readUriRefPtr(iter *jsoniter.Iterator) *types.URIRef {
	return types.ParseURIRef(iter.ReadString())
}

// readUriPtr reads a string and parses it as a *URI (nil on parse failure).
func readUriPtr(iter *jsoniter.Iterator) *types.URI {
	return types.ParseURI(iter.ReadString())
}

// readTimestamp reads a string and parses it as an RFC3339 timestamp,
// recording any parse error on iter.Error.
func readTimestamp(iter *jsoniter.Iterator) *types.Timestamp {
	t, err := types.ParseTimestamp(iter.ReadString())
	if err != nil {
		iter.Error = err
	}
	return t
}

// toStrPtr converts a lazily-parsed jsoniter value to *string, mapping the
// empty string to nil (optional attribute omitted).
func toStrPtr(val jsoniter.Any) (*string, error) {
	str := val.ToString()
	if val.LastError() != nil {
		return nil, val.LastError()
	}
	if str == "" {
		return nil, nil
	}
	return &str, nil
}

// toUriRefPtr converts a lazily-parsed jsoniter value to a *URI-reference.
func toUriRefPtr(val jsoniter.Any) (*types.URIRef, error) {
	str := val.ToString()
	if val.LastError() != nil {
		return nil, val.LastError()
	}
	return types.ParseURIRef(str), nil
}

// toUriPtr converts a lazily-parsed jsoniter value to a *URI.
func toUriPtr(val jsoniter.Any) (*types.URI, error) {
	str := val.ToString()
	if val.LastError() != nil {
		return nil, val.LastError()
	}
	return types.ParseURI(str), nil
}

// UnmarshalJSON implements the json unmarshal method used when this type is
// unmarshaled using json.Unmarshal.
func (e *Event) UnmarshalJSON(b []byte) error {
	iterator := jsoniter.ConfigFastest.BorrowIterator(b)
	defer jsoniter.ConfigFastest.ReturnIterator(iterator)
	return readJsonFromIterator(e, iterator)
}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go
new file mode 100644
index 000000000..958ecc47d
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go
@@ -0,0 +1,50 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package event

import (
	"fmt"
	"strings"
)

// ValidationError maps a context attribute name to the error describing why
// that attribute failed validation.
type ValidationError map[string]error

// Error renders one "attribute: reason" line per failed attribute.
// NOTE: map iteration order is random, so line order is non-deterministic.
func (e ValidationError) Error() string {
	b := strings.Builder{}
	for k, v := range e {
		b.WriteString(k)
		b.WriteString(": ")
		b.WriteString(v.Error())
		b.WriteRune('\n')
	}
	return b.String()
}

// Validate performs a spec based validation on this event.
// Validation is dependent on the spec version specified in the event context.
func (e Event) Validate() error {
	if e.Context == nil {
		return ValidationError{"specversion": fmt.Errorf("missing Event.Context")}
	}

	// Merge per-field errors recorded by the setters (e.FieldErrors) with the
	// spec-version-specific context validation results.
	errs := map[string]error{}
	if e.FieldErrors != nil {
		for k, v := range e.FieldErrors {
			errs[k] = v
		}
	}

	if fieldErrors := e.Context.Validate(); fieldErrors != nil {
		for k, v := range fieldErrors {
			errs[k] = v
		}
	}

	if len(errs) > 0 {
		return ValidationError(errs)
	}
	return nil
}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go
new file mode 100644
index 000000000..ddfb1be38
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go
@@ -0,0 +1,117 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package event

import (
	"fmt"
	"time"
)

var _ EventWriter = (*Event)(nil)

// Each setter below delegates to the context and records success/failure via
// e.fieldOK / e.fieldError; presumably these populate e.FieldErrors, which
// Event.Validate reads — NOTE(review): confirm against event.go.

// SetSpecVersion implements EventWriter.SetSpecVersion
func (e *Event) SetSpecVersion(v string) {
	switch v {
	case CloudEventsVersionV03:
		if e.Context == nil {
			e.Context = &EventContextV03{}
		} else {
			// Converts an existing context (possibly v1) to v0.3 in place.
			e.Context = e.Context.AsV03()
		}
	case CloudEventsVersionV1:
		if e.Context == nil {
			e.Context = &EventContextV1{}
		} else {
			e.Context = e.Context.AsV1()
		}
	default:
		e.fieldError("specversion", fmt.Errorf("a valid spec version is required: [%s, %s]",
			CloudEventsVersionV03, CloudEventsVersionV1))
		return
	}
	e.fieldOK("specversion")
}

// SetType implements EventWriter.SetType
func (e *Event) SetType(t string) {
	if err := e.Context.SetType(t); err != nil {
		e.fieldError("type", err)
	} else {
		e.fieldOK("type")
	}
}

// SetSource implements EventWriter.SetSource
func (e *Event) SetSource(s string) {
	if err := e.Context.SetSource(s); err != nil {
		e.fieldError("source", err)
	} else {
		e.fieldOK("source")
	}
}

// SetSubject implements EventWriter.SetSubject
func (e *Event) SetSubject(s string) {
	if err := e.Context.SetSubject(s); err != nil {
		e.fieldError("subject", err)
	} else {
		e.fieldOK("subject")
	}
}

// SetID implements EventWriter.SetID
func (e *Event) SetID(id string) {
	if err := e.Context.SetID(id); err != nil {
		e.fieldError("id", err)
	} else {
		e.fieldOK("id")
	}
}

// SetTime implements EventWriter.SetTime
func (e *Event) SetTime(t time.Time) {
	if err := e.Context.SetTime(t); err != nil {
		e.fieldError("time", err)
	} else {
		e.fieldOK("time")
	}
}

// SetDataSchema implements EventWriter.SetDataSchema
func (e *Event) SetDataSchema(s string) {
	if err := e.Context.SetDataSchema(s); err != nil {
		e.fieldError("dataschema", err)
	} else {
		e.fieldOK("dataschema")
	}
}

// SetDataContentType implements EventWriter.SetDataContentType
func (e *Event) SetDataContentType(ct string) {
	if err := e.Context.SetDataContentType(ct); err != nil {
		e.fieldError("datacontenttype", err)
	} else {
		e.fieldOK("datacontenttype")
	}
}

// SetDataContentEncoding is deprecated. Implements EventWriter.SetDataContentEncoding.
func (e *Event) SetDataContentEncoding(enc string) {
	if err := e.Context.DeprecatedSetDataContentEncoding(enc); err != nil {
		e.fieldError("datacontentencoding", err)
	} else {
		e.fieldOK("datacontentencoding")
	}
}

// SetExtension implements EventWriter.SetExtension
func (e *Event) SetExtension(name string, obj interface{}) {
	if err := e.Context.SetExtension(name, obj); err != nil {
		e.fieldError("extension:"+name, err)
	} else {
		e.fieldOK("extension:" + name)
	}
}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go
new file mode 100644
index 000000000..a39565afa
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go
@@ -0,0 +1,125 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package event

import "time"

// EventContextReader are the methods required to be a reader of context
// attributes.
type EventContextReader interface {
	// GetSpecVersion returns the native CloudEvents Spec version of the event
	// context.
	GetSpecVersion() string
	// GetType returns the CloudEvents type from the context.
	GetType() string
	// GetSource returns the CloudEvents source from the context.
	GetSource() string
	// GetSubject returns the CloudEvents subject from the context.
	GetSubject() string
	// GetID returns the CloudEvents ID from the context.
	GetID() string
	// GetTime returns the CloudEvents creation time from the context.
	GetTime() time.Time
	// GetDataSchema returns the CloudEvents schema URL (if any) from the
	// context.
	GetDataSchema() string
	// GetDataContentType returns content type on the context.
	GetDataContentType() string
	// DeprecatedGetDataContentEncoding returns content encoding on the context.
	DeprecatedGetDataContentEncoding() string

	// GetDataMediaType returns the MIME media type for encoded data, which is
	// needed by both encoding and decoding. This is a processed form of
	// GetDataContentType and it may return an error.
	GetDataMediaType() (string, error)

	// DEPRECATED: Access extensions directly via the GetExtensions()
	// For example replace this:
	//
	//     var i int
	//     err := ec.ExtensionAs("foo", &i)
	//
	// With this:
	//
	//     i, err := types.ToInteger(ec.GetExtensions()["foo"])
	//
	ExtensionAs(string, interface{}) error

	// GetExtensions returns the full extensions map.
	//
	// Extensions use the CloudEvents type system, details in package cloudevents/types.
	GetExtensions() map[string]interface{}

	// GetExtension returns the extension associated with the given key.
	// The given key is case insensitive. If the extension can not be found,
	// an error will be returned.
	GetExtension(string) (interface{}, error)
}

// EventContextWriter are the methods required to be a writer of context
// attributes.
type EventContextWriter interface {
	// SetType sets the type of the context.
	SetType(string) error
	// SetSource sets the source of the context.
	SetSource(string) error
	// SetSubject sets the subject of the context.
	SetSubject(string) error
	// SetID sets the ID of the context.
	SetID(string) error
	// SetTime sets the time of the context.
	SetTime(time time.Time) error
	// SetDataSchema sets the schema url of the context.
	SetDataSchema(string) error
	// SetDataContentType sets the data content type of the context.
	SetDataContentType(string) error
	// DeprecatedSetDataContentEncoding sets the data context encoding of the context.
	DeprecatedSetDataContentEncoding(string) error

	// SetExtension sets the given interface onto the extension attributes
	// determined by the provided name.
	//
	// This function fails in V1 if the name doesn't respect the regex ^[a-zA-Z0-9]+$
	//
	// Package ./types documents the types that are allowed as extension values.
	SetExtension(string, interface{}) error
}

// EventContextConverter are the methods that allow for event version
// conversion.
type EventContextConverter interface {
	// AsV03 provides a translation from whatever the "native" encoding of the
	// CloudEvent was to the equivalent in v0.3 field names, moving fields to or
	// from extensions as necessary.
	AsV03() *EventContextV03

	// AsV1 provides a translation from whatever the "native" encoding of the
	// CloudEvent was to the equivalent in v1.0 field names, moving fields to or
	// from extensions as necessary.
	AsV1() *EventContextV1
}

// EventContext is the canonical interface for a CloudEvents Context.
type EventContext interface {
	// EventContextConverter allows for conversion between versions.
	EventContextConverter

	// EventContextReader adds methods for reading context.
	EventContextReader

	// EventContextWriter adds methods for writing to context.
	EventContextWriter

	// Validate the event based on the specifics of the CloudEvents spec version
	// represented by this event context.
	Validate() ValidationError

	// Clone clones the event context.
	Clone() EventContext

	// String returns a pretty-printed representation of the EventContext.
	String() string
}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go
new file mode 100644
index 000000000..c511c81c4
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go
@@ -0,0 +1,329 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package event

import (
	"encoding/json"
	"fmt"
	"mime"
	"sort"
	"strings"

	"github.com/cloudevents/sdk-go/v2/types"
)

const (
	// CloudEventsVersionV03 represents the version 0.3 of the CloudEvents spec.
	CloudEventsVersionV03 = "0.3"
)

// specV03Attributes is the set of reserved v0.3 context attribute names;
// SetExtension refuses to overwrite any of these.
var specV03Attributes = map[string]struct{}{
	"type":                {},
	"source":              {},
	"subject":             {},
	"id":                  {},
	"time":                {},
	"schemaurl":           {},
	"datacontenttype":     {},
	"datacontentencoding": {},
}

// EventContextV03 represents the non-data attributes of a CloudEvents v0.3
// event.
type EventContextV03 struct {
	// Type - The type of the occurrence which has happened.
	Type string `json:"type"`
	// Source - A URI describing the event producer.
	Source types.URIRef `json:"source"`
	// Subject - The subject of the event in the context of the event producer
	// (identified by `source`).
	Subject *string `json:"subject,omitempty"`
	// ID of the event; must be non-empty and unique within the scope of the producer.
	ID string `json:"id"`
	// Time - A Timestamp when the event happened.
	Time *types.Timestamp `json:"time,omitempty"`
	// DataSchema - A link to the schema that the `data` attribute adheres to.
	SchemaURL *types.URIRef `json:"schemaurl,omitempty"`
	// GetDataMediaType - A MIME (RFC2046) string describing the media type of `data`.
	DataContentType *string `json:"datacontenttype,omitempty"`
	// DeprecatedDataContentEncoding describes the content encoding for the `data` attribute. Valid: nil, `Base64`.
	DataContentEncoding *string `json:"datacontentencoding,omitempty"`
	// Extensions - Additional extension metadata beyond the base spec.
	Extensions map[string]interface{} `json:"-"`
}

// Adhere to EventContext
var _ EventContext = (*EventContextV03)(nil)

// ExtensionAs implements EventContext.ExtensionAs
func (ec EventContextV03) ExtensionAs(name string, obj interface{}) error {
	value, ok := ec.Extensions[name]
	if !ok {
		return fmt.Errorf("extension %q does not exist", name)
	}

	// Try to unmarshal extension if we find it as a RawMessage.
	switch v := value.(type) {
	case json.RawMessage:
		if err := json.Unmarshal(v, obj); err == nil {
			// if that worked, return with obj set.
			return nil
		}
	}
	// else try as a string ptr.

	// Only support *string for now.
	switch v := obj.(type) {
	case *string:
		if valueAsString, ok := value.(string); ok {
			*v = valueAsString
			return nil
		} else {
			return fmt.Errorf("invalid type for extension %q", name)
		}
	default:
		return fmt.Errorf("unknown extension type %T", obj)
	}
}

// SetExtension adds the extension 'name' with value 'value' to the CloudEvents
// context. This function fails if the name uses a reserved event context key.
+func (ec *EventContextV03) SetExtension(name string, value interface{}) error { + if ec.Extensions == nil { + ec.Extensions = make(map[string]interface{}) + } + + if _, ok := specV03Attributes[strings.ToLower(name)]; ok { + return fmt.Errorf("bad key %q: CloudEvents spec attribute MUST NOT be overwritten by extension", name) + } + + if value == nil { + delete(ec.Extensions, name) + if len(ec.Extensions) == 0 { + ec.Extensions = nil + } + return nil + } else { + v, err := types.Validate(value) + if err == nil { + ec.Extensions[name] = v + } + return err + } +} + +// Clone implements EventContextConverter.Clone +func (ec EventContextV03) Clone() EventContext { + ec03 := ec.AsV03() + ec03.Source = types.Clone(ec.Source).(types.URIRef) + if ec.Time != nil { + ec03.Time = types.Clone(ec.Time).(*types.Timestamp) + } + if ec.SchemaURL != nil { + ec03.SchemaURL = types.Clone(ec.SchemaURL).(*types.URIRef) + } + ec03.Extensions = ec.cloneExtensions() + return ec03 +} + +func (ec *EventContextV03) cloneExtensions() map[string]interface{} { + old := ec.Extensions + if old == nil { + return nil + } + new := make(map[string]interface{}, len(ec.Extensions)) + for k, v := range old { + new[k] = types.Clone(v) + } + return new +} + +// AsV03 implements EventContextConverter.AsV03 +func (ec EventContextV03) AsV03() *EventContextV03 { + return &ec +} + +// AsV1 implements EventContextConverter.AsV1 +func (ec EventContextV03) AsV1() *EventContextV1 { + ret := EventContextV1{ + ID: ec.ID, + Time: ec.Time, + Type: ec.Type, + DataContentType: ec.DataContentType, + Source: types.URIRef{URL: ec.Source.URL}, + Subject: ec.Subject, + Extensions: make(map[string]interface{}), + } + if ec.SchemaURL != nil { + ret.DataSchema = &types.URI{URL: ec.SchemaURL.URL} + } + + // DataContentEncoding was removed in 1.0, so put it in an extension for 1.0. 
+ if ec.DataContentEncoding != nil { + _ = ret.SetExtension(DataContentEncodingKey, *ec.DataContentEncoding) + } + + if ec.Extensions != nil { + for k, v := range ec.Extensions { + k = strings.ToLower(k) + ret.Extensions[k] = v + } + } + if len(ret.Extensions) == 0 { + ret.Extensions = nil + } + return &ret +} + +// Validate returns errors based on requirements from the CloudEvents spec. +// For more details, see https://github.com/cloudevents/spec/blob/master/spec.md +// As of Feb 26, 2019, commit 17c32ea26baf7714ad027d9917d03d2fff79fc7e +// + https://github.com/cloudevents/spec/pull/387 -> datacontentencoding +// + https://github.com/cloudevents/spec/pull/406 -> subject +func (ec EventContextV03) Validate() ValidationError { + errors := map[string]error{} + + // type + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // SHOULD be prefixed with a reverse-DNS name. The prefixed domain dictates the organization which defines the semantics of this event type. 
+ eventType := strings.TrimSpace(ec.Type) + if eventType == "" { + errors["type"] = fmt.Errorf("MUST be a non-empty string") + } + + // source + // Type: URI-reference + // Constraints: + // REQUIRED + source := strings.TrimSpace(ec.Source.String()) + if source == "" { + errors["source"] = fmt.Errorf("REQUIRED") + } + + // subject + // Type: String + // Constraints: + // OPTIONAL + // MUST be a non-empty string + if ec.Subject != nil { + subject := strings.TrimSpace(*ec.Subject) + if subject == "" { + errors["subject"] = fmt.Errorf("if present, MUST be a non-empty string") + } + } + + // id + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // MUST be unique within the scope of the producer + id := strings.TrimSpace(ec.ID) + if id == "" { + errors["id"] = fmt.Errorf("MUST be a non-empty string") + + // no way to test "MUST be unique within the scope of the producer" + } + + // time + // Type: Timestamp + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3339 + // --> no need to test this, no way to set the time without it being valid. + + // schemaurl + // Type: URI + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3986 + if ec.SchemaURL != nil { + schemaURL := strings.TrimSpace(ec.SchemaURL.String()) + // empty string is not RFC 3986 compatible. 
+ if schemaURL == "" { + errors["schemaurl"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 3986") + } + } + + // datacontenttype + // Type: String per RFC 2046 + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 2046 + if ec.DataContentType != nil { + dataContentType := strings.TrimSpace(*ec.DataContentType) + if dataContentType == "" { + errors["datacontenttype"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 2046") + } else { + _, _, err := mime.ParseMediaType(dataContentType) + if err != nil { + errors["datacontenttype"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 2046") + } + } + } + + // datacontentencoding + // Type: String per RFC 2045 Section 6.1 + // Constraints: + // The attribute MUST be set if the data attribute contains string-encoded binary data. + // Otherwise the attribute MUST NOT be set. + // If present, MUST adhere to RFC 2045 Section 6.1 + if ec.DataContentEncoding != nil { + dataContentEncoding := strings.ToLower(strings.TrimSpace(*ec.DataContentEncoding)) + if dataContentEncoding != Base64 { + errors["datacontentencoding"] = fmt.Errorf("if present, MUST adhere to RFC 2045 Section 6.1") + } + } + + if len(errors) > 0 { + return errors + } + return nil +} + +// String returns a pretty-printed representation of the EventContext. 
func (ec EventContextV03) String() string {
	b := strings.Builder{}

	b.WriteString("Context Attributes,\n")

	b.WriteString("  specversion: " + CloudEventsVersionV03 + "\n")
	b.WriteString("  type: " + ec.Type + "\n")
	b.WriteString("  source: " + ec.Source.String() + "\n")
	if ec.Subject != nil {
		b.WriteString("  subject: " + *ec.Subject + "\n")
	}
	b.WriteString("  id: " + ec.ID + "\n")
	if ec.Time != nil {
		b.WriteString("  time: " + ec.Time.String() + "\n")
	}
	if ec.SchemaURL != nil {
		b.WriteString("  schemaurl: " + ec.SchemaURL.String() + "\n")
	}
	if ec.DataContentType != nil {
		b.WriteString("  datacontenttype: " + *ec.DataContentType + "\n")
	}
	if ec.DataContentEncoding != nil {
		b.WriteString("  datacontentencoding: " + *ec.DataContentEncoding + "\n")
	}

	if ec.Extensions != nil && len(ec.Extensions) > 0 {
		b.WriteString("Extensions,\n")
		// Sort keys so extension output is deterministic.
		keys := make([]string, 0, len(ec.Extensions))
		for k := range ec.Extensions {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		for _, key := range keys {
			b.WriteString(fmt.Sprintf("  %s: %v\n", key, ec.Extensions[key]))
		}
	}

	return b.String()
}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go
new file mode 100644
index 000000000..2cd27a705
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go
@@ -0,0 +1,99 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package event

import (
	"fmt"
	"strings"
	"time"
)

// GetSpecVersion implements EventContextReader.GetSpecVersion
func (ec EventContextV03) GetSpecVersion() string {
	return CloudEventsVersionV03
}

// GetDataContentType implements EventContextReader.GetDataContentType
func (ec EventContextV03) GetDataContentType() string {
	if ec.DataContentType != nil {
		return *ec.DataContentType
	}
	return ""
}

// GetDataMediaType implements EventContextReader.GetDataMediaType
func (ec EventContextV03) GetDataMediaType() (string, error) {
	if ec.DataContentType != nil {
		dct := *ec.DataContentType
		// Strip any media-type parameters ("; charset=..." etc.).
		i := strings.IndexRune(dct, ';')
		if i == -1 {
			return dct, nil
		}
		return strings.TrimSpace(dct[0:i]), nil
	}
	return "", nil
}

// GetType implements EventContextReader.GetType
func (ec EventContextV03) GetType() string {
	return ec.Type
}

// GetSource implements EventContextReader.GetSource
func (ec EventContextV03) GetSource() string {
	return ec.Source.String()
}

// GetSubject implements EventContextReader.GetSubject
func (ec EventContextV03) GetSubject() string {
	if ec.Subject != nil {
		return *ec.Subject
	}
	return ""
}

// GetTime implements EventContextReader.GetTime
func (ec EventContextV03) GetTime() time.Time {
	if ec.Time != nil {
		return ec.Time.Time
	}
	return time.Time{}
}

// GetID implements EventContextReader.GetID
func (ec EventContextV03) GetID() string {
	return ec.ID
}

// GetDataSchema implements EventContextReader.GetDataSchema
func (ec EventContextV03) GetDataSchema() string {
	if ec.SchemaURL != nil {
		return ec.SchemaURL.String()
	}
	return ""
}

// DeprecatedGetDataContentEncoding implements EventContextReader.DeprecatedGetDataContentEncoding
func (ec EventContextV03) DeprecatedGetDataContentEncoding() string {
	if ec.DataContentEncoding != nil {
		return *ec.DataContentEncoding
	}
	return ""
}

// GetExtensions implements EventContextReader.GetExtensions
// NOTE: returns the live map, not a copy.
func (ec EventContextV03) GetExtensions() map[string]interface{} {
	return ec.Extensions
}

// GetExtension implements EventContextReader.GetExtension
func (ec EventContextV03) GetExtension(key string) (interface{}, error) {
	v, ok := caseInsensitiveSearch(key, ec.Extensions)
	if !ok {
		return "", fmt.Errorf("%q not found", key)
	}
	return v, nil
}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go
new file mode 100644
index 000000000..5d664635e
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go
@@ -0,0 +1,103 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package event

import (
	"errors"
	"net/url"
	"strings"
	"time"

	"github.com/cloudevents/sdk-go/v2/types"
)

// Adhere to EventContextWriter
var _ EventContextWriter = (*EventContextV03)(nil)

// SetDataContentType implements EventContextWriter.SetDataContentType
func (ec *EventContextV03) SetDataContentType(ct string) error {
	ct = strings.TrimSpace(ct)
	if ct == "" {
		ec.DataContentType = nil
	} else {
		ec.DataContentType = &ct
	}
	return nil
}

// SetType implements EventContextWriter.SetType
func (ec *EventContextV03) SetType(t string) error {
	t = strings.TrimSpace(t)
	ec.Type = t
	return nil
}

// SetSource implements EventContextWriter.SetSource
func (ec *EventContextV03) SetSource(u string) error {
	pu, err := url.Parse(u)
	if err != nil {
		return err
	}
	ec.Source = types.URIRef{URL: *pu}
	return nil
}

// SetSubject implements EventContextWriter.SetSubject
// An empty (or all-whitespace) subject clears the attribute.
func (ec *EventContextV03) SetSubject(s string) error {
	s = strings.TrimSpace(s)
	if s == "" {
		ec.Subject = nil
	} else {
		ec.Subject = &s
	}
	return nil
}

// SetID implements EventContextWriter.SetID
func (ec *EventContextV03) SetID(id string) error {
	id = strings.TrimSpace(id)
	if id == "" {
		return errors.New("id is required to be a non-empty string")
	}
	ec.ID = id
	return nil
}

// SetTime implements EventContextWriter.SetTime
// The zero time clears the attribute.
func (ec *EventContextV03) SetTime(t time.Time) error {
	if t.IsZero() {
		ec.Time = nil
	} else {
		ec.Time = &types.Timestamp{Time: t}
	}
	return nil
}

// SetDataSchema implements EventContextWriter.SetDataSchema
func (ec *EventContextV03) SetDataSchema(u string) error {
	u = strings.TrimSpace(u)
	if u == "" {
		ec.SchemaURL = nil
		return nil
	}
	pu, err := url.Parse(u)
	if err != nil {
		return err
	}
	ec.SchemaURL = &types.URIRef{URL: *pu}
	return nil
}

// DeprecatedSetDataContentEncoding implements EventContextWriter.DeprecatedSetDataContentEncoding
func (ec *EventContextV03) DeprecatedSetDataContentEncoding(e string) error {
	e = strings.ToLower(strings.TrimSpace(e))
	if e == "" {
		ec.DataContentEncoding = nil
	} else {
		ec.DataContentEncoding = &e
	}
	return nil
}
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go
new file mode 100644
index 000000000..8f164502b
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go
@@ -0,0 +1,315 @@
/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package event

import (
	"fmt"
	"mime"
	"sort"
	"strings"

	"github.com/cloudevents/sdk-go/v2/types"
)

// WIP: AS OF SEP 20, 2019

const (
	// CloudEventsVersionV1 represents the version 1.0 of the CloudEvents spec.
	CloudEventsVersionV1 = "1.0"
)

// specV1Attributes is the set of reserved v1.0 context attribute names;
// SetExtension refuses to overwrite any of these.
var specV1Attributes = map[string]struct{}{
	"id":              {},
	"source":          {},
	"type":            {},
	"datacontenttype": {},
	"subject":         {},
	"time":            {},
	"specversion":     {},
	"dataschema":      {},
}

// EventContextV1 represents the non-data attributes of a CloudEvents v1.0
// event.
type EventContextV1 struct {
	// ID of the event; must be non-empty and unique within the scope of the producer.
	// +required
	ID string `json:"id"`
	// Source - A URI describing the event producer.
	// +required
	Source types.URIRef `json:"source"`
	// Type - The type of the occurrence which has happened.
	// +required
	Type string `json:"type"`

	// DataContentType - A MIME (RFC2046) string describing the media type of `data`.
	// +optional
	DataContentType *string `json:"datacontenttype,omitempty"`
	// Subject - The subject of the event in the context of the event producer
	// (identified by `source`).
	// +optional
	Subject *string `json:"subject,omitempty"`
	// Time - A Timestamp when the event happened.
	// +optional
	Time *types.Timestamp `json:"time,omitempty"`
	// DataSchema - A link to the schema that the `data` attribute adheres to.
	// +optional
	DataSchema *types.URI `json:"dataschema,omitempty"`

	// Extensions - Additional extension metadata beyond the base spec.
	// +optional
	Extensions map[string]interface{} `json:"-"`
}

// Adhere to EventContext
var _ EventContext = (*EventContextV1)(nil)

// ExtensionAs implements EventContext.ExtensionAs
func (ec EventContextV1) ExtensionAs(name string, obj interface{}) error {
	// v1 extension keys are stored lower-cased.
	name = strings.ToLower(name)
	value, ok := ec.Extensions[name]
	if !ok {
		return fmt.Errorf("extension %q does not exist", name)
	}

	// Only support *string for now.
	if v, ok := obj.(*string); ok {
		if *v, ok = value.(string); ok {
			return nil
		}
	}
	return fmt.Errorf("unknown extension type %T", obj)
}

// SetExtension adds the extension 'name' with value 'value' to the CloudEvents
// context. This function fails if the name doesn't respect the regex
// ^[a-zA-Z0-9]+$ or if the name uses a reserved event context key.
+func (ec *EventContextV1) SetExtension(name string, value interface{}) error { + if err := validateExtensionName(name); err != nil { + return err + } + + if _, ok := specV1Attributes[strings.ToLower(name)]; ok { + return fmt.Errorf("bad key %q: CloudEvents spec attribute MUST NOT be overwritten by extension", name) + } + + name = strings.ToLower(name) + if ec.Extensions == nil { + ec.Extensions = make(map[string]interface{}) + } + if value == nil { + delete(ec.Extensions, name) + if len(ec.Extensions) == 0 { + ec.Extensions = nil + } + return nil + } else { + v, err := types.Validate(value) // Ensure it's a legal CE attribute value + if err == nil { + ec.Extensions[name] = v + } + return err + } +} + +// Clone implements EventContextConverter.Clone +func (ec EventContextV1) Clone() EventContext { + ec1 := ec.AsV1() + ec1.Source = types.Clone(ec.Source).(types.URIRef) + if ec.Time != nil { + ec1.Time = types.Clone(ec.Time).(*types.Timestamp) + } + if ec.DataSchema != nil { + ec1.DataSchema = types.Clone(ec.DataSchema).(*types.URI) + } + ec1.Extensions = ec.cloneExtensions() + return ec1 +} + +func (ec *EventContextV1) cloneExtensions() map[string]interface{} { + old := ec.Extensions + if old == nil { + return nil + } + new := make(map[string]interface{}, len(ec.Extensions)) + for k, v := range old { + new[k] = types.Clone(v) + } + return new +} + +// AsV03 implements EventContextConverter.AsV03 +func (ec EventContextV1) AsV03() *EventContextV03 { + ret := EventContextV03{ + ID: ec.ID, + Time: ec.Time, + Type: ec.Type, + DataContentType: ec.DataContentType, + Source: types.URIRef{URL: ec.Source.URL}, + Subject: ec.Subject, + Extensions: make(map[string]interface{}), + } + + if ec.DataSchema != nil { + ret.SchemaURL = &types.URIRef{URL: ec.DataSchema.URL} + } + + if ec.Extensions != nil { + for k, v := range ec.Extensions { + k = strings.ToLower(k) + // DeprecatedDataContentEncoding was introduced in 0.3, removed in 1.0 + if strings.EqualFold(k, 
DataContentEncodingKey) { + etv, ok := v.(string) + if ok && etv != "" { + ret.DataContentEncoding = &etv + } + continue + } + ret.Extensions[k] = v + } + } + if len(ret.Extensions) == 0 { + ret.Extensions = nil + } + return &ret +} + +// AsV1 implements EventContextConverter.AsV1 +func (ec EventContextV1) AsV1() *EventContextV1 { + return &ec +} + +// Validate returns errors based on requirements from the CloudEvents spec. +// For more details, see https://github.com/cloudevents/spec/blob/v1.0/spec.md. +func (ec EventContextV1) Validate() ValidationError { + errors := map[string]error{} + + // id + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // MUST be unique within the scope of the producer + id := strings.TrimSpace(ec.ID) + if id == "" { + errors["id"] = fmt.Errorf("MUST be a non-empty string") + // no way to test "MUST be unique within the scope of the producer" + } + + // source + // Type: URI-reference + // Constraints: + // REQUIRED + // MUST be a non-empty URI-reference + // An absolute URI is RECOMMENDED + source := strings.TrimSpace(ec.Source.String()) + if source == "" { + errors["source"] = fmt.Errorf("REQUIRED") + } + + // type + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // SHOULD be prefixed with a reverse-DNS name. The prefixed domain dictates the organization which defines the semantics of this event type. + eventType := strings.TrimSpace(ec.Type) + if eventType == "" { + errors["type"] = fmt.Errorf("MUST be a non-empty string") + } + + // The following attributes are optional but still have validation. 
+ + // datacontenttype + // Type: String per RFC 2046 + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 2046 + if ec.DataContentType != nil { + dataContentType := strings.TrimSpace(*ec.DataContentType) + if dataContentType == "" { + errors["datacontenttype"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 2046") + } else { + _, _, err := mime.ParseMediaType(dataContentType) + if err != nil { + errors["datacontenttype"] = fmt.Errorf("failed to parse RFC 2046 media type %w", err) + } + } + } + + // dataschema + // Type: URI + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3986 + if ec.DataSchema != nil { + if !ec.DataSchema.Validate() { + errors["dataschema"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 3986, Section 4.3. Absolute URI") + } + } + + // subject + // Type: String + // Constraints: + // OPTIONAL + // MUST be a non-empty string + if ec.Subject != nil { + subject := strings.TrimSpace(*ec.Subject) + if subject == "" { + errors["subject"] = fmt.Errorf("if present, MUST be a non-empty string") + } + } + + // time + // Type: Timestamp + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3339 + // --> no need to test this, no way to set the time without it being valid. + + if len(errors) > 0 { + return errors + } + return nil +} + +// String returns a pretty-printed representation of the EventContext. 
+func (ec EventContextV1) String() string { + b := strings.Builder{} + + b.WriteString("Context Attributes,\n") + + b.WriteString(" specversion: " + CloudEventsVersionV1 + "\n") + b.WriteString(" type: " + ec.Type + "\n") + b.WriteString(" source: " + ec.Source.String() + "\n") + if ec.Subject != nil { + b.WriteString(" subject: " + *ec.Subject + "\n") + } + b.WriteString(" id: " + ec.ID + "\n") + if ec.Time != nil { + b.WriteString(" time: " + ec.Time.String() + "\n") + } + if ec.DataSchema != nil { + b.WriteString(" dataschema: " + ec.DataSchema.String() + "\n") + } + if ec.DataContentType != nil { + b.WriteString(" datacontenttype: " + *ec.DataContentType + "\n") + } + + if ec.Extensions != nil && len(ec.Extensions) > 0 { + b.WriteString("Extensions,\n") + keys := make([]string, 0, len(ec.Extensions)) + for k := range ec.Extensions { + keys = append(keys, k) + } + sort.Strings(keys) + for _, key := range keys { + b.WriteString(fmt.Sprintf(" %s: %v\n", key, ec.Extensions[key])) + } + } + + return b.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go new file mode 100644 index 000000000..74f73b029 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go @@ -0,0 +1,104 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "fmt" + "strings" + "time" +) + +// GetSpecVersion implements EventContextReader.GetSpecVersion +func (ec EventContextV1) GetSpecVersion() string { + return CloudEventsVersionV1 +} + +// GetDataContentType implements EventContextReader.GetDataContentType +func (ec EventContextV1) GetDataContentType() string { + if ec.DataContentType != nil { + return *ec.DataContentType + } + return "" +} + +// GetDataMediaType implements EventContextReader.GetDataMediaType +func (ec EventContextV1) GetDataMediaType() (string, error) { + if 
ec.DataContentType != nil { + dct := *ec.DataContentType + i := strings.IndexRune(dct, ';') + if i == -1 { + return dct, nil + } + return strings.TrimSpace(dct[0:i]), nil + } + return "", nil +} + +// GetType implements EventContextReader.GetType +func (ec EventContextV1) GetType() string { + return ec.Type +} + +// GetSource implements EventContextReader.GetSource +func (ec EventContextV1) GetSource() string { + return ec.Source.String() +} + +// GetSubject implements EventContextReader.GetSubject +func (ec EventContextV1) GetSubject() string { + if ec.Subject != nil { + return *ec.Subject + } + return "" +} + +// GetTime implements EventContextReader.GetTime +func (ec EventContextV1) GetTime() time.Time { + if ec.Time != nil { + return ec.Time.Time + } + return time.Time{} +} + +// GetID implements EventContextReader.GetID +func (ec EventContextV1) GetID() string { + return ec.ID +} + +// GetDataSchema implements EventContextReader.GetDataSchema +func (ec EventContextV1) GetDataSchema() string { + if ec.DataSchema != nil { + return ec.DataSchema.String() + } + return "" +} + +// DeprecatedGetDataContentEncoding implements EventContextReader.DeprecatedGetDataContentEncoding +func (ec EventContextV1) DeprecatedGetDataContentEncoding() string { + return "" +} + +// GetExtensions implements EventContextReader.GetExtensions +func (ec EventContextV1) GetExtensions() map[string]interface{} { + if len(ec.Extensions) == 0 { + return nil + } + // For now, convert the extensions of v1.0 to the pre-v1.0 style. 
+ ext := make(map[string]interface{}, len(ec.Extensions)) + for k, v := range ec.Extensions { + ext[k] = v + } + return ext +} + +// GetExtension implements EventContextReader.GetExtension +func (ec EventContextV1) GetExtension(key string) (interface{}, error) { + v, ok := caseInsensitiveSearch(key, ec.Extensions) + if !ok { + return "", fmt.Errorf("%q not found", key) + } + return v, nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go new file mode 100644 index 000000000..5f2aca763 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go @@ -0,0 +1,97 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "errors" + "net/url" + "strings" + "time" + + "github.com/cloudevents/sdk-go/v2/types" +) + +// Adhere to EventContextWriter +var _ EventContextWriter = (*EventContextV1)(nil) + +// SetDataContentType implements EventContextWriter.SetDataContentType +func (ec *EventContextV1) SetDataContentType(ct string) error { + ct = strings.TrimSpace(ct) + if ct == "" { + ec.DataContentType = nil + } else { + ec.DataContentType = &ct + } + return nil +} + +// SetType implements EventContextWriter.SetType +func (ec *EventContextV1) SetType(t string) error { + t = strings.TrimSpace(t) + ec.Type = t + return nil +} + +// SetSource implements EventContextWriter.SetSource +func (ec *EventContextV1) SetSource(u string) error { + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.Source = types.URIRef{URL: *pu} + return nil +} + +// SetSubject implements EventContextWriter.SetSubject +func (ec *EventContextV1) SetSubject(s string) error { + s = strings.TrimSpace(s) + if s == "" { + ec.Subject = nil + } else { + ec.Subject = &s + } + return nil +} + +// SetID implements EventContextWriter.SetID +func (ec *EventContextV1) SetID(id string) error { + id = 
strings.TrimSpace(id) + if id == "" { + return errors.New("id is required to be a non-empty string") + } + ec.ID = id + return nil +} + +// SetTime implements EventContextWriter.SetTime +func (ec *EventContextV1) SetTime(t time.Time) error { + if t.IsZero() { + ec.Time = nil + } else { + ec.Time = &types.Timestamp{Time: t} + } + return nil +} + +// SetDataSchema implements EventContextWriter.SetDataSchema +func (ec *EventContextV1) SetDataSchema(u string) error { + u = strings.TrimSpace(u) + if u == "" { + ec.DataSchema = nil + return nil + } + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.DataSchema = &types.URI{URL: *pu} + return nil +} + +// DeprecatedSetDataContentEncoding implements EventContextWriter.DeprecatedSetDataContentEncoding +func (ec *EventContextV1) DeprecatedSetDataContentEncoding(e string) error { + return errors.New("deprecated: SetDataContentEncoding is not supported in v1.0 of CloudEvents") +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go b/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go new file mode 100644 index 000000000..72d0e757a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go @@ -0,0 +1,57 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "errors" + "fmt" + "strings" +) + +const ( + // DataContentEncodingKey is the key to DeprecatedDataContentEncoding for versions that do not support data content encoding + // directly. + DataContentEncodingKey = "datacontentencoding" +) + +var ( + // This determines the behavior of validateExtensionName(). 
For MaxExtensionNameLength > 0, an error will be returned, + // if len(key) > MaxExtensionNameLength + MaxExtensionNameLength = 0 +) + +func caseInsensitiveSearch(key string, space map[string]interface{}) (interface{}, bool) { + lkey := strings.ToLower(key) + for k, v := range space { + if strings.EqualFold(lkey, strings.ToLower(k)) { + return v, true + } + } + return nil, false +} + +func IsExtensionNameValid(key string) bool { + if err := validateExtensionName(key); err != nil { + return false + } + return true +} + +func validateExtensionName(key string) error { + if len(key) < 1 { + return errors.New("bad key, CloudEvents attribute names MUST NOT be empty") + } + if MaxExtensionNameLength > 0 && len(key) > MaxExtensionNameLength { + return fmt.Errorf("bad key, CloudEvents attribute name '%s' is longer than %d characters", key, MaxExtensionNameLength) + } + + for _, c := range key { + if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9')) { + return errors.New("bad key, CloudEvents attribute names MUST consist of lower-case letters ('a' to 'z'), upper-case letters ('A' to 'Z') or digits ('0' to '9') from the ASCII character set") + } + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go new file mode 100644 index 000000000..f826a1841 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go @@ -0,0 +1,26 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package protocol defines interfaces to decouple the client package +from protocol implementations. + +Most event sender and receiver applications should not use this +package, they should use the client package. This package is for +infrastructure developers implementing new transports, or intermediary +components like importers, channels or brokers. 
+ +Available protocols: + +* HTTP (using net/http) +* Kafka (using github.com/Shopify/sarama) +* AMQP (using pack.ag/amqp) +* Go Channels +* Nats +* Nats Streaming (stan) +* Google PubSub + +*/ +package protocol diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go new file mode 100644 index 000000000..a3f335261 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go @@ -0,0 +1,42 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import "fmt" + +// ErrTransportMessageConversion is an error produced when the transport +// message can not be converted. +type ErrTransportMessageConversion struct { + fatal bool + handled bool + transport string + message string +} + +// NewErrTransportMessageConversion makes a new ErrTransportMessageConversion. +func NewErrTransportMessageConversion(transport, message string, handled, fatal bool) *ErrTransportMessageConversion { + return &ErrTransportMessageConversion{ + transport: transport, + message: message, + handled: handled, + fatal: fatal, + } +} + +// IsFatal reports if this error should be considered fatal. +func (e *ErrTransportMessageConversion) IsFatal() bool { + return e.fatal +} + +// Handled reports if this error should be considered accepted and no further action. 
+func (e *ErrTransportMessageConversion) Handled() bool { + return e.handled +} + +// Error implements error.Error +func (e *ErrTransportMessageConversion) Error() string { + return fmt.Sprintf("transport %s failed to convert message: %s", e.transport, e.message) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go new file mode 100644 index 000000000..48f03fb6c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go @@ -0,0 +1,128 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "go.uber.org/zap" + "net/http" + "strconv" + "strings" + "time" +) + +type WebhookConfig struct { + AllowedMethods []string // defaults to POST + AllowedRate *int + AutoACKCallback bool + AllowedOrigins []string +} + +const ( + DefaultAllowedRate = 1000 + DefaultTimeout = time.Second * 600 +) + +// TODO: implement rate limiting. +// Throttling is indicated by requests being rejected using HTTP status code 429 Too Many Requests. +// TODO: use this if Webhook Request Origin has been turned on. +// Inbound requests should be rejected if Allowed Origins is required by SDK. + +func (p *Protocol) OptionsHandler(rw http.ResponseWriter, req *http.Request) { + if req.Method != http.MethodOptions || p.WebhookConfig == nil { + rw.WriteHeader(http.StatusMethodNotAllowed) + return + } + + headers := make(http.Header) + + // The spec does not say we need to validate the origin, just the request origin. + // After the handshake, we will validate the origin. 
+ if origin, ok := p.ValidateRequestOrigin(req); !ok { + rw.WriteHeader(http.StatusBadRequest) + return + } else { + headers.Set("WebHook-Allowed-Origin", origin) + } + + allowedRateRequired := false + if _, ok := req.Header[http.CanonicalHeaderKey("WebHook-Request-Rate")]; ok { + // must send WebHook-Allowed-Rate + allowedRateRequired = true + } + + if p.WebhookConfig.AllowedRate != nil { + headers.Set("WebHook-Allowed-Rate", strconv.Itoa(*p.WebhookConfig.AllowedRate)) + } else if allowedRateRequired { + headers.Set("WebHook-Allowed-Rate", strconv.Itoa(DefaultAllowedRate)) + } + + if len(p.WebhookConfig.AllowedMethods) > 0 { + headers.Set("Allow", strings.Join(p.WebhookConfig.AllowedMethods, ", ")) + } else { + headers.Set("Allow", http.MethodPost) + } + + cb := req.Header.Get("WebHook-Request-Callback") + if cb != "" { + if p.WebhookConfig.AutoACKCallback { + go func() { + reqAck, err := http.NewRequest(http.MethodPost, cb, nil) + if err != nil { + cecontext.LoggerFrom(req.Context()).Errorw("OPTIONS handler failed to create http request attempting to ack callback.", zap.Error(err), zap.String("callback", cb)) + return + } + + // Write out the headers. + for k := range headers { + reqAck.Header.Set(k, headers.Get(k)) + } + + _, err = http.DefaultClient.Do(reqAck) + if err != nil { + cecontext.LoggerFrom(req.Context()).Errorw("OPTIONS handler failed to ack callback.", zap.Error(err), zap.String("callback", cb)) + return + } + }() + return + } else { + cecontext.LoggerFrom(req.Context()).Infof("ACTION REQUIRED: Please validate web hook request callback: %q", cb) + // TODO: what to do pending https://github.com/cloudevents/spec/issues/617 + return + } + } + + // Write out the headers. 
+ for k := range headers { + rw.Header().Set(k, headers.Get(k)) + } +} + +func (p *Protocol) ValidateRequestOrigin(req *http.Request) (string, bool) { + return p.validateOrigin(req.Header.Get("WebHook-Request-Origin")) +} + +func (p *Protocol) ValidateOrigin(req *http.Request) (string, bool) { + return p.validateOrigin(req.Header.Get("Origin")) +} + +func (p *Protocol) validateOrigin(ro string) (string, bool) { + cecontext.LoggerFrom(context.TODO()).Infow("Validating origin.", zap.String("origin", ro)) + + for _, ao := range p.WebhookConfig.AllowedOrigins { + if ao == "*" { + return ao, true + } + // TODO: it is not clear what the rules for allowed hosts are. + // Need to find docs for this. For now, test for prefix. + if strings.HasPrefix(ro, ao) { + return ao, true + } + } + + return ro, false +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go new file mode 100644 index 000000000..0eec396a1 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go @@ -0,0 +1,48 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + + nethttp "net/http" + "net/url" +) + +type requestKey struct{} + +// RequestData holds the http.Request information subset that can be +// used to retrieve HTTP information for an incoming CloudEvent. +type RequestData struct { + URL *url.URL + Header nethttp.Header + RemoteAddr string + Host string +} + +// WithRequestDataAtContext uses the http.Request to add RequestData +// information to the Context. +func WithRequestDataAtContext(ctx context.Context, r *nethttp.Request) context.Context { + if r == nil { + return ctx + } + + return context.WithValue(ctx, requestKey{}, &RequestData{ + URL: r.URL, + Header: r.Header, + RemoteAddr: r.RemoteAddr, + Host: r.Host, + }) +} + +// RequestDataFromContext retrieves RequestData from the Context. 
+// If not set nil is returned. +func RequestDataFromContext(ctx context.Context) *RequestData { + if req := ctx.Value(requestKey{}); req != nil { + return req.(*RequestData) + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go new file mode 100644 index 000000000..3428ea387 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package http implements an HTTP binding using net/http module +*/ +package http diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go new file mode 100644 index 000000000..055a5c4dd --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go @@ -0,0 +1,55 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + "github.com/cloudevents/sdk-go/v2/binding" + "net/http" + "net/textproto" + "strings" + "unicode" + + "github.com/cloudevents/sdk-go/v2/binding/spec" +) + +var attributeHeadersMapping map[string]string + +type customHeaderKey int + +const ( + headerKey customHeaderKey = iota +) + +func init() { + attributeHeadersMapping = make(map[string]string) + for _, v := range specs.Versions() { + for _, a := range v.Attributes() { + if a.Kind() == spec.DataContentType { + attributeHeadersMapping[a.Name()] = ContentType + } else { + attributeHeadersMapping[a.Name()] = textproto.CanonicalMIMEHeaderKey(prefix + a.Name()) + } + } + } +} + +func extNameToHeaderName(name string) string { + var b strings.Builder + b.Grow(len(name) + len(prefix)) + b.WriteString(prefix) + b.WriteRune(unicode.ToUpper(rune(name[0]))) + b.WriteString(name[1:]) + return b.String() +} + +func HeaderFrom(ctx context.Context) http.Header { + return 
binding.GetOrDefaultFromCtx(ctx, headerKey, make(http.Header)).(http.Header) +} + +func WithCustomHeader(ctx context.Context, header http.Header) context.Context { + return context.WithValue(ctx, headerKey, header) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go new file mode 100644 index 000000000..7a7c36f9b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go @@ -0,0 +1,175 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + "io" + nethttp "net/http" + "net/textproto" + "strings" + "unicode" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" +) + +const prefix = "Ce-" + +var specs = spec.WithPrefixMatchExact( + func(s string) string { + if s == "datacontenttype" { + return "Content-Type" + } else { + return textproto.CanonicalMIMEHeaderKey("Ce-" + s) + } + }, + "Ce-", +) + +const ContentType = "Content-Type" +const ContentLength = "Content-Length" + +// Message holds the Header and Body of a HTTP Request or Response. +// The Message instance *must* be constructed from NewMessage function. +// This message *cannot* be read several times. In order to read it more times, buffer it using binding/buffering methods +type Message struct { + Header nethttp.Header + BodyReader io.ReadCloser + OnFinish func(error) error + + ctx context.Context + + format format.Format + version spec.Version +} + +// Check if http.Message implements binding.Message +var _ binding.Message = (*Message)(nil) +var _ binding.MessageContext = (*Message)(nil) +var _ binding.MessageMetadataReader = (*Message)(nil) + +// NewMessage returns a binding.Message with header and data. +// The returned binding.Message *cannot* be read several times. 
In order to read it more times, buffer it using binding/buffering methods +func NewMessage(header nethttp.Header, body io.ReadCloser) *Message { + m := Message{Header: header} + if body != nil { + m.BodyReader = body + } + if m.format = format.Lookup(header.Get(ContentType)); m.format == nil { + m.version = specs.Version(m.Header.Get(specs.PrefixedSpecVersionName())) + } + return &m +} + +// NewMessageFromHttpRequest returns a binding.Message with header and data. +// The returned binding.Message *cannot* be read several times. In order to read it more times, buffer it using binding/buffering methods +func NewMessageFromHttpRequest(req *nethttp.Request) *Message { + if req == nil { + return nil + } + message := NewMessage(req.Header, req.Body) + message.ctx = req.Context() + return message +} + +// NewMessageFromHttpResponse returns a binding.Message with header and data. +// The returned binding.Message *cannot* be read several times. In order to read it more times, buffer it using binding/buffering methods +func NewMessageFromHttpResponse(resp *nethttp.Response) *Message { + if resp == nil { + return nil + } + msg := NewMessage(resp.Header, resp.Body) + return msg +} + +func (m *Message) ReadEncoding() binding.Encoding { + if m.version != nil { + return binding.EncodingBinary + } + if m.format != nil { + if m.format == format.JSONBatch { + return binding.EncodingBatch + } + return binding.EncodingStructured + } + return binding.EncodingUnknown +} + +func (m *Message) ReadStructured(ctx context.Context, encoder binding.StructuredWriter) error { + if m.format == nil { + return binding.ErrNotStructured + } else { + return encoder.SetStructuredEvent(ctx, m.format, m.BodyReader) + } +} + +func (m *Message) ReadBinary(ctx context.Context, encoder binding.BinaryWriter) (err error) { + if m.version == nil { + return binding.ErrNotBinary + } + + for k, v := range m.Header { + attr := m.version.Attribute(k) + if attr != nil { + err = encoder.SetAttribute(attr, v[0]) + } 
else if strings.HasPrefix(k, prefix) { + // Trim Prefix + To lower + var b strings.Builder + b.Grow(len(k) - len(prefix)) + b.WriteRune(unicode.ToLower(rune(k[len(prefix)]))) + b.WriteString(k[len(prefix)+1:]) + err = encoder.SetExtension(b.String(), v[0]) + } + if err != nil { + return err + } + } + + if m.BodyReader != nil { + err = encoder.SetData(m.BodyReader) + if err != nil { + return err + } + } + + return +} + +func (m *Message) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) { + attr := m.version.AttributeFromKind(k) + if attr != nil { + h := m.Header[attributeHeadersMapping[attr.Name()]] + if h != nil { + return attr, h[0] + } + return attr, nil + } + return nil, nil +} + +func (m *Message) GetExtension(name string) interface{} { + h := m.Header[extNameToHeaderName(name)] + if h != nil { + return h[0] + } + return nil +} + +func (m *Message) Context() context.Context { + return m.ctx +} + +func (m *Message) Finish(err error) error { + if m.BodyReader != nil { + _ = m.BodyReader.Close() + } + if m.OnFinish != nil { + return m.OnFinish(err) + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go new file mode 100644 index 000000000..5e400905a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go @@ -0,0 +1,301 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "fmt" + "net" + nethttp "net/http" + "net/url" + "strings" + "time" +) + +// Option is the function signature required to be considered an http.Option. +type Option func(*Protocol) error + +// WithTarget sets the outbound recipient of cloudevents when using an HTTP +// request. 
+func WithTarget(targetUrl string) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http target option can not set nil protocol") + } + targetUrl = strings.TrimSpace(targetUrl) + if targetUrl != "" { + var err error + var target *url.URL + target, err = url.Parse(targetUrl) + if err != nil { + return fmt.Errorf("http target option failed to parse target url: %s", err.Error()) + } + + p.Target = target + + if p.RequestTemplate == nil { + p.RequestTemplate = &nethttp.Request{ + Method: nethttp.MethodPost, + } + } + p.RequestTemplate.URL = target + + return nil + } + return fmt.Errorf("http target option was empty string") + } +} + +// WithHeader sets an additional default outbound header for all cloudevents +// when using an HTTP request. +func WithHeader(key, value string) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http header option can not set nil protocol") + } + key = strings.TrimSpace(key) + if key != "" { + if p.RequestTemplate == nil { + p.RequestTemplate = &nethttp.Request{ + Method: nethttp.MethodPost, + } + } + if p.RequestTemplate.Header == nil { + p.RequestTemplate.Header = nethttp.Header{} + } + p.RequestTemplate.Header.Add(key, value) + return nil + } + return fmt.Errorf("http header option was empty string") + } +} + +// WithShutdownTimeout sets the shutdown timeout when the http server is being shutdown. +func WithShutdownTimeout(timeout time.Duration) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http shutdown timeout option can not set nil protocol") + } + p.ShutdownTimeout = timeout + return nil + } +} + +func checkListen(p *Protocol, prefix string) error { + switch { + case p.listener.Load() != nil: + return fmt.Errorf("error setting %v: listener already set", prefix) + } + return nil +} + +// WithPort sets the listening port for StartReceiver. +// Only one of WithListener or WithPort is allowed. 
// Functional options for configuring the HTTP Protocol. Every option
// defensively rejects a nil *Protocol so a misconfigured caller fails with
// an error instead of a panic.

// WithPort sets the port StartReceiver listens on.
// Only one of WithListener or WithPort is allowed (enforced via checkListen).
func WithPort(port int) Option {
	return func(p *Protocol) error {
		if p == nil {
			return fmt.Errorf("http port option can not set nil protocol")
		}
		if port < 0 || port > 65535 {
			return fmt.Errorf("http port option was given an invalid port: %d", port)
		}
		if err := checkListen(p, "http port option"); err != nil {
			return err
		}
		p.Port = port
		return nil
	}
}

// WithListener sets the listener for StartReceiver.
// Only one of WithListener or WithPort is allowed.
func WithListener(l net.Listener) Option {
	return func(p *Protocol) error {
		if p == nil {
			return fmt.Errorf("http listener option can not set nil protocol")
		}
		if err := checkListen(p, "http listener"); err != nil {
			return err
		}
		// listener is an atomic.Value; Store makes the listener visible to
		// listen() and GetListeningPort().
		p.listener.Store(l)
		return nil
	}
}

// WithPath sets the path to receive cloudevents on for HTTP transports.
func WithPath(path string) Option {
	return func(p *Protocol) error {
		if p == nil {
			return fmt.Errorf("http path option can not set nil protocol")
		}
		path = strings.TrimSpace(path)
		if len(path) == 0 {
			return fmt.Errorf("http path option was given an invalid path: %q", path)
		}
		p.Path = path
		return nil
	}
}

// WithMethod sets the HTTP verb (GET, POST, PUT, etc.) to use
// when using an HTTP request.
func WithMethod(method string) Option {
	return func(p *Protocol) error {
		if p == nil {
			return fmt.Errorf("http method option can not set nil protocol")
		}
		method = strings.TrimSpace(method)
		if method != "" {
			// Lazily create the request template so the method has a place to live.
			if p.RequestTemplate == nil {
				p.RequestTemplate = &nethttp.Request{}
			}
			p.RequestTemplate.Method = method
			return nil
		}
		return fmt.Errorf("http method option was empty string")
	}
}

//
// Middleware is a function that takes an existing http.Handler and wraps it in middleware,
// returning the wrapped http.Handler.
type Middleware func(next nethttp.Handler) nethttp.Handler

// WithMiddleware adds an HTTP middleware to the transport. It may be specified multiple times.
// Middleware is applied to everything before it. For example
// `NewClient(WithMiddleware(foo), WithMiddleware(bar))` would result in `bar(foo(original))`.
func WithMiddleware(middleware Middleware) Option {
	return func(p *Protocol) error {
		if p == nil {
			return fmt.Errorf("http middleware option can not set nil protocol")
		}
		p.middleware = append(p.middleware, middleware)
		return nil
	}
}

// WithRoundTripper sets the HTTP RoundTripper.
func WithRoundTripper(roundTripper nethttp.RoundTripper) Option {
	return func(p *Protocol) error {
		if p == nil {
			return fmt.Errorf("http round tripper option can not set nil protocol")
		}
		p.roundTripper = roundTripper
		return nil
	}
}

// WithRoundTripperDecorator decorates the default HTTP RoundTripper chosen.
func WithRoundTripperDecorator(decorator func(roundTripper nethttp.RoundTripper) nethttp.RoundTripper) Option {
	return func(p *Protocol) error {
		if p == nil {
			return fmt.Errorf("http round tripper option can not set nil protocol")
		}
		// Pick the transport to decorate: an explicitly set round tripper wins,
		// then the configured client's transport, then net/http's default.
		if p.roundTripper == nil {
			if p.Client == nil {
				p.roundTripper = nethttp.DefaultTransport
			} else {
				p.roundTripper = p.Client.Transport
			}
		}
		p.roundTripper = decorator(p.roundTripper)
		return nil
	}
}

// WithClient sets the protocol client.
// The client is passed by value, so the Protocol stores a private copy.
func WithClient(client nethttp.Client) Option {
	return func(p *Protocol) error {
		if p == nil {
			return fmt.Errorf("client option can not set nil protocol")
		}
		p.Client = &client
		return nil
	}
}

// WithGetHandlerFunc sets the http GET handler func
func WithGetHandlerFunc(fn nethttp.HandlerFunc) Option {
	return func(p *Protocol) error {
		if p == nil {
			return fmt.Errorf("http GET handler func can not set nil protocol")
		}
		p.GetHandlerFn = fn
		return nil
	}
}

// WithOptionsHandlerFunc sets the http OPTIONS handler func
func WithOptionsHandlerFunc(fn nethttp.HandlerFunc) Option {
	return func(p *Protocol) error {
		if p == nil {
			return fmt.Errorf("http OPTIONS handler func can not set nil protocol")
		}
		p.OptionsHandlerFn = fn
		return nil
	}
}

// WithDefaultOptionsHandlerFunc sets the options handler to be the built in handler and configures the options.
// methods: the supported methods reported to OPTIONS caller.
// rate: the rate limit reported to OPTIONS caller.
// origins: the prefix of the accepted origins, or "*".
// callback: perform the callback to ACK the OPTIONS request.
func WithDefaultOptionsHandlerFunc(methods []string, rate int, origins []string, callback bool) Option {
	return func(p *Protocol) error {
		if p == nil {
			return fmt.Errorf("http OPTIONS handler func can not set nil protocol")
		}
		p.OptionsHandlerFn = p.OptionsHandler
		p.WebhookConfig = &WebhookConfig{
			AllowedMethods: methods,
			// AllowedRate points at the captured parameter; each invocation of
			// this Option closure gets its own copy of rate, so this is safe.
			AllowedRate:     &rate,
			AllowedOrigins:  origins,
			AutoACKCallback: callback,
		}
		return nil
	}
}

// IsRetriable is a custom function that can be used to override the
// default retriable status codes.
type IsRetriable func(statusCode int) bool

// WithIsRetriableFunc sets the function that gets called to determine if an
// error should be retried. If not set, the defaultIsRetriableFunc is used.
func WithIsRetriableFunc(isRetriable IsRetriable) Option {
	return func(p *Protocol) error {
		if p == nil {
			return fmt.Errorf("isRetriable handler func can not set nil protocol")
		}
		if isRetriable == nil {
			return fmt.Errorf("isRetriable handler can not be nil")
		}
		p.isRetriableFunc = isRetriable
		return nil
	}
}

// WithRateLimiter sets the rate limiter consulted by ServeHTTP before any
// request is processed.
func WithRateLimiter(rl RateLimiter) Option {
	return func(p *Protocol) error {
		if p == nil {
			// NOTE(review): error text appears copy-pasted from the OPTIONS
			// option above; the message does not describe this option.
			return fmt.Errorf("http OPTIONS handler func can not set nil protocol")
		}
		p.limiter = rl
		return nil
	}
}

// WithRequestDataAtContextMiddleware adds to the Context RequestData.
// This enables a user's dispatch handler to inspect HTTP request information by
// retrieving it from the Context.
+func WithRequestDataAtContextMiddleware() Option { + return WithMiddleware(func(next nethttp.Handler) nethttp.Handler { + return nethttp.HandlerFunc(func(w nethttp.ResponseWriter, r *nethttp.Request) { + ctx := WithRequestDataAtContext(r.Context(), r) + next.ServeHTTP(w, r.WithContext(ctx)) + }) + }) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go new file mode 100644 index 000000000..dba6fd7ba --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go @@ -0,0 +1,408 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +const ( + // DefaultShutdownTimeout defines the default timeout given to the http.Server when calling Shutdown. + DefaultShutdownTimeout = time.Minute * 1 +) + +type msgErr struct { + msg *Message + respFn protocol.ResponseFn + err error +} + +// Default error codes that we retry on - string isn't used, it's just there so +// people know what each error code's title is. +// To modify this use Option +var defaultRetriableErrors = map[int]string{ + 404: "Not Found", + 413: "Payload Too Large", + 425: "Too Early", + 429: "Too Many Requests", + 502: "Bad Gateway", + 503: "Service Unavailable", + 504: "Gateway Timeout", +} + +// Protocol acts as both a http client and a http handler. 
+type Protocol struct { + Target *url.URL + RequestTemplate *http.Request + Client *http.Client + incoming chan msgErr + + // OptionsHandlerFn handles the OPTIONS method requests and is intended to + // implement the abuse protection spec: + // https://github.com/cloudevents/spec/blob/v1.0/http-webhook.md#4-abuse-protection + OptionsHandlerFn http.HandlerFunc + WebhookConfig *WebhookConfig + + GetHandlerFn http.HandlerFunc + DeleteHandlerFn http.HandlerFunc + + // To support Opener: + + // ShutdownTimeout defines the timeout given to the http.Server when calling Shutdown. + // If 0, DefaultShutdownTimeout is used. + ShutdownTimeout time.Duration + + // Port is the port configured to bind the receiver to. Defaults to 8080. + // If you want to know the effective port you're listening to, use GetListeningPort() + Port int + // Path is the path to bind the receiver to. Defaults to "/". + Path string + + // Receive Mutex + reMu sync.Mutex + // Handler is the handler the http Server will use. Use this to reuse the + // http server. If nil, the Protocol will create a one. + Handler *http.ServeMux + + listener atomic.Value + roundTripper http.RoundTripper + server *http.Server + handlerRegistered bool + middleware []Middleware + limiter RateLimiter + + isRetriableFunc IsRetriable +} + +func New(opts ...Option) (*Protocol, error) { + p := &Protocol{ + incoming: make(chan msgErr), + Port: -1, + } + if err := p.applyOptions(opts...); err != nil { + return nil, err + } + + if p.Client == nil { + p.Client = http.DefaultClient + } + + if p.roundTripper != nil { + p.Client.Transport = p.roundTripper + } + + if p.ShutdownTimeout == 0 { + p.ShutdownTimeout = DefaultShutdownTimeout + } + + if p.isRetriableFunc == nil { + p.isRetriableFunc = defaultIsRetriableFunc + } + + if p.limiter == nil { + p.limiter = noOpLimiter{} + } + + return p, nil +} + +// NewObserved creates an HTTP protocol with trace propagating middleware. 
// Deprecated: now this behaves like New and it will be removed in future releases,
// setup the http observed protocol using the opencensus separate module NewObservedHttp
var NewObserved = New

// applyOptions runs each functional option against p, stopping at the first error.
func (p *Protocol) applyOptions(opts ...Option) error {
	for _, fn := range opts {
		if err := fn(p); err != nil {
			return err
		}
	}
	return nil
}

// Send implements binding.Sender
func (p *Protocol) Send(ctx context.Context, m binding.Message, transformers ...binding.Transformer) error {
	if ctx == nil {
		return fmt.Errorf("nil Context")
	} else if m == nil {
		return fmt.Errorf("nil Message")
	}

	msg, err := p.Request(ctx, m, transformers...)
	if msg != nil {
		defer func() { _ = msg.Finish(err) }()
	}
	if err != nil && !protocol.IsACK(err) {
		var res *Result
		if protocol.ResultAs(err, &res) {
			if message, ok := msg.(*Message); ok {
				// Enrich the error with the HTTP response body text.
				buf := new(bytes.Buffer)
				// NOTE(review): the ReadFrom error is silently ignored; a read
				// failure yields a partial/empty errorStr.
				buf.ReadFrom(message.BodyReader)
				errorStr := buf.String()
				// If the error is not wrapped, then append the original error string.
				if og, ok := err.(*Result); ok {
					og.Format = og.Format + "%s"
					og.Args = append(og.Args, errorStr)
					err = og
				} else {
					err = NewResult(res.StatusCode, "%w: %s", err, errorStr)
				}
			}
		}
	}
	return err
}

// Request implements binding.Requester
func (p *Protocol) Request(ctx context.Context, m binding.Message, transformers ...binding.Transformer) (binding.Message, error) {
	if ctx == nil {
		return nil, fmt.Errorf("nil Context")
	} else if m == nil {
		return nil, fmt.Errorf("nil Message")
	}

	var err error
	defer func() { _ = m.Finish(err) }()

	req := p.makeRequest(ctx)

	if p.Client == nil || req == nil || req.URL == nil {
		return nil, fmt.Errorf("not initialized: %#v", p)
	}

	if err = WriteRequest(ctx, m, req, transformers...); err != nil {
		return nil, err
	}

	return p.do(ctx, req)
}

// makeRequest builds the outgoing request. Target precedence, lowest to
// highest: RequestTemplate.URL, p.Target, then a target carried in ctx.
func (p *Protocol) makeRequest(ctx context.Context) *http.Request {
	req := &http.Request{
		Method: http.MethodPost,
		Header: HeaderFrom(ctx),
	}

	if p.RequestTemplate != nil {
		req.Method = p.RequestTemplate.Method
		req.URL = p.RequestTemplate.URL
		req.Close = p.RequestTemplate.Close
		req.Host = p.RequestTemplate.Host
		copyHeadersEnsure(p.RequestTemplate.Header, &req.Header)
	}

	if p.Target != nil {
		req.URL = p.Target
	}

	// Override the default request with target from context.
	if target := cecontext.TargetFrom(ctx); target != nil {
		req.URL = target
	}
	return req.WithContext(ctx)
}

// Ensure to is a non-nil map before copying
func copyHeadersEnsure(from http.Header, to *http.Header) {
	if len(from) > 0 {
		if *to == nil {
			*to = http.Header{}
		}
		copyHeaders(from, *to)
	}
}

// copyHeaders appends every value of every header in from onto to.
func copyHeaders(from, to http.Header) {
	if from == nil || to == nil {
		return
	}
	for header, values := range from {
		for _, value := range values {
			to.Add(header, value)
		}
	}
}

// Receive the next incoming HTTP request as a CloudEvent.
+// Returns non-nil error if the incoming HTTP request fails to parse as a CloudEvent +// Returns io.EOF if the receiver is closed. +func (p *Protocol) Receive(ctx context.Context) (binding.Message, error) { + if ctx == nil { + return nil, fmt.Errorf("nil Context") + } + + msg, fn, err := p.Respond(ctx) + // No-op the response when finish is invoked. + if msg != nil { + return binding.WithFinish(msg, func(err error) { + if fn != nil { + _ = fn(ctx, nil, nil) + } + }), err + } else { + return nil, err + } +} + +// Respond receives the next incoming HTTP request as a CloudEvent and waits +// for the response callback to invoked before continuing. +// Returns non-nil error if the incoming HTTP request fails to parse as a CloudEvent +// Returns io.EOF if the receiver is closed. +func (p *Protocol) Respond(ctx context.Context) (binding.Message, protocol.ResponseFn, error) { + if ctx == nil { + return nil, nil, fmt.Errorf("nil Context") + } + + select { + case in, ok := <-p.incoming: + if !ok { + return nil, nil, io.EOF + } + + if in.msg == nil { + return nil, in.respFn, in.err + } + return in.msg, in.respFn, in.err + + case <-ctx.Done(): + return nil, nil, io.EOF + } +} + +// ServeHTTP implements http.Handler. +// Blocks until ResponseFn is invoked. 
func (p *Protocol) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
	// always apply limiter first using req context
	ok, reset, err := p.limiter.Allow(req.Context(), req)
	if err != nil {
		// NOTE(review): this send blocks until a Receive/Respond caller drains
		// p.incoming; if nobody is receiving, the handler goroutine stalls here.
		p.incoming <- msgErr{msg: nil, err: fmt.Errorf("unable to acquire rate limit token: %w", err)}
		rw.WriteHeader(http.StatusInternalServerError)
		return
	}

	if !ok {
		// Tell the client when it may retry, per the limiter's reset hint.
		rw.Header().Add("Retry-After", strconv.Itoa(int(reset)))
		http.Error(rw, "limit exceeded", 429)
		return
	}

	// Filter the GET style methods:
	switch req.Method {
	case http.MethodOptions:
		if p.OptionsHandlerFn == nil {
			rw.WriteHeader(http.StatusMethodNotAllowed)
			return
		}
		p.OptionsHandlerFn(rw, req)
		return

	case http.MethodGet:
		if p.GetHandlerFn == nil {
			rw.WriteHeader(http.StatusMethodNotAllowed)
			return
		}
		p.GetHandlerFn(rw, req)
		return

	case http.MethodDelete:
		if p.DeleteHandlerFn == nil {
			rw.WriteHeader(http.StatusMethodNotAllowed)
			return
		}
		p.DeleteHandlerFn(rw, req)
		return
	}

	m := NewMessageFromHttpRequest(req)
	if m == nil {
		// Should never get here unless ServeHTTP is called directly.
		p.incoming <- msgErr{msg: nil, err: binding.ErrUnknownEncoding}
		rw.WriteHeader(http.StatusBadRequest)
		return // if there was no message, return.
	}

	// Capture any Finish error so the response callback can surface it.
	var finishErr error
	m.OnFinish = func(err error) error {
		finishErr = err
		return nil
	}

	wg := sync.WaitGroup{}
	wg.Add(1)
	var fn protocol.ResponseFn = func(ctx context.Context, respMsg binding.Message, res protocol.Result, transformers ...binding.Transformer) error {
		// Unblock the ServeHTTP after the reply is written
		defer func() {
			wg.Done()
		}()

		if finishErr != nil {
			http.Error(rw, fmt.Sprintf("Cannot forward CloudEvent: %s", finishErr), http.StatusInternalServerError)
			return finishErr
		}

		status := http.StatusOK
		var errMsg string
		if res != nil {
			var result *Result
			switch {
			case protocol.ResultAs(res, &result):
				// Honor an explicit HTTP status carried in the Result.
				if result.StatusCode > 100 && result.StatusCode < 600 {
					status = result.StatusCode
				}
				errMsg = fmt.Errorf(result.Format, result.Args...).Error()
			case !protocol.IsACK(res):
				// Map client errors to http status code
				validationError := event.ValidationError{}
				if errors.As(res, &validationError) {
					status = http.StatusBadRequest
					rw.Header().Set("content-type", "text/plain")
					rw.WriteHeader(status)
					_, _ = rw.Write([]byte(validationError.Error()))
					return validationError
				} else if errors.Is(res, binding.ErrUnknownEncoding) {
					status = http.StatusUnsupportedMediaType
				} else {
					status = http.StatusInternalServerError
				}
			}
		}

		if respMsg != nil {
			err := WriteResponseWriter(ctx, respMsg, status, rw, transformers...)
			return respMsg.Finish(err)
		}

		rw.WriteHeader(status)
		if _, err := rw.Write([]byte(errMsg)); err != nil {
			return err
		}
		return nil
	}

	p.incoming <- msgErr{msg: m, respFn: fn} // Send to Request
	// Block until ResponseFn is invoked
	wg.Wait()
}

// defaultIsRetriableFunc reports whether sc is one of the built-in
// retriable HTTP status codes.
func defaultIsRetriableFunc(sc int) bool {
	_, ok := defaultRetriableErrors[sc]
	return ok
}

// diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go
// new file mode 100644
// index 000000000..04ef96915
// --- /dev/null
// +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go
// @@ -0,0 +1,143 @@

/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package http

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"strings"

	"github.com/cloudevents/sdk-go/v2/protocol"
)

var _ protocol.Opener = (*Protocol)(nil)

// OpenInbound starts the HTTP server and serves incoming requests until ctx
// is cancelled, then attempts a graceful shutdown bounded by ShutdownTimeout.
func (p *Protocol) OpenInbound(ctx context.Context) error {
	p.reMu.Lock()
	defer p.reMu.Unlock()

	if p.Handler == nil {
		p.Handler = http.NewServeMux()
	}

	if !p.handlerRegistered {
		// handler.Handle might panic if the user tries to use the same path as the sdk.
		p.Handler.Handle(p.GetPath(), p)
		p.handlerRegistered = true
	}

	// Acquire (or create) the listener before building the server so its
	// address is known.
	listener, err := p.listen()
	if err != nil {
		return err
	}

	p.server = &http.Server{
		Addr:    listener.Addr().String(),
		Handler: attachMiddleware(p.Handler, p.middleware),
		// NOTE(review): DefaultTimeout is declared elsewhere in this package.
		ReadTimeout:  DefaultTimeout,
		WriteTimeout: DefaultTimeout,
	}

	// Shutdown
	defer func() {
		_ = p.server.Close()
		p.server = nil
	}()

	errChan := make(chan error)
	go func() {
		errChan <- p.server.Serve(listener)
	}()

	// wait for the server to return or ctx.Done().
	select {
	case <-ctx.Done():
		// Try a graceful shutdown.
		ctx, cancel := context.WithTimeout(context.Background(), p.ShutdownTimeout)
		defer cancel()

		shdwnErr := p.server.Shutdown(ctx)
		if shdwnErr != nil {
			shdwnErr = fmt.Errorf("shutting down HTTP server: %w", shdwnErr)
		}

		// Wait for server goroutine to exit
		rntmErr := <-errChan
		if rntmErr != nil && rntmErr != http.ErrServerClosed {
			rntmErr = fmt.Errorf("server failed during shutdown: %w", rntmErr)

			if shdwnErr != nil {
				return fmt.Errorf("combined error during shutdown of HTTP server: %w, %v",
					shdwnErr, rntmErr)
			}

			return rntmErr
		}

		return shdwnErr

	case err := <-errChan:
		if err != nil {
			return fmt.Errorf("during runtime of HTTP server: %w", err)
		}
		return nil
	}
}

// GetListeningPort returns the listening port.
// Returns -1 if it's not listening.
func (p *Protocol) GetListeningPort() int {
	if listener := p.listener.Load(); listener != nil {
		if tcpAddr, ok := listener.(net.Listener).Addr().(*net.TCPAddr); ok {
			return tcpAddr.Port
		}
	}
	return -1
}

// listen if not already listening, update t.Port
func (p *Protocol) listen() (net.Listener, error) {
	if p.listener.Load() == nil {
		// Default to 8080 unless a port was configured.
		port := 8080
		if p.Port != -1 {
			port = p.Port
			if port < 0 || port > 65535 {
				return nil, fmt.Errorf("invalid port %d", port)
			}
		}
		var err error
		var listener net.Listener
		if listener, err = net.Listen("tcp", fmt.Sprintf(":%d", port)); err != nil {
			return nil, err
		}
		p.listener.Store(listener)
		return listener, nil
	}
	return p.listener.Load().(net.Listener), nil
}

// GetPath returns the path the transport is hosted on. If the path is '/',
// the transport will handle requests on any URI. To discover the true path
// a request was received on, inspect the context from Receive(cxt, ...) with
// TransportContextFrom(ctx).
func (p *Protocol) GetPath() string {
	path := strings.TrimSpace(p.Path)
	if len(path) > 0 {
		return path
	}
	return "/" // default
}

// attachMiddleware attaches the HTTP middleware to the specified handler.
func attachMiddleware(h http.Handler, middleware []Middleware) http.Handler {
	// Applied in slice order, so the last middleware ends up outermost.
	for _, m := range middleware {
		h = m(h)
	}
	return h
}

// diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go
// new file mode 100644
// index 000000000..9c4c10a29
// --- /dev/null
// +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go
// @@ -0,0 +1,34 @@

/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package http

import (
	"context"
	"net/http"
)

type RateLimiter interface {
	// Allow attempts to take one token from the rate limiter for the specified
	// request. It returns ok when this operation was successful. In case ok is
	// false, reset will indicate the time in seconds when it is safe to perform
	// another attempt. An error is returned when this operation failed, e.g. due to
	// a backend error.
	Allow(ctx context.Context, r *http.Request) (ok bool, reset uint64, err error)
	// Close terminates rate limiter and cleans up any data structures or
	// connections that may remain open. After a store is stopped, Take() should
	// always return zero values.
	Close(ctx context.Context) error
}

// noOpLimiter is the default RateLimiter: it admits every request.
type noOpLimiter struct{}

func (n noOpLimiter) Allow(ctx context.Context, r *http.Request) (bool, uint64, error) {
	return true, 0, nil
}

func (n noOpLimiter) Close(ctx context.Context) error {
	return nil
}

// diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go
// new file mode 100644
// index 000000000..71e7346f3
// --- /dev/null
// +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go
// @@ -0,0 +1,145 @@

/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package http

import (
	"bytes"
	"context"
	"errors"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"time"

	"go.uber.org/zap"

	"github.com/cloudevents/sdk-go/v2/binding"
	cecontext "github.com/cloudevents/sdk-go/v2/context"
	"github.com/cloudevents/sdk-go/v2/protocol"
)

// do dispatches the request, retrying according to the retry parameters
// carried in ctx (if any).
func (p *Protocol) do(ctx context.Context, req *http.Request) (binding.Message, error) {
	params := cecontext.RetriesFrom(ctx)

	switch params.Strategy {
	case cecontext.BackoffStrategyConstant, cecontext.BackoffStrategyLinear, cecontext.BackoffStrategyExponential:
		return p.doWithRetry(ctx, params, req)
	case cecontext.BackoffStrategyNone:
		fallthrough
	default:
		return p.doOnce(req)
	}
}

// doOnce performs a single HTTP round trip; 2xx responses map to ACK,
// everything else to NACK wrapped in an http Result.
func (p *Protocol) doOnce(req *http.Request) (binding.Message, protocol.Result) {
	resp, err := p.Client.Do(req)
	if err != nil {
		return nil, protocol.NewReceipt(false, "%w", err)
	}

	var result protocol.Result
	if resp.StatusCode/100 == 2 {
		result = protocol.ResultACK
	} else {
		result = protocol.ResultNACK
	}

	return NewMessage(resp.Header, resp.Body), NewResult(resp.StatusCode, "%w", result)
}

// doWithRetry repeats doOnce until it ACKs, a non-retriable status is seen,
// or the backoff budget is exhausted. The request body is buffered once so it
// can be replayed on each attempt.
func (p *Protocol) doWithRetry(ctx context.Context, params *cecontext.RetryParams, req *http.Request) (binding.Message, error) {
	then := time.Now()
	retry := 0
	results := make([]protocol.Result, 0)

	var (
		body []byte
		err  error
	)

	if req != nil && req.Body != nil {
		defer func() {
			if err = req.Body.Close(); err != nil {
				cecontext.LoggerFrom(ctx).Warnw("could not close request body", zap.Error(err))
			}
		}()
		body, err = ioutil.ReadAll(req.Body)
		if err != nil {
			// NOTE(review): library code panicking on a body read error is
			// surprising; confirm against upstream before relying on it.
			panic(err)
		}
		resetBody(req, body)
	}

	for {
		msg, result := p.doOnce(req)

		// Fast track common case.
		if protocol.IsACK(result) {
			return msg, NewRetriesResult(result, retry, then, results)
		}

		// Try again?
		//
		// Make sure the error was something we should retry.

		{
			// Transport-level failures (url.Error) are always retried.
			var uErr *url.Error
			if errors.As(result, &uErr) {
				goto DoBackoff
			}
		}

		{
			var httpResult *Result
			if errors.As(result, &httpResult) {
				sc := httpResult.StatusCode
				if p.isRetriableFunc(sc) {
					// retry!
					goto DoBackoff
				} else {
					// Permanent error
					cecontext.LoggerFrom(ctx).Debugw("status code not retryable, will not try again",
						zap.Error(httpResult),
						zap.Int("statusCode", sc))
					return msg, NewRetriesResult(result, retry, then, results)
				}
			}
		}

	DoBackoff:
		// Rewind the buffered body before the next attempt.
		resetBody(req, body)

		// Wait for the correct amount of backoff time.

		// total tries = retry + 1
		if err := params.Backoff(ctx, retry+1); err != nil {
			// do not try again.
			cecontext.LoggerFrom(ctx).Debugw("backoff error, will not try again", zap.Error(err))
			return msg, NewRetriesResult(result, retry, then, results)
		}

		retry++
		results = append(results, result)
	}
}

// reset body to allow it to be read multiple times, e.g.
when retrying http +// requests +func resetBody(req *http.Request, body []byte) { + if req == nil || req.Body == nil { + return + } + + req.Body = ioutil.NopCloser(bytes.NewReader(body)) + + // do not modify existing GetBody function + if req.GetBody == nil { + req.GetBody = func() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(body)), nil + } + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go new file mode 100644 index 000000000..7a0b2626c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go @@ -0,0 +1,60 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "errors" + "fmt" + + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// NewResult returns a fully populated http Result that should be used as +// a transport.Result. +func NewResult(statusCode int, messageFmt string, args ...interface{}) protocol.Result { + return &Result{ + StatusCode: statusCode, + Format: messageFmt, + Args: args, + } +} + +// Result wraps the fields required to make adjustments for http Responses. +type Result struct { + StatusCode int + Format string + Args []interface{} +} + +// make sure Result implements error. +var _ error = (*Result)(nil) + +// Is returns if the target error is a Result type checking target. +func (e *Result) Is(target error) bool { + if o, ok := target.(*Result); ok { + return e.StatusCode == o.StatusCode + } + + // Special case for nil == ACK + if o, ok := target.(*protocol.Receipt); ok { + if e == nil && o.ACK { + return true + } + } + + // Allow for wrapped errors. + if e != nil { + err := fmt.Errorf(e.Format, e.Args...) + return errors.Is(err, target) + } + return false +} + +// Error returns the string that is formed by using the format string with the +// provided args. 
+func (e *Result) Error() string { + return fmt.Sprintf("%d: %v", e.StatusCode, fmt.Errorf(e.Format, e.Args...)) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go new file mode 100644 index 000000000..f4046d522 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go @@ -0,0 +1,59 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "fmt" + "time" + + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// NewRetriesResult returns a http RetriesResult that should be used as +// a transport.Result without retries +func NewRetriesResult(result protocol.Result, retries int, startTime time.Time, attempts []protocol.Result) protocol.Result { + rr := &RetriesResult{ + Result: result, + Retries: retries, + Duration: time.Since(startTime), + } + if len(attempts) > 0 { + rr.Attempts = attempts + } + return rr +} + +// RetriesResult wraps the fields required to make adjustments for http Responses. +type RetriesResult struct { + // The last result + protocol.Result + + // Retries is the number of times the request was tried + Retries int + + // Duration records the time spent retrying. Exclude the successful request (if any) + Duration time.Duration + + // Attempts of all failed requests. Exclude last result. + Attempts []protocol.Result +} + +// make sure RetriesResult implements error. +var _ error = (*RetriesResult)(nil) + +// Is returns if the target error is a RetriesResult type checking target. +func (e *RetriesResult) Is(target error) bool { + return protocol.ResultIs(e.Result, target) +} + +// Error returns the string that is formed by using the format string with the +// provided args. 
+func (e *RetriesResult) Error() string { + if e.Retries == 0 { + return e.Result.Error() + } + return fmt.Sprintf("%s (%dx)", e.Result.Error(), e.Retries) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go new file mode 100644 index 000000000..350fc1cf6 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go @@ -0,0 +1,89 @@ +/* + Copyright 2022 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "bytes" + "context" + "encoding/json" + nethttp "net/http" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/event" +) + +// NewEventFromHTTPRequest returns an Event. +func NewEventFromHTTPRequest(req *nethttp.Request) (*event.Event, error) { + msg := NewMessageFromHttpRequest(req) + return binding.ToEvent(context.Background(), msg) +} + +// NewEventFromHTTPResponse returns an Event. +func NewEventFromHTTPResponse(resp *nethttp.Response) (*event.Event, error) { + msg := NewMessageFromHttpResponse(resp) + return binding.ToEvent(context.Background(), msg) +} + +// NewEventsFromHTTPRequest returns a batched set of Events from a HTTP Request +func NewEventsFromHTTPRequest(req *nethttp.Request) ([]event.Event, error) { + msg := NewMessageFromHttpRequest(req) + return binding.ToEvents(context.Background(), msg, msg.BodyReader) +} + +// NewEventsFromHTTPResponse returns a batched set of Events from a HTTP Response +func NewEventsFromHTTPResponse(resp *nethttp.Response) ([]event.Event, error) { + msg := NewMessageFromHttpResponse(resp) + return binding.ToEvents(context.Background(), msg, msg.BodyReader) +} + +// NewHTTPRequestFromEvent creates a http.Request object that can be used with any http.Client for a singular event. +// This is an HTTP POST action to the provided url. 
+func NewHTTPRequestFromEvent(ctx context.Context, url string, event event.Event) (*nethttp.Request, error) { + if err := event.Validate(); err != nil { + return nil, err + } + + req, err := nethttp.NewRequestWithContext(ctx, nethttp.MethodPost, url, nil) + if err != nil { + return nil, err + } + if err := WriteRequest(ctx, (*binding.EventMessage)(&event), req); err != nil { + return nil, err + } + + return req, nil +} + +// NewHTTPRequestFromEvents creates a http.Request object that can be used with any http.Client for sending +// a batched set of events. This is an HTTP POST action to the provided url. +func NewHTTPRequestFromEvents(ctx context.Context, url string, events []event.Event) (*nethttp.Request, error) { + // Sending batch events is quite straightforward, as there is only JSON format, so a simple implementation. + for _, e := range events { + if err := e.Validate(); err != nil { + return nil, err + } + } + var buffer bytes.Buffer + err := json.NewEncoder(&buffer).Encode(events) + if err != nil { + return nil, err + } + + request, err := nethttp.NewRequestWithContext(ctx, nethttp.MethodPost, url, &buffer) + if err != nil { + return nil, err + } + + request.Header.Set(ContentType, event.ApplicationCloudEventsBatchJSON) + + return request, nil +} + +// IsHTTPBatch returns if the current http.Request or http.Response is a batch event operation, by checking the +// header `Content-Type` value. 
func IsHTTPBatch(header nethttp.Header) bool {
	return header.Get(ContentType) == event.ApplicationCloudEventsBatchJSON
}

// diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go
// new file mode 100644
// index 000000000..43ad36180
// --- /dev/null
// +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go
// @@ -0,0 +1,141 @@

/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package http

import (
	"bytes"
	"context"
	"io"
	"io/ioutil"
	"net/http"
	"strings"

	"github.com/cloudevents/sdk-go/v2/binding"
	"github.com/cloudevents/sdk-go/v2/binding/format"
	"github.com/cloudevents/sdk-go/v2/binding/spec"
	"github.com/cloudevents/sdk-go/v2/types"
)

// WriteRequest fills the provided httpRequest with the message m.
// Using context you can tweak the encoding processing (more details on binding.Write documentation).
func WriteRequest(ctx context.Context, m binding.Message, httpRequest *http.Request, transformers ...binding.Transformer) error {
	// The same writer serves both the structured and the binary encoding path.
	structuredWriter := (*httpRequestWriter)(httpRequest)
	binaryWriter := (*httpRequestWriter)(httpRequest)

	_, err := binding.Write(
		ctx,
		m,
		structuredWriter,
		binaryWriter,
		transformers...,
	)
	return err
}

// httpRequestWriter adapts an *http.Request into the binding writer interfaces.
type httpRequestWriter http.Request

func (b *httpRequestWriter) SetStructuredEvent(ctx context.Context, format format.Format, event io.Reader) error {
	b.Header.Set(ContentType, format.MediaType())
	return b.setBody(event)
}

func (b *httpRequestWriter) Start(ctx context.Context) error {
	return nil
}

func (b *httpRequestWriter) End(ctx context.Context) error {
	return nil
}

func (b *httpRequestWriter) SetData(data io.Reader) error {
	return b.setBody(data)
}

// setBody is a cherry-pick of the implementation in http.NewRequestWithContext
func (b *httpRequestWriter) setBody(body io.Reader) error {
	rc, ok := body.(io.ReadCloser)
	if !ok && body != nil {
		rc = ioutil.NopCloser(body)
	}
	b.Body = rc
	if body != nil {
		// For the common buffer/reader types, record ContentLength and a
		// GetBody so the request can be replayed (redirects, retries).
		switch v := body.(type) {
		case *bytes.Buffer:
			b.ContentLength = int64(v.Len())
			buf := v.Bytes()
			b.GetBody = func() (io.ReadCloser, error) {
				r := bytes.NewReader(buf)
				return ioutil.NopCloser(r), nil
			}
		case *bytes.Reader:
			b.ContentLength = int64(v.Len())
			snapshot := *v
			b.GetBody = func() (io.ReadCloser, error) {
				r := snapshot
				return ioutil.NopCloser(&r), nil
			}
		case *strings.Reader:
			b.ContentLength = int64(v.Len())
			snapshot := *v
			b.GetBody = func() (io.ReadCloser, error) {
				r := snapshot
				return ioutil.NopCloser(&r), nil
			}
		default:
			// This is where we'd set it to -1 (at least
			// if body != NoBody) to mean unknown, but
			// that broke people during the Go 1.8 testing
			// period. People depend on it being 0 I
			// guess. Maybe retry later. See Issue 18117.
		}
		// For client requests, Request.ContentLength of 0
		// means either actually 0, or unknown. The only way
		// to explicitly say that the ContentLength is zero is
		// to set the Body to nil. But turns out too much code
		// depends on NewRequest returning a non-nil Body,
		// so we use a well-known ReadCloser variable instead
		// and have the http package also treat that sentinel
		// variable to mean explicitly zero.
		if b.GetBody != nil && b.ContentLength == 0 {
			b.Body = http.NoBody
			b.GetBody = func() (io.ReadCloser, error) { return http.NoBody, nil }
		}
	}
	return nil
}

func (b *httpRequestWriter) SetAttribute(attribute spec.Attribute, value interface{}) error {
	mapping := attributeHeadersMapping[attribute.Name()]
	if value == nil {
		// nil clears the attribute header entirely.
		delete(b.Header, mapping)
		return nil
	}

	// Http headers, everything is a string!
	s, err := types.Format(value)
	if err != nil {
		return err
	}
	b.Header[mapping] = append(b.Header[mapping], s)
	return nil
}

func (b *httpRequestWriter) SetExtension(name string, value interface{}) error {
	if value == nil {
		delete(b.Header, extNameToHeaderName(name))
		return nil
	}
	// Http headers, everything is a string!
	s, err := types.Format(value)
	if err != nil {
		return err
	}
	// Extensions replace (not append) any existing header value.
	b.Header[extNameToHeaderName(name)] = []string{s}
	return nil
}

var _ binding.StructuredWriter = (*httpRequestWriter)(nil) // Test it conforms to the interface
var _ binding.BinaryWriter = (*httpRequestWriter)(nil)     // Test it conforms to the interface

// diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go
// new file mode 100644
// index 000000000..41385dab1
// --- /dev/null
// +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go
// @@ -0,0 +1,126 @@

/*
 Copyright 2021 The CloudEvents Authors
 SPDX-License-Identifier: Apache-2.0
*/

package http

import (
	"bytes"
	"context"
	"io"
	"net/http"
	"strconv"
	"strings"

	"github.com/cloudevents/sdk-go/v2/binding"
	"github.com/cloudevents/sdk-go/v2/binding/format"
	"github.com/cloudevents/sdk-go/v2/binding/spec"
	"github.com/cloudevents/sdk-go/v2/types"
)

// WriteResponseWriter writes out to the provided httpResponseWriter with the message m.
// Using context you can tweak the encoding processing (more details on binding.Write documentation).
+func WriteResponseWriter(ctx context.Context, m binding.Message, status int, rw http.ResponseWriter, transformers ...binding.Transformer) error { + if status < 200 || status >= 600 { + status = http.StatusOK + } + writer := &httpResponseWriter{rw: rw, status: status} + + _, err := binding.Write( + ctx, + m, + writer, + writer, + transformers..., + ) + return err +} + +type httpResponseWriter struct { + rw http.ResponseWriter + status int + body io.Reader +} + +func (b *httpResponseWriter) SetStructuredEvent(ctx context.Context, format format.Format, event io.Reader) error { + b.rw.Header().Set(ContentType, format.MediaType()) + b.body = event + return b.finalizeWriter() +} + +func (b *httpResponseWriter) Start(ctx context.Context) error { + return nil +} + +func (b *httpResponseWriter) SetAttribute(attribute spec.Attribute, value interface{}) error { + mapping := attributeHeadersMapping[attribute.Name()] + if value == nil { + delete(b.rw.Header(), mapping) + } + + // Http headers, everything is a string! + s, err := types.Format(value) + if err != nil { + return err + } + b.rw.Header()[mapping] = append(b.rw.Header()[mapping], s) + return nil +} + +func (b *httpResponseWriter) SetExtension(name string, value interface{}) error { + if value == nil { + delete(b.rw.Header(), extNameToHeaderName(name)) + } + // Http headers, everything is a string! 
+ s, err := types.Format(value) + if err != nil { + return err + } + b.rw.Header()[extNameToHeaderName(name)] = []string{s} + return nil +} + +func (b *httpResponseWriter) SetData(reader io.Reader) error { + b.body = reader + return nil +} + +func (b *httpResponseWriter) finalizeWriter() error { + if b.body != nil { + // Try to figure it out if we have a content-length + contentLength := -1 + switch v := b.body.(type) { + case *bytes.Buffer: + contentLength = v.Len() + case *bytes.Reader: + contentLength = v.Len() + case *strings.Reader: + contentLength = v.Len() + } + + if contentLength != -1 { + b.rw.Header().Add("Content-length", strconv.Itoa(contentLength)) + } + + // Finalize the headers. + b.rw.WriteHeader(b.status) + + // Write body. + _, err := io.Copy(b.rw, b.body) + if err != nil { + return err + } + } else { + // Finalize the headers. + b.rw.WriteHeader(b.status) + } + return nil +} + +func (b *httpResponseWriter) End(ctx context.Context) error { + return b.finalizeWriter() +} + +var _ binding.StructuredWriter = (*httpResponseWriter)(nil) // Test it conforms to the interface +var _ binding.BinaryWriter = (*httpResponseWriter)(nil) // Test it conforms to the interface diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go new file mode 100644 index 000000000..e7a74294d --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go @@ -0,0 +1,54 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/binding" +) + +// Receiver receives messages. +type Receiver interface { + // Receive blocks till a message is received or ctx expires. + // Receive can be invoked safely from different goroutines. + // + // A non-nil error means the receiver is closed. + // io.EOF means it closed cleanly, any other value indicates an error. 
+ // The caller is responsible for `Finish()` the returned message + Receive(ctx context.Context) (binding.Message, error) +} + +// ReceiveCloser is a Receiver that can be closed. +type ReceiveCloser interface { + Receiver + Closer +} + +// ResponseFn is the function callback provided from Responder.Respond to allow +// for a receiver to "reply" to a message it receives. +// transformers are applied when the message is written on the wire. +type ResponseFn func(ctx context.Context, m binding.Message, r Result, transformers ...binding.Transformer) error + +// Responder receives messages and is given a callback to respond. +type Responder interface { + // Respond blocks till a message is received or ctx expires. + // Respond can be invoked safely from different goroutines. + // + // A non-nil error means the receiver is closed. + // io.EOF means it closed cleanly, any other value indicates an error. + // The caller is responsible for `Finish()` the returned message, + // while the protocol implementation is responsible for `Finish()` the response message. + // The caller MUST invoke ResponseFn, in order to avoid leaks. + // The correct flow for the caller is to finish the received message and then invoke the ResponseFn + Respond(ctx context.Context) (binding.Message, ResponseFn, error) +} + +// ResponderCloser is a Responder that can be closed. +type ResponderCloser interface { + Responder + Closer +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go new file mode 100644 index 000000000..4a058c962 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go @@ -0,0 +1,23 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import ( + "context" +) + +// Opener is the common interface for things that need to be opened. 
+type Opener interface { + // OpenInbound is a blocking call and ctx is used to stop the Inbound message Receiver/Responder. + // Closing the context won't close the Receiver/Responder, aka it won't invoke Close(ctx). + OpenInbound(ctx context.Context) error +} + +// Closer is the common interface for things that can be closed. +// After invoking Close(ctx), you cannot reuse the object you closed. +type Closer interface { + Close(ctx context.Context) error +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go new file mode 100644 index 000000000..e44fa432a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go @@ -0,0 +1,49 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/binding" +) + +// Sender sends messages. +type Sender interface { + // Send a message. + // + // Send returns when the "outbound" message has been sent. The Sender may + // still be expecting acknowledgment or holding other state for the message. + // + // m.Finish() is called when sending is finished (both succeeded or failed): + // expected acknowledgments (or errors) have been received, the Sender is + // no longer holding any state for the message. + // m.Finish() may be called during or after Send(). + // + // transformers are applied when the message is written on the wire. + Send(ctx context.Context, m binding.Message, transformers ...binding.Transformer) error +} + +// SendCloser is a Sender that can be closed. +type SendCloser interface { + Sender + Closer +} + +// Requester sends a message and receives a response +// +// Optional interface that may be implemented by protocols that support +// request/response correlation. +type Requester interface { + // Request sends m like Sender.Send() but also arranges to receive a response. 
+ Request(ctx context.Context, m binding.Message, transformers ...binding.Transformer) (binding.Message, error) +} + +// RequesterCloser is a Requester that can be closed. +type RequesterCloser interface { + Requester + Closer +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go new file mode 100644 index 000000000..eae64e018 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go @@ -0,0 +1,127 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import ( + "errors" + "fmt" +) + +// Result leverages go's error wrapping. +type Result error + +// ResultIs reports whether any error in err's chain matches target. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +// (text from errors/wrap.go) +var ResultIs = errors.Is + +// ResultAs finds the first error in err's chain that matches target, and if so, sets +// target to that error value and returns true. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error matches target if the error's concrete value is assignable to the value +// pointed to by target, or if the error has a method As(interface{}) bool such that +// As(target) returns true. In the latter case, the As method is responsible for +// setting target. +// +// As will panic if target is not a non-nil pointer to either a type that implements +// error, or to any interface type. As returns false if err is nil. +// (text from errors/wrap.go) +var ResultAs = errors.As + +func NewResult(messageFmt string, args ...interface{}) Result { + return fmt.Errorf(messageFmt, args...) 
+} + +// IsACK true means the recipient acknowledged the event. +func IsACK(target Result) bool { + // special case, nil target also means ACK. + if target == nil { + return true + } + + return ResultIs(target, ResultACK) +} + +// IsNACK true means the recipient did not acknowledge the event. +func IsNACK(target Result) bool { + return ResultIs(target, ResultNACK) +} + +// IsUndelivered true means the target result is not an ACK/NACK, but some other +// error unrelated to delivery not from the intended recipient. Likely target +// is an error that represents some part of the protocol is misconfigured or +// the event that was attempting to be sent was invalid. +func IsUndelivered(target Result) bool { + if target == nil { + // Short-circuit nil result is ACK. + return false + } + return !ResultIs(target, ResultACK) && !ResultIs(target, ResultNACK) +} + +var ( + ResultACK = NewReceipt(true, "") + ResultNACK = NewReceipt(false, "") +) + +// NewReceipt returns a fully populated protocol Receipt that should be used as +// a transport.Result. This type holds the base ACK/NACK results. +func NewReceipt(ack bool, messageFmt string, args ...interface{}) Result { + return &Receipt{ + Err: fmt.Errorf(messageFmt, args...), + ACK: ack, + } +} + +// Receipt wraps the fields required to understand if a protocol event is acknowledged. +type Receipt struct { + Err error + ACK bool +} + +// make sure Result implements error. +var _ error = (*Receipt)(nil) + +// Is returns if the target error is a Result type checking target. +func (e *Receipt) Is(target error) bool { + if o, ok := target.(*Receipt); ok { + if e == nil { + // Special case nil e as ACK. + return o.ACK + } + return e.ACK == o.ACK + } + // Allow for wrapped errors. + if e != nil { + return errors.Is(e.Err, target) + } + return false +} + +// Error returns the string that is formed by using the format string with the +// provided args. 
+func (e *Receipt) Error() string { + if e != nil { + return e.Err.Error() + } + return "" +} + +// Unwrap returns the wrapped error if exist or nil +func (e *Receipt) Unwrap() error { + if e != nil { + return errors.Unwrap(e.Err) + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/staticcheck.conf b/vendor/github.com/cloudevents/sdk-go/v2/staticcheck.conf new file mode 100644 index 000000000..d6f269556 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/staticcheck.conf @@ -0,0 +1,3 @@ +checks = [ + "all", "-ST1003", +] \ No newline at end of file diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go b/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go new file mode 100644 index 000000000..814626874 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go @@ -0,0 +1,41 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import "reflect" + +// Allocate allocates a new instance of type t and returns: +// asPtr is of type t if t is a pointer type and of type &t otherwise +// asValue is a Value of type t pointing to the same data as asPtr +func Allocate(obj interface{}) (asPtr interface{}, asValue reflect.Value) { + if obj == nil { + return nil, reflect.Value{} + } + + switch t := reflect.TypeOf(obj); t.Kind() { + case reflect.Ptr: + reflectPtr := reflect.New(t.Elem()) + asPtr = reflectPtr.Interface() + asValue = reflectPtr + case reflect.Map: + reflectPtr := reflect.MakeMap(t) + asPtr = reflectPtr.Interface() + asValue = reflectPtr + case reflect.String: + reflectPtr := reflect.New(t) + asPtr = "" + asValue = reflectPtr.Elem() + case reflect.Slice: + reflectPtr := reflect.MakeSlice(t, 0, 0) + asPtr = reflectPtr.Interface() + asValue = reflectPtr + default: + reflectPtr := reflect.New(t) + asPtr = reflectPtr.Interface() + asValue = reflectPtr.Elem() + } + return +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go 
b/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go new file mode 100644 index 000000000..cf7a94f35 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go @@ -0,0 +1,46 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package types implements the CloudEvents type system. + +CloudEvents defines a set of abstract types for event context attributes. Each +type has a corresponding native Go type and a canonical string encoding. The +native Go types used to represent the CloudEvents types are: +bool, int32, string, []byte, *url.URL, time.Time + + +----------------+----------------+-----------------------------------+ + |CloudEvents Type|Native Type |Convertible From | + +================+================+===================================+ + |Bool |bool |bool | + +----------------+----------------+-----------------------------------+ + |Integer |int32 |Any numeric type with value in | + | | |range of int32 | + +----------------+----------------+-----------------------------------+ + |String |string |string | + +----------------+----------------+-----------------------------------+ + |Binary |[]byte |[]byte | + +----------------+----------------+-----------------------------------+ + |URI-Reference |*url.URL |url.URL, types.URIRef, types.URI | + +----------------+----------------+-----------------------------------+ + |URI |*url.URL |url.URL, types.URIRef, types.URI | + | | |Must be an absolute URI. | + +----------------+----------------+-----------------------------------+ + |Timestamp |time.Time |time.Time, types.Timestamp | + +----------------+----------------+-----------------------------------+ + +Extension attributes may be stored as a native type or a canonical string. The +To functions will convert to the desired from any convertible type +or from the canonical string form. + +The Parse and Format functions convert native types to/from +canonical strings. 
+
+Note there are no Parse or Format functions for URL or string. For URL use the
+standard url.Parse() and url.URL.String(). The canonical string format of a
+string is the string itself.
+
+*/
+package types
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go b/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go
new file mode 100644
index 000000000..ff049727d
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go
@@ -0,0 +1,75 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package types
+
+import (
+	"encoding/json"
+	"encoding/xml"
+	"fmt"
+	"time"
+)
+
+// Timestamp wraps time.Time to normalize the time layout to RFC3339. It is
+// intended to enforce compliance with the CloudEvents spec for their
+// definition of Timestamp. Custom marshal methods are implemented to ensure
+// the outbound Timestamp is a string in the RFC3339 layout.
+type Timestamp struct {
+	time.Time
+}
+
+// ParseTimestamp attempts to parse the given time assuming RFC3339 layout
+func ParseTimestamp(s string) (*Timestamp, error) {
+	if s == "" {
+		return nil, nil
+	}
+	tt, err := ParseTime(s)
+	return &Timestamp{Time: tt}, err
+}
+
+// MarshalJSON implements a custom json marshal method used when this type is
+// marshaled using json.Marshal.
+func (t *Timestamp) MarshalJSON() ([]byte, error) {
+	if t == nil || t.IsZero() {
+		return []byte(`""`), nil
+	}
+	return []byte(fmt.Sprintf("%q", t)), nil
+}
+
+// UnmarshalJSON implements the json unmarshal method used when this type is
+// unmarshaled using json.Unmarshal.
+func (t *Timestamp) UnmarshalJSON(b []byte) error {
+	var timestamp string
+	if err := json.Unmarshal(b, &timestamp); err != nil {
+		return err
+	}
+	var err error
+	t.Time, err = ParseTime(timestamp)
+	return err
+}
+
+// MarshalXML implements a custom xml marshal method used when this type is
+// marshaled using xml.Marshal.
+func (t *Timestamp) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	if t == nil || t.IsZero() {
+		return e.EncodeElement(nil, start)
+	}
+	return e.EncodeElement(t.String(), start)
+}
+
+// UnmarshalXML implements the xml unmarshal method used when this type is
+// unmarshaled using xml.Unmarshal.
+func (t *Timestamp) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	var timestamp string
+	if err := d.DecodeElement(&timestamp, &start); err != nil {
+		return err
+	}
+	var err error
+	t.Time, err = ParseTime(timestamp)
+	return err
+}
+
+// String outputs the time using RFC3339 format.
+func (t Timestamp) String() string { return FormatTime(t.Time) }
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go b/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go
new file mode 100644
index 000000000..bed608094
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go
@@ -0,0 +1,86 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package types
+
+import (
+	"encoding/json"
+	"encoding/xml"
+	"fmt"
+	"net/url"
+)
+
+// URI is a wrapper to url.URL. It is intended to enforce compliance with
+// the CloudEvents spec for their definition of URI. Custom
+// marshal methods are implemented to ensure the outbound URI object
+// is a flat string.
+type URI struct {
+	url.URL
+}
+
+// ParseURI attempts to parse the given string as a URI.
+func ParseURI(u string) *URI {
+	if u == "" {
+		return nil
+	}
+	pu, err := url.Parse(u)
+	if err != nil {
+		return nil
+	}
+	return &URI{URL: *pu}
+}
+
+// MarshalJSON implements a custom json marshal method used when this type is
+// marshaled using json.Marshal.
+func (u URI) MarshalJSON() ([]byte, error) {
+	b := fmt.Sprintf("%q", u.String())
+	return []byte(b), nil
+}
+
+// UnmarshalJSON implements the json unmarshal method used when this type is
+// unmarshaled using json.Unmarshal.
+func (u *URI) UnmarshalJSON(b []byte) error { + var ref string + if err := json.Unmarshal(b, &ref); err != nil { + return err + } + r := ParseURI(ref) + if r != nil { + *u = *r + } + return nil +} + +// MarshalXML implements a custom xml marshal method used when this type is +// marshaled using xml.Marshal. +func (u URI) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + return e.EncodeElement(u.String(), start) +} + +// UnmarshalXML implements the xml unmarshal method used when this type is +// unmarshaled using xml.Unmarshal. +func (u *URI) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var ref string + if err := d.DecodeElement(&ref, &start); err != nil { + return err + } + r := ParseURI(ref) + if r != nil { + *u = *r + } + return nil +} + +func (u URI) Validate() bool { + return u.IsAbs() +} + +// String returns the full string representation of the URI-Reference. +func (u *URI) String() string { + if u == nil { + return "" + } + return u.URL.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go b/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go new file mode 100644 index 000000000..22fa12314 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go @@ -0,0 +1,82 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "net/url" +) + +// URIRef is a wrapper to url.URL. It is intended to enforce compliance with +// the CloudEvents spec for their definition of URI-Reference. Custom +// marshal methods are implemented to ensure the outbound URIRef object is +// is a flat string. +type URIRef struct { + url.URL +} + +// ParseURIRef attempts to parse the given string as a URI-Reference. 
+func ParseURIRef(u string) *URIRef { + if u == "" { + return nil + } + pu, err := url.Parse(u) + if err != nil { + return nil + } + return &URIRef{URL: *pu} +} + +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. +func (u URIRef) MarshalJSON() ([]byte, error) { + b := fmt.Sprintf("%q", u.String()) + return []byte(b), nil +} + +// UnmarshalJSON implements the json unmarshal method used when this type is +// unmarshaled using json.Unmarshal. +func (u *URIRef) UnmarshalJSON(b []byte) error { + var ref string + if err := json.Unmarshal(b, &ref); err != nil { + return err + } + r := ParseURIRef(ref) + if r != nil { + *u = *r + } + return nil +} + +// MarshalXML implements a custom xml marshal method used when this type is +// marshaled using xml.Marshal. +func (u URIRef) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + return e.EncodeElement(u.String(), start) +} + +// UnmarshalXML implements the xml unmarshal method used when this type is +// unmarshaled using xml.Unmarshal. +func (u *URIRef) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var ref string + if err := d.DecodeElement(&ref, &start); err != nil { + return err + } + r := ParseURIRef(ref) + if r != nil { + *u = *r + } + return nil +} + +// String returns the full string representation of the URI-Reference. 
+func (u *URIRef) String() string { + if u == nil { + return "" + } + return u.URL.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/value.go b/vendor/github.com/cloudevents/sdk-go/v2/types/value.go new file mode 100644 index 000000000..f643d0aa5 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/value.go @@ -0,0 +1,335 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import ( + "encoding/base64" + "fmt" + "math" + "net/url" + "reflect" + "strconv" + "time" +) + +// FormatBool returns canonical string format: "true" or "false" +func FormatBool(v bool) string { return strconv.FormatBool(v) } + +// FormatInteger returns canonical string format: decimal notation. +func FormatInteger(v int32) string { return strconv.Itoa(int(v)) } + +// FormatBinary returns canonical string format: standard base64 encoding +func FormatBinary(v []byte) string { return base64.StdEncoding.EncodeToString(v) } + +// FormatTime returns canonical string format: RFC3339 with nanoseconds +func FormatTime(v time.Time) string { return v.UTC().Format(time.RFC3339Nano) } + +// ParseBool parse canonical string format: "true" or "false" +func ParseBool(v string) (bool, error) { return strconv.ParseBool(v) } + +// ParseInteger parse canonical string format: decimal notation. +func ParseInteger(v string) (int32, error) { + // Accept floating-point but truncate to int32 as per CE spec. 
+ f, err := strconv.ParseFloat(v, 64) + if err != nil { + return 0, err + } + if f > math.MaxInt32 || f < math.MinInt32 { + return 0, rangeErr(v) + } + return int32(f), nil +} + +// ParseBinary parse canonical string format: standard base64 encoding +func ParseBinary(v string) ([]byte, error) { return base64.StdEncoding.DecodeString(v) } + +// ParseTime parse canonical string format: RFC3339 with nanoseconds +func ParseTime(v string) (time.Time, error) { + t, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + err := convertErr(time.Time{}, v) + err.extra = ": not in RFC3339 format" + return time.Time{}, err + } + return t, nil +} + +// Format returns the canonical string format of v, where v can be +// any type that is convertible to a CloudEvents type. +func Format(v interface{}) (string, error) { + v, err := Validate(v) + if err != nil { + return "", err + } + switch v := v.(type) { + case bool: + return FormatBool(v), nil + case int32: + return FormatInteger(v), nil + case string: + return v, nil + case []byte: + return FormatBinary(v), nil + case URI: + return v.String(), nil + case URIRef: + // url.URL is often passed by pointer so allow both + return v.String(), nil + case Timestamp: + return FormatTime(v.Time), nil + default: + return "", fmt.Errorf("%T is not a CloudEvents type", v) + } +} + +// Validate v is a valid CloudEvents attribute value, convert it to one of: +// bool, int32, string, []byte, types.URI, types.URIRef, types.Timestamp +func Validate(v interface{}) (interface{}, error) { + switch v := v.(type) { + case bool, int32, string, []byte: + return v, nil // Already a CloudEvents type, no validation needed. 
+ + case uint, uintptr, uint8, uint16, uint32, uint64: + u := reflect.ValueOf(v).Uint() + if u > math.MaxInt32 { + return nil, rangeErr(v) + } + return int32(u), nil + case int, int8, int16, int64: + i := reflect.ValueOf(v).Int() + if i > math.MaxInt32 || i < math.MinInt32 { + return nil, rangeErr(v) + } + return int32(i), nil + case float32, float64: + f := reflect.ValueOf(v).Float() + if f > math.MaxInt32 || f < math.MinInt32 { + return nil, rangeErr(v) + } + return int32(f), nil + + case *url.URL: + if v == nil { + break + } + return URI{URL: *v}, nil + case url.URL: + return URI{URL: v}, nil + case *URIRef: + if v != nil { + return *v, nil + } + return nil, nil + case URIRef: + return v, nil + case *URI: + if v != nil { + return *v, nil + } + return nil, nil + case URI: + return v, nil + case time.Time: + return Timestamp{Time: v}, nil + case *time.Time: + if v == nil { + break + } + return Timestamp{Time: *v}, nil + case Timestamp: + return v, nil + } + rx := reflect.ValueOf(v) + if rx.Kind() == reflect.Ptr && !rx.IsNil() { + // Allow pointers-to convertible types + return Validate(rx.Elem().Interface()) + } + return nil, fmt.Errorf("invalid CloudEvents value: %#v", v) +} + +// Clone v clones a CloudEvents attribute value, which is one of the valid types: +// bool, int32, string, []byte, types.URI, types.URIRef, types.Timestamp +// Returns the same type +// Panics if the type is not valid +func Clone(v interface{}) interface{} { + if v == nil { + return nil + } + switch v := v.(type) { + case bool, int32, string, nil: + return v // Already a CloudEvents type, no validation needed. 
+ case []byte: + clone := make([]byte, len(v)) + copy(clone, v) + return v + case url.URL: + return URI{v} + case *url.URL: + return &URI{*v} + case URIRef: + return v + case *URIRef: + return &URIRef{v.URL} + case URI: + return v + case *URI: + return &URI{v.URL} + case time.Time: + return Timestamp{v} + case *time.Time: + return &Timestamp{*v} + case Timestamp: + return v + case *Timestamp: + return &Timestamp{v.Time} + } + panic(fmt.Errorf("invalid CloudEvents value: %#v", v)) +} + +// ToBool accepts a bool value or canonical "true"/"false" string. +func ToBool(v interface{}) (bool, error) { + v, err := Validate(v) + if err != nil { + return false, err + } + switch v := v.(type) { + case bool: + return v, nil + case string: + return ParseBool(v) + default: + return false, convertErr(true, v) + } +} + +// ToInteger accepts any numeric value in int32 range, or canonical string. +func ToInteger(v interface{}) (int32, error) { + v, err := Validate(v) + if err != nil { + return 0, err + } + switch v := v.(type) { + case int32: + return v, nil + case string: + return ParseInteger(v) + default: + return 0, convertErr(int32(0), v) + } +} + +// ToString returns a string value unaltered. +// +// This function does not perform canonical string encoding, use one of the +// Format functions for that. +func ToString(v interface{}) (string, error) { + v, err := Validate(v) + if err != nil { + return "", err + } + switch v := v.(type) { + case string: + return v, nil + default: + return "", convertErr("", v) + } +} + +// ToBinary returns a []byte value, decoding from base64 string if necessary. +func ToBinary(v interface{}) ([]byte, error) { + v, err := Validate(v) + if err != nil { + return nil, err + } + switch v := v.(type) { + case []byte: + return v, nil + case string: + return base64.StdEncoding.DecodeString(v) + default: + return nil, convertErr([]byte(nil), v) + } +} + +// ToURL returns a *url.URL value, parsing from string if necessary. 
+func ToURL(v interface{}) (*url.URL, error) { + v, err := Validate(v) + if err != nil { + return nil, err + } + switch v := v.(type) { + case *URI: + return &v.URL, nil + case URI: + return &v.URL, nil + case *URIRef: + return &v.URL, nil + case URIRef: + return &v.URL, nil + case string: + u, err := url.Parse(v) + if err != nil { + return nil, err + } + return u, nil + default: + return nil, convertErr((*url.URL)(nil), v) + } +} + +// ToTime returns a time.Time value, parsing from RFC3339 string if necessary. +func ToTime(v interface{}) (time.Time, error) { + v, err := Validate(v) + if err != nil { + return time.Time{}, err + } + switch v := v.(type) { + case Timestamp: + return v.Time, nil + case string: + ts, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + return time.Time{}, err + } + return ts, nil + default: + return time.Time{}, convertErr(time.Time{}, v) + } +} + +func IsZero(v interface{}) bool { + // Fast path + if v == nil { + return true + } + if s, ok := v.(string); ok && s == "" { + return true + } + return reflect.ValueOf(v).IsZero() +} + +type ConvertErr struct { + // Value being converted + Value interface{} + // Type of attempted conversion + Type reflect.Type + + extra string +} + +func (e *ConvertErr) Error() string { + return fmt.Sprintf("cannot convert %#v to %s%s", e.Value, e.Type, e.extra) +} + +func convertErr(target, v interface{}) *ConvertErr { + return &ConvertErr{Value: v, Type: reflect.TypeOf(target)} +} + +func rangeErr(v interface{}) error { + e := convertErr(int32(0), v) + e.extra = ": out of range" + return e +} diff --git a/vendor/github.com/eclipse/paho.golang/LICENSE b/vendor/github.com/eclipse/paho.golang/LICENSE new file mode 100644 index 000000000..d3087e4c5 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/LICENSE @@ -0,0 +1,277 @@ +Eclipse Public License - v 2.0 + + THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE + PUBLIC LICENSE ("AGREEMENT"). 
ANY USE, REPRODUCTION OR DISTRIBUTION + OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. DEFINITIONS + +"Contribution" means: + + a) in the case of the initial Contributor, the initial content + Distributed under this Agreement, and + + b) in the case of each subsequent Contributor: + i) changes to the Program, and + ii) additions to the Program; + where such changes and/or additions to the Program originate from + and are Distributed by that particular Contributor. A Contribution + "originates" from a Contributor if it was added to the Program by + such Contributor itself or anyone acting on such Contributor's behalf. + Contributions do not include changes or additions to the Program that + are not Modified Works. + +"Contributor" means any person or entity that Distributes the Program. + +"Licensed Patents" mean patent claims licensable by a Contributor which +are necessarily infringed by the use or sale of its Contribution alone +or when combined with the Program. + +"Program" means the Contributions Distributed in accordance with this +Agreement. + +"Recipient" means anyone who receives the Program under this Agreement +or any Secondary License (as applicable), including Contributors. + +"Derivative Works" shall mean any work, whether in Source Code or other +form, that is based on (or derived from) the Program and for which the +editorial revisions, annotations, elaborations, or other modifications +represent, as a whole, an original work of authorship. + +"Modified Works" shall mean any work in Source Code or other form that +results from an addition to, deletion from, or modification of the +contents of the Program, including, for purposes of clarity any new file +in Source Code form that contains any contents of the Program. 
Modified +Works shall not include works that contain only declarations, +interfaces, types, classes, structures, or files of the Program solely +in each case in order to link to, bind by name, or subclass the Program +or Modified Works thereof. + +"Distribute" means the acts of a) distributing or b) making available +in any manner that enables the transfer of a copy. + +"Source Code" means the form of a Program preferred for making +modifications, including but not limited to software source code, +documentation source, and configuration files. + +"Secondary License" means either the GNU General Public License, +Version 2.0, or any later versions of that license, including any +exceptions or additional permissions as identified by the initial +Contributor. + +2. GRANT OF RIGHTS + + a) Subject to the terms of this Agreement, each Contributor hereby + grants Recipient a non-exclusive, worldwide, royalty-free copyright + license to reproduce, prepare Derivative Works of, publicly display, + publicly perform, Distribute and sublicense the Contribution of such + Contributor, if any, and such Derivative Works. + + b) Subject to the terms of this Agreement, each Contributor hereby + grants Recipient a non-exclusive, worldwide, royalty-free patent + license under Licensed Patents to make, use, sell, offer to sell, + import and otherwise transfer the Contribution of such Contributor, + if any, in Source Code or other form. This patent license shall + apply to the combination of the Contribution and the Program if, at + the time the Contribution is added by the Contributor, such addition + of the Contribution causes such combination to be covered by the + Licensed Patents. The patent license shall not apply to any other + combinations which include the Contribution. No hardware per se is + licensed hereunder. 
+ + c) Recipient understands that although each Contributor grants the + licenses to its Contributions set forth herein, no assurances are + provided by any Contributor that the Program does not infringe the + patent or other intellectual property rights of any other entity. + Each Contributor disclaims any liability to Recipient for claims + brought by any other entity based on infringement of intellectual + property rights or otherwise. As a condition to exercising the + rights and licenses granted hereunder, each Recipient hereby + assumes sole responsibility to secure any other intellectual + property rights needed, if any. For example, if a third party + patent license is required to allow Recipient to Distribute the + Program, it is Recipient's responsibility to acquire that license + before distributing the Program. + + d) Each Contributor represents that to its knowledge it has + sufficient copyright rights in its Contribution, if any, to grant + the copyright license set forth in this Agreement. + + e) Notwithstanding the terms of any Secondary License, no + Contributor makes additional grants to any Recipient (other than + those set forth in this Agreement) as a result of such Recipient's + receipt of the Program under the terms of a Secondary License + (if permitted under the terms of Section 3). + +3. 
REQUIREMENTS + +3.1 If a Contributor Distributes the Program in any form, then: + + a) the Program must also be made available as Source Code, in + accordance with section 3.2, and the Contributor must accompany + the Program with a statement that the Source Code for the Program + is available under this Agreement, and informs Recipients how to + obtain it in a reasonable manner on or through a medium customarily + used for software exchange; and + + b) the Contributor may Distribute the Program under a license + different than this Agreement, provided that such license: + i) effectively disclaims on behalf of all other Contributors all + warranties and conditions, express and implied, including + warranties or conditions of title and non-infringement, and + implied warranties or conditions of merchantability and fitness + for a particular purpose; + + ii) effectively excludes on behalf of all other Contributors all + liability for damages, including direct, indirect, special, + incidental and consequential damages, such as lost profits; + + iii) does not attempt to limit or alter the recipients' rights + in the Source Code under section 3.2; and + + iv) requires any subsequent distribution of the Program by any + party to be under a license that satisfies the requirements + of this section 3. + +3.2 When the Program is Distributed as Source Code: + + a) it must be made available under this Agreement, or if the + Program (i) is combined with other material in a separate file or + files made available under a Secondary License, and (ii) the initial + Contributor attached to the Source Code the notice described in + Exhibit A of this Agreement, then the Program may be made available + under the terms of such Secondary Licenses, and + + b) a copy of this Agreement must be included with each copy of + the Program. 
+ +3.3 Contributors may not remove or alter any copyright, patent, +trademark, attribution notices, disclaimers of warranty, or limitations +of liability ("notices") contained within the Program from any copy of +the Program which they Distribute, provided that Contributors may add +their own appropriate notices. + +4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities +with respect to end users, business partners and the like. While this +license is intended to facilitate the commercial use of the Program, +the Contributor who includes the Program in a commercial product +offering should do so in a manner which does not create potential +liability for other Contributors. Therefore, if a Contributor includes +the Program in a commercial product offering, such Contributor +("Commercial Contributor") hereby agrees to defend and indemnify every +other Contributor ("Indemnified Contributor") against any losses, +damages and costs (collectively "Losses") arising from claims, lawsuits +and other legal actions brought by a third party against the Indemnified +Contributor to the extent caused by the acts or omissions of such +Commercial Contributor in connection with its distribution of the Program +in a commercial product offering. The obligations in this section do not +apply to any claims or Losses relating to any actual or alleged +intellectual property infringement. In order to qualify, an Indemnified +Contributor must: a) promptly notify the Commercial Contributor in +writing of such claim, and b) allow the Commercial Contributor to control, +and cooperate with the Commercial Contributor in, the defense and any +related settlement negotiations. The Indemnified Contributor may +participate in any such claim at its own expense. + +For example, a Contributor might include the Program in a commercial +product offering, Product X. That Contributor is then a Commercial +Contributor. 
If that Commercial Contributor then makes performance +claims, or offers warranties related to Product X, those performance +claims and warranties are such Commercial Contributor's responsibility +alone. Under this section, the Commercial Contributor would have to +defend claims against the other Contributors related to those performance +claims and warranties, and if a court requires any other Contributor to +pay any damages as a result, the Commercial Contributor must pay +those damages. + +5. NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT +PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS" +BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR +IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF +TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR +PURPOSE. Each Recipient is solely responsible for determining the +appropriateness of using and distributing the Program and assumes all +risks associated with its exercise of rights under this Agreement, +including but not limited to the risks and costs of program errors, +compliance with applicable laws, damage to or loss of data, programs +or equipment, and unavailability or interruption of operations. + +6. DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT +PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS +SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST +PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE +EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + +7. 
GENERAL + +If any provision of this Agreement is invalid or unenforceable under +applicable law, it shall not affect the validity or enforceability of +the remainder of the terms of this Agreement, and without further +action by the parties hereto, such provision shall be reformed to the +minimum extent necessary to make such provision valid and enforceable. + +If Recipient institutes patent litigation against any entity +(including a cross-claim or counterclaim in a lawsuit) alleging that the +Program itself (excluding combinations of the Program with other software +or hardware) infringes such Recipient's patent(s), then such Recipient's +rights granted under Section 2(b) shall terminate as of the date such +litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it +fails to comply with any of the material terms or conditions of this +Agreement and does not cure such failure in a reasonable period of +time after becoming aware of such noncompliance. If all Recipient's +rights under this Agreement terminate, Recipient agrees to cease use +and distribution of the Program as soon as reasonably practicable. +However, Recipient's obligations under this Agreement and any licenses +granted by Recipient relating to the Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, +but in order to avoid inconsistency the Agreement is copyrighted and +may only be modified in the following manner. The Agreement Steward +reserves the right to publish new versions (including revisions) of +this Agreement from time to time. No one other than the Agreement +Steward has the right to modify this Agreement. The Eclipse Foundation +is the initial Agreement Steward. The Eclipse Foundation may assign the +responsibility to serve as the Agreement Steward to a suitable separate +entity. Each new version of the Agreement will be given a distinguishing +version number. 
The Program (including Contributions) may always be +Distributed subject to the version of the Agreement under which it was +received. In addition, after a new version of the Agreement is published, +Contributor may elect to Distribute the Program (including its +Contributions) under the new version. + +Except as expressly stated in Sections 2(a) and 2(b) above, Recipient +receives no rights or licenses to the intellectual property of any +Contributor under this Agreement, whether expressly, by implication, +estoppel or otherwise. All rights in the Program not expressly granted +under this Agreement are reserved. Nothing in this Agreement is intended +to be enforceable by any entity that is not a Contributor or Recipient. +No third-party beneficiary rights are created under this Agreement. + +Exhibit A - Form of Secondary Licenses Notice + +"This Source Code may also be made available under the following +Secondary Licenses when the conditions for such availability set forth +in the Eclipse Public License, v. 2.0 are satisfied: {name license(s), +version(s), and exceptions or additional permissions here}." + + Simply including a copy of this Agreement, including this Exhibit A + is not sufficient to license the Source Code under Secondary Licenses. + + If it is not possible or desirable to put the notice in a particular + file, then You may include the notice in a location (such as a LICENSE + file in a relevant directory) where a recipient would be likely to + look for such a notice. + + You may add additional accurate notices of copyright ownership. 
diff --git a/vendor/github.com/eclipse/paho.golang/packets/auth.go b/vendor/github.com/eclipse/paho.golang/packets/auth.go new file mode 100644 index 000000000..56237e00c --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/auth.go @@ -0,0 +1,77 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Auth is the Variable Header definition for a Auth control packet +type Auth struct { + Properties *Properties + ReasonCode byte +} + +// AuthSuccess is the return code for successful authentication +const ( + AuthSuccess = 0x00 + AuthContinueAuthentication = 0x18 + AuthReauthenticate = 0x19 +) + +func (a *Auth) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "AUTH: ReasonCode:%X", a.ReasonCode) + if a.Properties != nil { + fmt.Fprintf(&b, " Properties:\n%s", a.Properties) + } else { + fmt.Fprint(&b, "\n") + } + + return b.String() +} + +// Unpack is the implementation of the interface required function for a packet +func (a *Auth) Unpack(r *bytes.Buffer) error { + var err error + + success := r.Len() == 0 + noProps := r.Len() == 1 + if !success { + a.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + if !noProps { + err = a.Properties.Unpack(r, AUTH) + if err != nil { + return err + } + } + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (a *Auth) Buffers() net.Buffers { + idvp := a.Properties.Pack(AUTH) + propLen := encodeVBI(len(idvp)) + n := net.Buffers{[]byte{a.ReasonCode}, propLen} + if len(idvp) > 0 { + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (a *Auth) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: AUTH}} + cp.Content = a + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/connack.go b/vendor/github.com/eclipse/paho.golang/packets/connack.go new file mode 100644 
index 000000000..3041fbcb5
--- /dev/null
+++ b/vendor/github.com/eclipse/paho.golang/packets/connack.go
@@ -0,0 +1,145 @@
+package packets
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net"
+)
+
+// Connack is the Variable Header definition for a connack control packet
+type Connack struct {
+	Properties     *Properties
+	ReasonCode     byte
+	SessionPresent bool
+}
+
+const (
+	ConnackSuccess                     = 0x00
+	ConnackUnspecifiedError            = 0x80
+	ConnackMalformedPacket             = 0x81
+	ConnackProtocolError               = 0x82 // FIX: was 0x81 (duplicate of MalformedPacket); MQTT 5.0 defines Protocol Error as 0x82, matching Reason() case 130 below
+	ConnackImplementationSpecificError = 0x83
+	ConnackUnsupportedProtocolVersion  = 0x84
+	ConnackInvalidClientID             = 0x85
+	ConnackBadUsernameOrPassword       = 0x86
+	ConnackNotAuthorized               = 0x87
+	ConnackServerUnavailable           = 0x88
+	ConnackServerBusy                  = 0x89
+	ConnackBanned                      = 0x8A
+	ConnackBadAuthenticationMethod     = 0x8C
+	ConnackTopicNameInvalid            = 0x90
+	ConnackPacketTooLarge              = 0x95
+	ConnackQuotaExceeded               = 0x97
+	ConnackPayloadFormatInvalid        = 0x99
+	ConnackRetainNotSupported          = 0x9A
+	ConnackQoSNotSupported             = 0x9B
+	ConnackUseAnotherServer            = 0x9C
+	ConnackServerMoved                 = 0x9D
+	ConnackConnectionRateExceeded      = 0x9F
+)
+
+func (c *Connack) String() string {
+	return fmt.Sprintf("CONNACK: ReasonCode:%d SessionPresent:%t\nProperties:\n%s", c.ReasonCode, c.SessionPresent, c.Properties)
+}
+
+// Unpack is the implementation of the interface required function for a packet
+func (c *Connack) Unpack(r *bytes.Buffer) error {
+	connackFlags, err := r.ReadByte()
+	if err != nil {
+		return err
+	}
+	c.SessionPresent = connackFlags&0x01 > 0
+
+	c.ReasonCode, err = r.ReadByte()
+	if err != nil {
+		return err
+	}
+
+	err = c.Properties.Unpack(r, CONNACK)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Buffers is the implementation of the interface required function for a packet
+func (c *Connack) Buffers() net.Buffers {
+	var header bytes.Buffer
+
+	if c.SessionPresent {
+		header.WriteByte(1)
+	} else {
+		header.WriteByte(0)
+	}
+	header.WriteByte(c.ReasonCode)
+
+	idvp := c.Properties.Pack(CONNACK)
+
propLen := encodeVBI(len(idvp)) + + n := net.Buffers{header.Bytes(), propLen} + if len(idvp) > 0 { + n = append(n, idvp) + } + + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (c *Connack) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: CONNACK}} + cp.Content = c + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (c *Connack) Reason() string { + switch c.ReasonCode { + case 0: + return "Success - The Connection is accepted." + case 128: + return "Unspecified error - The Server does not wish to reveal the reason for the failure, or none of the other Reason Codes apply." + case 129: + return "Malformed Packet - Data within the CONNECT packet could not be correctly parsed." + case 130: + return "Protocol Error - Data in the CONNECT packet does not conform to this specification." + case 131: + return "Implementation specific error - The CONNECT is valid but is not accepted by this Server." + case 132: + return "Unsupported Protocol Version - The Server does not support the version of the MQTT protocol requested by the Client." + case 133: + return "Client Identifier not valid - The Client Identifier is a valid string but is not allowed by the Server." + case 134: + return "Bad User Name or Password - The Server does not accept the User Name or Password specified by the Client" + case 135: + return "Not authorized - The Client is not authorized to connect." + case 136: + return "Server unavailable - The MQTT Server is not available." + case 137: + return "Server busy - The Server is busy. Try again later." + case 138: + return "Banned - This Client has been banned by administrative action. Contact the server administrator." + case 140: + return "Bad authentication method - The authentication method is not supported or does not match the authentication method currently in use." 
+ case 144: + return "Topic Name invalid - The Will Topic Name is not malformed, but is not accepted by this Server." + case 149: + return "Packet too large - The CONNECT packet exceeded the maximum permissible size." + case 151: + return "Quota exceeded - An implementation or administrative imposed limit has been exceeded." + case 154: + return "Retain not supported - The Server does not support retained messages, and Will Retain was set to 1." + case 155: + return "QoS not supported - The Server does not support the QoS set in Will QoS." + case 156: + return "Use another server - The Client should temporarily use another server." + case 157: + return "Server moved - The Client should permanently use another server." + case 159: + return "Connection rate exceeded - The connection rate limit has been exceeded." + } + + return "" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/connect.go b/vendor/github.com/eclipse/paho.golang/packets/connect.go new file mode 100644 index 000000000..31340f6bd --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/connect.go @@ -0,0 +1,189 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Connect is the Variable Header definition for a connect control packet +type Connect struct { + WillMessage []byte + Password []byte + Username string + ProtocolName string + ClientID string + WillTopic string + Properties *Properties + WillProperties *Properties + KeepAlive uint16 + ProtocolVersion byte + WillQOS byte + PasswordFlag bool + UsernameFlag bool + WillRetain bool + WillFlag bool + CleanStart bool +} + +func (c *Connect) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "CONNECT: ProtocolName:%s ProtocolVersion:%d ClientID:%s KeepAlive:%d CleanStart:%t", c.ProtocolName, c.ProtocolVersion, c.ClientID, c.KeepAlive, c.CleanStart) + if c.UsernameFlag { + fmt.Fprintf(&b, " Username:%s", c.Username) + } + if c.PasswordFlag { + fmt.Fprintf(&b, " Password:%s", c.Password) + 
} + fmt.Fprint(&b, "\n") + if c.WillFlag { + fmt.Fprintf(&b, " WillTopic:%s WillQOS:%d WillRetain:%t WillMessage:\n%s\n", c.WillTopic, c.WillQOS, c.WillRetain, c.WillMessage) + if c.WillProperties != nil { + fmt.Fprintf(&b, "WillProperties:\n%s", c.WillProperties) + } + } + if c.Properties != nil { + fmt.Fprintf(&b, "Properties:\n%s", c.Properties) + } + + return b.String() +} + +// PackFlags takes the Connect flags and packs them into the single byte +// representation used on the wire by MQTT +func (c *Connect) PackFlags() (f byte) { + if c.UsernameFlag { + f |= 0x01 << 7 + } + if c.PasswordFlag { + f |= 0x01 << 6 + } + if c.WillFlag { + f |= 0x01 << 2 + f |= c.WillQOS << 3 + if c.WillRetain { + f |= 0x01 << 5 + } + } + if c.CleanStart { + f |= 0x01 << 1 + } + return +} + +// UnpackFlags takes the wire byte representing the connect options flags +// and fills out the appropriate variables in the struct +func (c *Connect) UnpackFlags(b byte) { + c.CleanStart = 1&(b>>1) > 0 + c.WillFlag = 1&(b>>2) > 0 + c.WillQOS = 3 & (b >> 3) + c.WillRetain = 1&(b>>5) > 0 + c.PasswordFlag = 1&(b>>6) > 0 + c.UsernameFlag = 1&(b>>7) > 0 +} + +//Unpack is the implementation of the interface required function for a packet +func (c *Connect) Unpack(r *bytes.Buffer) error { + var err error + + if c.ProtocolName, err = readString(r); err != nil { + return err + } + + if c.ProtocolVersion, err = r.ReadByte(); err != nil { + return err + } + + flags, err := r.ReadByte() + if err != nil { + return err + } + c.UnpackFlags(flags) + + if c.KeepAlive, err = readUint16(r); err != nil { + return err + } + + err = c.Properties.Unpack(r, CONNECT) + if err != nil { + return err + } + + c.ClientID, err = readString(r) + if err != nil { + return err + } + + if c.WillFlag { + c.WillProperties = &Properties{} + err = c.WillProperties.Unpack(r, CONNECT) + if err != nil { + return err + } + c.WillTopic, err = readString(r) + if err != nil { + return err + } + c.WillMessage, err = readBinary(r) + if err 
!= nil { + return err + } + } + + if c.UsernameFlag { + c.Username, err = readString(r) + if err != nil { + return err + } + } + + if c.PasswordFlag { + c.Password, err = readBinary(r) + if err != nil { + return err + } + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (c *Connect) Buffers() net.Buffers { + var cp bytes.Buffer + + writeString(c.ProtocolName, &cp) + cp.WriteByte(c.ProtocolVersion) + cp.WriteByte(c.PackFlags()) + writeUint16(c.KeepAlive, &cp) + idvp := c.Properties.Pack(CONNECT) + encodeVBIdirect(len(idvp), &cp) + cp.Write(idvp) + + writeString(c.ClientID, &cp) + if c.WillFlag { + willIdvp := c.WillProperties.Pack(CONNECT) + encodeVBIdirect(len(willIdvp), &cp) + cp.Write(willIdvp) + writeString(c.WillTopic, &cp) + writeBinary(c.WillMessage, &cp) + } + if c.UsernameFlag { + writeString(c.Username, &cp) + } + if c.PasswordFlag { + writeBinary(c.Password, &cp) + } + + return net.Buffers{cp.Bytes()} +} + +// WriteTo is the implementation of the interface required function for a packet +func (c *Connect) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: CONNECT}} + cp.Content = c + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/disconnect.go b/vendor/github.com/eclipse/paho.golang/packets/disconnect.go new file mode 100644 index 000000000..9180207a6 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/disconnect.go @@ -0,0 +1,152 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Disconnect is the Variable Header definition for a Disconnect control packet +type Disconnect struct { + Properties *Properties + ReasonCode byte +} + +func (d *Disconnect) String() string { + return fmt.Sprintf("DISCONNECT: ReasonCode:%X Properties\n%s", d.ReasonCode, d.Properties) +} + +// DisconnectNormalDisconnection, etc are the list of valid disconnection reason codes. 
+const ( + DisconnectNormalDisconnection = 0x00 + DisconnectDisconnectWithWillMessage = 0x04 + DisconnectUnspecifiedError = 0x80 + DisconnectMalformedPacket = 0x81 + DisconnectProtocolError = 0x82 + DisconnectImplementationSpecificError = 0x83 + DisconnectNotAuthorized = 0x87 + DisconnectServerBusy = 0x89 + DisconnectServerShuttingDown = 0x8B + DisconnectKeepAliveTimeout = 0x8D + DisconnectSessionTakenOver = 0x8E + DisconnectTopicFilterInvalid = 0x8F + DisconnectTopicNameInvalid = 0x90 + DisconnectReceiveMaximumExceeded = 0x93 + DisconnectTopicAliasInvalid = 0x94 + DisconnectPacketTooLarge = 0x95 + DisconnectMessageRateTooHigh = 0x96 + DisconnectQuotaExceeded = 0x97 + DisconnectAdministrativeAction = 0x98 + DisconnectPayloadFormatInvalid = 0x99 + DisconnectRetainNotSupported = 0x9A + DisconnectQoSNotSupported = 0x9B + DisconnectUseAnotherServer = 0x9C + DisconnectServerMoved = 0x9D + DisconnectSharedSubscriptionNotSupported = 0x9E + DisconnectConnectionRateExceeded = 0x9F + DisconnectMaximumConnectTime = 0xA0 + DisconnectSubscriptionIdentifiersNotSupported = 0xA1 + DisconnectWildcardSubscriptionsNotSupported = 0xA2 +) + +// Unpack is the implementation of the interface required function for a packet +func (d *Disconnect) Unpack(r *bytes.Buffer) error { + var err error + d.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + err = d.Properties.Unpack(r, DISCONNECT) + if err != nil { + return err + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (d *Disconnect) Buffers() net.Buffers { + idvp := d.Properties.Pack(DISCONNECT) + propLen := encodeVBI(len(idvp)) + n := net.Buffers{[]byte{d.ReasonCode}, propLen} + if len(idvp) > 0 { + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (d *Disconnect) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: DISCONNECT}} + cp.Content = d 
+ + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (d *Disconnect) Reason() string { + switch d.ReasonCode { + case 0: + return "Normal disconnection - Close the connection normally. Do not send the Will Message." + case 4: + return "Disconnect with Will Message - The Client wishes to disconnect but requires that the Server also publishes its Will Message." + case 128: + return "Unspecified error - The Connection is closed but the sender either does not wish to reveal the reason, or none of the other Reason Codes apply." + case 129: + return "Malformed Packet - The received packet does not conform to this specification." + case 130: + return "Protocol Error - An unexpected or out of order packet was received." + case 131: + return "Implementation specific error - The packet received is valid but cannot be processed by this implementation." + case 135: + return "Not authorized - The request is not authorized." + case 137: + return "Server busy - The Server is busy and cannot continue processing requests from this Client." + case 139: + return "Server shutting down - The Server is shutting down." + case 141: + return "Keep Alive timeout - The Connection is closed because no packet has been received for 1.5 times the Keepalive time." + case 142: + return "Session taken over - Another Connection using the same ClientID has connected causing this Connection to be closed." + case 143: + return "Topic Filter invalid - The Topic Filter is correctly formed, but is not accepted by this Sever." + case 144: + return "Topic Name invalid - The Topic Name is correctly formed, but is not accepted by this Client or Server." + case 147: + return "Receive Maximum exceeded - The Client or Server has received more than Receive Maximum publication for which it has not sent PUBACK or PUBCOMP." 
+ case 148: + return "Topic Alias invalid - The Client or Server has received a PUBLISH packet containing a Topic Alias which is greater than the Maximum Topic Alias it sent in the CONNECT or CONNACK packet." + case 149: + return "Packet too large - The packet size is greater than Maximum Packet Size for this Client or Server." + case 150: + return "Message rate too high - The received data rate is too high." + case 151: + return "Quota exceeded - An implementation or administrative imposed limit has been exceeded." + case 152: + return "Administrative action - The Connection is closed due to an administrative action." + case 153: + return "Payload format invalid - The payload format does not match the one specified by the Payload Format Indicator." + case 154: + return "Retain not supported - The Server has does not support retained messages." + case 155: + return "QoS not supported - The Client specified a QoS greater than the QoS specified in a Maximum QoS in the CONNACK." + case 156: + return "Use another server - The Client should temporarily change its Server." + case 157: + return "Server moved - The Server is moved and the Client should permanently change its server location." + case 158: + return "Shared Subscription not supported - The Server does not support Shared Subscriptions." + case 159: + return "Connection rate exceeded - This connection is closed because the connection rate is too high." + case 160: + return "Maximum connect time - The maximum connection time authorized for this connection has been exceeded." + case 161: + return "Subscription Identifiers not supported - The Server does not support Subscription Identifiers; the subscription is not accepted." + case 162: + return "Wildcard subscriptions not supported - The Server does not support Wildcard subscription; the subscription is not accepted." 
+ } + + return "" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/packets.go b/vendor/github.com/eclipse/paho.golang/packets/packets.go new file mode 100644 index 000000000..496594012 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/packets.go @@ -0,0 +1,447 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "sync" +) + +// PacketType is a type alias to byte representing the different +// MQTT control packet types +// type PacketType byte + +// The following consts are the packet type number for each of the +// different control packets in MQTT +const ( + _ byte = iota + CONNECT + CONNACK + PUBLISH + PUBACK + PUBREC + PUBREL + PUBCOMP + SUBSCRIBE + SUBACK + UNSUBSCRIBE + UNSUBACK + PINGREQ + PINGRESP + DISCONNECT + AUTH +) + +type ( + // Packet is the interface defining the unique parts of a controlpacket + Packet interface { + Unpack(*bytes.Buffer) error + Buffers() net.Buffers + WriteTo(io.Writer) (int64, error) + } + + // FixedHeader is the definition of a control packet fixed header + FixedHeader struct { + remainingLength int + Type byte + Flags byte + } + + // ControlPacket is the definition of a control packet + ControlPacket struct { + Content Packet + FixedHeader + } +) + +// NewThreadSafeConn wraps net.Conn with a mutex. ControlPacket uses it in +// WriteTo method to ensure parallel writes are thread-Safe. +func NewThreadSafeConn(c net.Conn) net.Conn { + type threadSafeConn struct { + net.Conn + sync.Locker + } + + return &threadSafeConn{ + Conn: c, + Locker: &sync.Mutex{}, + } +} + +// WriteTo operates on a FixedHeader and takes the option values and produces +// the wire format byte that represents these. 
+func (f *FixedHeader) WriteTo(w io.Writer) (int64, error) { + if _, err := w.Write([]byte{byte(f.Type)<<4 | f.Flags}); err != nil { + return 0, err + } + if _, err := w.Write(encodeVBI(f.remainingLength)); err != nil { + return 0, err + } + + return 0, nil +} + +// PacketID is a helper function that returns the value of the PacketID +// field from any kind of mqtt packet in the Content element +func (c *ControlPacket) PacketID() uint16 { + switch r := c.Content.(type) { + case *Publish: + return r.PacketID + case *Puback: + return r.PacketID + case *Pubrec: + return r.PacketID + case *Pubrel: + return r.PacketID + case *Pubcomp: + return r.PacketID + case *Subscribe: + return r.PacketID + case *Suback: + return r.PacketID + case *Unsubscribe: + return r.PacketID + case *Unsuback: + return r.PacketID + default: + return 0 + } +} + +func (c *ControlPacket) PacketType() string { + return [...]string{ + "", + "CONNECT", + "CONNACK", + "PUBLISH", + "PUBACK", + "PUBREC", + "PUBREL", + "PUBCOMP", + "SUBSCRIBE", + "SUBACK", + "UNSUBSCRIBE", + "UNSUBACK", + "PINGREQ", + "PINGRESP", + "DISCONNECT", + "AUTH", + }[c.FixedHeader.Type] +} + +// NewControlPacket takes a packetType and returns a pointer to a +// ControlPacket where the VariableHeader field is a pointer to an +// instance of a VariableHeader definition for that packetType +func NewControlPacket(t byte) *ControlPacket { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: t}} + switch t { + case CONNECT: + cp.Content = &Connect{ + ProtocolName: "MQTT", + ProtocolVersion: 5, + Properties: &Properties{}, + } + case CONNACK: + cp.Content = &Connack{Properties: &Properties{}} + case PUBLISH: + cp.Content = &Publish{Properties: &Properties{}} + case PUBACK: + cp.Content = &Puback{Properties: &Properties{}} + case PUBREC: + cp.Content = &Pubrec{Properties: &Properties{}} + case PUBREL: + cp.Flags = 2 + cp.Content = &Pubrel{Properties: &Properties{}} + case PUBCOMP: + cp.Content = &Pubcomp{Properties: &Properties{}} + 
case SUBSCRIBE: + cp.Flags = 2 + cp.Content = &Subscribe{ + Subscriptions: make(map[string]SubOptions), + Properties: &Properties{}, + } + case SUBACK: + cp.Content = &Suback{Properties: &Properties{}} + case UNSUBSCRIBE: + cp.Flags = 2 + cp.Content = &Unsubscribe{Properties: &Properties{}} + case UNSUBACK: + cp.Content = &Unsuback{Properties: &Properties{}} + case PINGREQ: + cp.Content = &Pingreq{} + case PINGRESP: + cp.Content = &Pingresp{} + case DISCONNECT: + cp.Content = &Disconnect{Properties: &Properties{}} + case AUTH: + cp.Flags = 1 + cp.Content = &Auth{Properties: &Properties{}} + default: + return nil + } + return cp +} + +// ReadPacket reads a control packet from a io.Reader and returns a completed +// struct with the appropriate data +func ReadPacket(r io.Reader) (*ControlPacket, error) { + t := [1]byte{} + _, err := io.ReadFull(r, t[:]) + if err != nil { + return nil, err + } + // cp := NewControlPacket(PacketType(t[0] >> 4)) + // if cp == nil { + // return nil, fmt.Errorf("invalid packet type requested, %d", t[0]>>4) + // } + + pt := t[0] >> 4 + cp := &ControlPacket{FixedHeader: FixedHeader{Type: pt}} + switch pt { + case CONNECT: + cp.Content = &Connect{ + ProtocolName: "MQTT", + ProtocolVersion: 5, + Properties: &Properties{}, + } + case CONNACK: + cp.Content = &Connack{Properties: &Properties{}} + case PUBLISH: + cp.Content = &Publish{Properties: &Properties{}} + case PUBACK: + cp.Content = &Puback{Properties: &Properties{}} + case PUBREC: + cp.Content = &Pubrec{Properties: &Properties{}} + case PUBREL: + cp.Flags = 2 + cp.Content = &Pubrel{Properties: &Properties{}} + case PUBCOMP: + cp.Content = &Pubcomp{Properties: &Properties{}} + case SUBSCRIBE: + cp.Flags = 2 + cp.Content = &Subscribe{ + Subscriptions: make(map[string]SubOptions), + Properties: &Properties{}, + } + case SUBACK: + cp.Content = &Suback{Properties: &Properties{}} + case UNSUBSCRIBE: + cp.Flags = 2 + cp.Content = &Unsubscribe{Properties: &Properties{}} + case UNSUBACK: + 
cp.Content = &Unsuback{Properties: &Properties{}} + case PINGREQ: + cp.Content = &Pingreq{} + case PINGRESP: + cp.Content = &Pingresp{} + case DISCONNECT: + cp.Content = &Disconnect{Properties: &Properties{}} + case AUTH: + cp.Flags = 1 + cp.Content = &Auth{Properties: &Properties{}} + default: + return nil, fmt.Errorf("unknown packet type %d requested", pt) + } + + cp.Flags = t[0] & 0xF + if cp.Type == PUBLISH { + cp.Content.(*Publish).QoS = (cp.Flags & 0x6) >> 1 + } + vbi, err := getVBI(r) + if err != nil { + return nil, err + } + cp.remainingLength, err = decodeVBI(vbi) + if err != nil { + return nil, err + } + + var content bytes.Buffer + content.Grow(cp.remainingLength) + + n, err := io.CopyN(&content, r, int64(cp.remainingLength)) + if err != nil { + return nil, err + } + + if n != int64(cp.remainingLength) { + return nil, fmt.Errorf("failed to read packet, expected %d bytes, read %d", cp.remainingLength, n) + } + err = cp.Content.Unpack(&content) + if err != nil { + return nil, err + } + return cp, nil +} + +// WriteTo writes a packet to an io.Writer, handling packing all the parts of +// a control packet. +func (c *ControlPacket) WriteTo(w io.Writer) (int64, error) { + buffers := c.Content.Buffers() + for _, b := range buffers { + c.remainingLength += len(b) + } + + var header bytes.Buffer + if _, err := c.FixedHeader.WriteTo(&header); err != nil { + return 0, err + } + + buffers = append(net.Buffers{header.Bytes()}, buffers...) 
+ + if safe, ok := w.(sync.Locker); ok { + safe.Lock() + defer safe.Unlock() + } + return buffers.WriteTo(w) +} + +func encodeVBI(length int) []byte { + var x int + b := [4]byte{} + for { + digit := byte(length % 128) + length /= 128 + if length > 0 { + digit |= 0x80 + } + b[x] = digit + x++ + if length == 0 { + return b[:x] + } + } +} + +func encodeVBIdirect(length int, buf *bytes.Buffer) { + var x int + b := [4]byte{} + for { + digit := byte(length % 128) + length /= 128 + if length > 0 { + digit |= 0x80 + } + b[x] = digit + x++ + if length == 0 { + buf.Write(b[:x]) + return + } + } +} + +func getVBI(r io.Reader) (*bytes.Buffer, error) { + var ret bytes.Buffer + digit := [1]byte{} + for { + _, err := io.ReadFull(r, digit[:]) + if err != nil { + return nil, err + } + ret.WriteByte(digit[0]) + if digit[0] <= 0x7f { + return &ret, nil + } + } +} + +func decodeVBI(r *bytes.Buffer) (int, error) { + var vbi uint32 + var multiplier uint32 + for { + digit, err := r.ReadByte() + if err != nil && err != io.EOF { + return 0, err + } + vbi |= uint32(digit&127) << multiplier + if (digit & 128) == 0 { + break + } + multiplier += 7 + } + return int(vbi), nil +} + +func writeUint16(u uint16, b *bytes.Buffer) error { + if err := b.WriteByte(byte(u >> 8)); err != nil { + return err + } + return b.WriteByte(byte(u)) +} + +func writeUint32(u uint32, b *bytes.Buffer) error { + if err := b.WriteByte(byte(u >> 24)); err != nil { + return err + } + if err := b.WriteByte(byte(u >> 16)); err != nil { + return err + } + if err := b.WriteByte(byte(u >> 8)); err != nil { + return err + } + return b.WriteByte(byte(u)) +} + +func writeString(s string, b *bytes.Buffer) { + writeUint16(uint16(len(s)), b) + b.WriteString(s) +} + +func writeBinary(d []byte, b *bytes.Buffer) { + writeUint16(uint16(len(d)), b) + b.Write(d) +} + +func readUint16(b *bytes.Buffer) (uint16, error) { + b1, err := b.ReadByte() + if err != nil { + return 0, err + } + b2, err := b.ReadByte() + if err != nil { + return 0, 
err + } + return (uint16(b1) << 8) | uint16(b2), nil +} + +func readUint32(b *bytes.Buffer) (uint32, error) { + b1, err := b.ReadByte() + if err != nil { + return 0, err + } + b2, err := b.ReadByte() + if err != nil { + return 0, err + } + b3, err := b.ReadByte() + if err != nil { + return 0, err + } + b4, err := b.ReadByte() + if err != nil { + return 0, err + } + return (uint32(b1) << 24) | (uint32(b2) << 16) | (uint32(b3) << 8) | uint32(b4), nil +} + +func readBinary(b *bytes.Buffer) ([]byte, error) { + size, err := readUint16(b) + if err != nil { + return nil, err + } + + var s bytes.Buffer + s.Grow(int(size)) + if _, err := io.CopyN(&s, b, int64(size)); err != nil { + return nil, err + } + + return s.Bytes(), nil +} + +func readString(b *bytes.Buffer) (string, error) { + s, err := readBinary(b) + return string(s), err +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/pingreq.go b/vendor/github.com/eclipse/paho.golang/packets/pingreq.go new file mode 100644 index 000000000..85f30c2b5 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/pingreq.go @@ -0,0 +1,34 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Pingreq is the Variable Header definition for a Pingreq control packet +type Pingreq struct { +} + +func (p *Pingreq) String() string { + return fmt.Sprintf("PINGREQ") +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Pingreq) Unpack(r *bytes.Buffer) error { + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Pingreq) Buffers() net.Buffers { + return nil +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Pingreq) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PINGREQ}} + cp.Content = p + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/pingresp.go 
b/vendor/github.com/eclipse/paho.golang/packets/pingresp.go new file mode 100644 index 000000000..c110fc4dc --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/pingresp.go @@ -0,0 +1,34 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Pingresp is the Variable Header definition for a Pingresp control packet +type Pingresp struct { +} + +func (p *Pingresp) String() string { + return fmt.Sprintf("PINGRESP") +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Pingresp) Unpack(r *bytes.Buffer) error { + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Pingresp) Buffers() net.Buffers { + return nil +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Pingresp) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PINGRESP}} + cp.Content = p + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/properties.go b/vendor/github.com/eclipse/paho.golang/packets/properties.go new file mode 100644 index 000000000..fe1f5e22e --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/properties.go @@ -0,0 +1,804 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "strings" +) + +// PropPayloadFormat, etc are the list of property codes for the +// MQTT packet properties +const ( + PropPayloadFormat byte = 1 + PropMessageExpiry byte = 2 + PropContentType byte = 3 + PropResponseTopic byte = 8 + PropCorrelationData byte = 9 + PropSubscriptionIdentifier byte = 11 + PropSessionExpiryInterval byte = 17 + PropAssignedClientID byte = 18 + PropServerKeepAlive byte = 19 + PropAuthMethod byte = 21 + PropAuthData byte = 22 + PropRequestProblemInfo byte = 23 + PropWillDelayInterval byte = 24 + PropRequestResponseInfo byte = 25 + PropResponseInfo byte = 26 + PropServerReference byte = 28 + PropReasonString byte = 31 + 
PropReceiveMaximum byte = 33 + PropTopicAliasMaximum byte = 34 + PropTopicAlias byte = 35 + PropMaximumQOS byte = 36 + PropRetainAvailable byte = 37 + PropUser byte = 38 + PropMaximumPacketSize byte = 39 + PropWildcardSubAvailable byte = 40 + PropSubIDAvailable byte = 41 + PropSharedSubAvailable byte = 42 +) + +// User is a struct for the User properties, originally it was a map +// then it was pointed out that user properties are allowed to appear +// more than once +type User struct { + Key, Value string +} + +// Properties is a struct representing the all the described properties +// allowed by the MQTT protocol, determining the validity of a property +// relvative to the packettype it was received in is provided by the +// ValidateID function +type Properties struct { + // PayloadFormat indicates the format of the payload of the message + // 0 is unspecified bytes + // 1 is UTF8 encoded character data + PayloadFormat *byte + // MessageExpiry is the lifetime of the message in seconds + MessageExpiry *uint32 + // ContentType is a UTF8 string describing the content of the message + // for example it could be a MIME type + ContentType string + // ResponseTopic is a UTF8 string indicating the topic name to which any + // response to this message should be sent + ResponseTopic string + // CorrelationData is binary data used to associate future response + // messages with the original request message + CorrelationData []byte + // SubscriptionIdentifier is an identifier of the subscription to which + // the Publish matched + SubscriptionIdentifier *int + // SessionExpiryInterval is the time in seconds after a client disconnects + // that the server should retain the session information (subscriptions etc) + SessionExpiryInterval *uint32 + // AssignedClientID is the server assigned client identifier in the case + // that a client connected without specifying a clientID the server + // generates one and returns it in the Connack + AssignedClientID string + // 
ServerKeepAlive allows the server to specify in the Connack packet + // the time in seconds to be used as the keep alive value + ServerKeepAlive *uint16 + // AuthMethod is a UTF8 string containing the name of the authentication + // method to be used for extended authentication + AuthMethod string + // AuthData is binary data containing authentication data + AuthData []byte + // RequestProblemInfo is used by the Client to indicate to the server to + // include the Reason String and/or User Properties in case of failures + RequestProblemInfo *byte + // WillDelayInterval is the number of seconds the server waits after the + // point at which it would otherwise send the will message before sending + // it. The client reconnecting before that time expires causes the server + // to cancel sending the will + WillDelayInterval *uint32 + // RequestResponseInfo is used by the Client to request the Server provide + // Response Information in the Connack + RequestResponseInfo *byte + // ResponseInfo is a UTF8 encoded string that can be used as the basis for + // createing a Response Topic. The way in which the Client creates a + // Response Topic from the Response Information is not defined. A common + // use of this is to pass a globally unique portion of the topic tree which + // is reserved for this Client for at least the lifetime of its Session. This + // often cannot just be a random name as both the requesting Client and the + // responding Client need to be authorized to use it. It is normal to use this + // as the root of a topic tree for a particular Client. For the Server to + // return this information, it normally needs to be correctly configured. 
+ // Using this mechanism allows this configuration to be done once in the + // Server rather than in each Client + ResponseInfo string + // ServerReference is a UTF8 string indicating another server the client + // can use + ServerReference string + // ReasonString is a UTF8 string representing the reason associated with + // this response, intended to be human readable for diagnostic purposes + ReasonString string + // ReceiveMaximum is the maximum number of QOS1 & 2 messages allowed to be + // 'inflight' (not having received a PUBACK/PUBCOMP response for) + ReceiveMaximum *uint16 + // TopicAliasMaximum is the highest value permitted as a Topic Alias + TopicAliasMaximum *uint16 + // TopicAlias is used in place of the topic string to reduce the size of + // packets for repeated messages on a topic + TopicAlias *uint16 + // MaximumQOS is the highest QOS level permitted for a Publish + MaximumQOS *byte + // RetainAvailable indicates whether the server supports messages with the + // retain flag set + RetainAvailable *byte + // User is a slice of user provided properties (key and value) + User []User + // MaximumPacketSize allows the client or server to specify the maximum packet + // size in bytes that they support + MaximumPacketSize *uint32 + // WildcardSubAvailable indicates whether wildcard subscriptions are permitted + WildcardSubAvailable *byte + // SubIDAvailable indicates whether subscription identifiers are supported + SubIDAvailable *byte + // SharedSubAvailable indicates whether shared subscriptions are supported + SharedSubAvailable *byte +} + +func (p *Properties) String() string { + var b strings.Builder + if p.PayloadFormat != nil { + fmt.Fprintf(&b, "\tPayloadFormat:%d\n", *p.PayloadFormat) + } + if p.MessageExpiry != nil { + fmt.Fprintf(&b, "\tMessageExpiry:%d\n", *p.MessageExpiry) + } + if p.ContentType != "" { + fmt.Fprintf(&b, "\tContentType:%s\n", p.ContentType) + } + if p.ResponseTopic != "" { + fmt.Fprintf(&b, "\tResponseTopic:%s\n", 
p.ResponseTopic) + } + if len(p.CorrelationData) > 0 { + fmt.Fprintf(&b, "\tCorrelationData:%X\n", p.CorrelationData) + } + if p.SubscriptionIdentifier != nil { + fmt.Fprintf(&b, "\tSubscriptionIdentifier:%d\n", *p.SubscriptionIdentifier) + } + if p.SessionExpiryInterval != nil { + fmt.Fprintf(&b, "\tSessionExpiryInterval:%d\n", *p.SessionExpiryInterval) + } + if p.AssignedClientID != "" { + fmt.Fprintf(&b, "\tAssignedClientID:%s\n", p.AssignedClientID) + } + if p.ServerKeepAlive != nil { + fmt.Fprintf(&b, "\tServerKeepAlive:%d\n", *p.ServerKeepAlive) + } + if p.AuthMethod != "" { + fmt.Fprintf(&b, "\tAuthMethod:%s\n", p.AuthMethod) + } + if len(p.AuthData) > 0 { + fmt.Fprintf(&b, "\tAuthData:%X\n", p.AuthData) + } + if p.RequestProblemInfo != nil { + fmt.Fprintf(&b, "\tRequestProblemInfo:%d\n", *p.RequestProblemInfo) + } + if p.WillDelayInterval != nil { + fmt.Fprintf(&b, "\tWillDelayInterval:%d\n", *p.WillDelayInterval) + } + if p.RequestResponseInfo != nil { + fmt.Fprintf(&b, "\tRequestResponseInfo:%d\n", *p.RequestResponseInfo) + } + if p.ServerReference != "" { + fmt.Fprintf(&b, "\tServerReference:%s\n", p.ServerReference) + } + if p.ReasonString != "" { + fmt.Fprintf(&b, "\tReasonString:%s\n", p.ReasonString) + } + if p.ReceiveMaximum != nil { + fmt.Fprintf(&b, "\tReceiveMaximum:%d\n", *p.ReceiveMaximum) + } + if p.TopicAliasMaximum != nil { + fmt.Fprintf(&b, "\tTopicAliasMaximum:%d\n", *p.TopicAliasMaximum) + } + if p.TopicAlias != nil { + fmt.Fprintf(&b, "\tTopicAlias:%d\n", *p.TopicAlias) + } + if p.MaximumQOS != nil { + fmt.Fprintf(&b, "\tMaximumQOS:%d\n", *p.MaximumQOS) + } + if p.RetainAvailable != nil { + fmt.Fprintf(&b, "\tRetainAvailable:%d\n", *p.RetainAvailable) + } + if p.MaximumPacketSize != nil { + fmt.Fprintf(&b, "\tMaximumPacketSize:%d\n", *p.MaximumPacketSize) + } + if p.WildcardSubAvailable != nil { + fmt.Fprintf(&b, "\tWildcardSubAvailable:%d\n", *p.WildcardSubAvailable) + } + if p.SubIDAvailable != nil { + fmt.Fprintf(&b, 
"\tSubIDAvailable:%d\n", *p.SubIDAvailable) + } + if p.SharedSubAvailable != nil { + fmt.Fprintf(&b, "\tSharedSubAvailable:%d\n", *p.SharedSubAvailable) + } + if len(p.User) > 0 { + fmt.Fprint(&b, "\tUser Properties:\n") + for _, v := range p.User { + fmt.Fprintf(&b, "\t\t%s:%s\n", v.Key, v.Value) + } + } + + return b.String() +} + +// Pack takes all the defined properties for an Properties and produces +// a slice of bytes representing the wire format for the information +func (i *Properties) Pack(p byte) []byte { + var b bytes.Buffer + + if i == nil { + return nil + } + + if p == PUBLISH { + if i.PayloadFormat != nil { + b.WriteByte(PropPayloadFormat) + b.WriteByte(*i.PayloadFormat) + } + + if i.MessageExpiry != nil { + b.WriteByte(PropMessageExpiry) + writeUint32(*i.MessageExpiry, &b) + } + + if i.ContentType != "" { + b.WriteByte(PropContentType) + writeString(i.ContentType, &b) + } + + if i.ResponseTopic != "" { + b.WriteByte(PropResponseTopic) + writeString(i.ResponseTopic, &b) + } + + if len(i.CorrelationData) > 0 { + b.WriteByte(PropCorrelationData) + writeBinary(i.CorrelationData, &b) + } + + if i.TopicAlias != nil { + b.WriteByte(PropTopicAlias) + writeUint16(*i.TopicAlias, &b) + } + } + + if p == PUBLISH || p == SUBSCRIBE { + if i.SubscriptionIdentifier != nil { + b.WriteByte(PropSubscriptionIdentifier) + encodeVBIdirect(*i.SubscriptionIdentifier, &b) + } + } + + if p == CONNECT || p == CONNACK { + if i.ReceiveMaximum != nil { + b.WriteByte(PropReceiveMaximum) + writeUint16(*i.ReceiveMaximum, &b) + } + + if i.TopicAliasMaximum != nil { + b.WriteByte(PropTopicAliasMaximum) + writeUint16(*i.TopicAliasMaximum, &b) + } + + if i.MaximumQOS != nil { + b.WriteByte(PropMaximumQOS) + b.WriteByte(*i.MaximumQOS) + } + + if i.MaximumPacketSize != nil { + b.WriteByte(PropMaximumPacketSize) + writeUint32(*i.MaximumPacketSize, &b) + } + } + + if p == CONNACK { + if i.AssignedClientID != "" { + b.WriteByte(PropAssignedClientID) + writeString(i.AssignedClientID, &b) + } 
+ + if i.ServerKeepAlive != nil { + b.WriteByte(PropServerKeepAlive) + writeUint16(*i.ServerKeepAlive, &b) + } + + if i.WildcardSubAvailable != nil { + b.WriteByte(PropWildcardSubAvailable) + b.WriteByte(*i.WildcardSubAvailable) + } + + if i.SubIDAvailable != nil { + b.WriteByte(PropSubIDAvailable) + b.WriteByte(*i.SubIDAvailable) + } + + if i.SharedSubAvailable != nil { + b.WriteByte(PropSharedSubAvailable) + b.WriteByte(*i.SharedSubAvailable) + } + + if i.RetainAvailable != nil { + b.WriteByte(PropRetainAvailable) + b.WriteByte(*i.RetainAvailable) + } + + if i.ResponseInfo != "" { + b.WriteByte(PropResponseInfo) + writeString(i.ResponseInfo, &b) + } + } + + if p == CONNECT { + if i.RequestProblemInfo != nil { + b.WriteByte(PropRequestProblemInfo) + b.WriteByte(*i.RequestProblemInfo) + } + + if i.WillDelayInterval != nil { + b.WriteByte(PropWillDelayInterval) + writeUint32(*i.WillDelayInterval, &b) + } + + if i.RequestResponseInfo != nil { + b.WriteByte(PropRequestResponseInfo) + b.WriteByte(*i.RequestResponseInfo) + } + } + + if p == CONNECT || p == CONNACK || p == DISCONNECT { + if i.SessionExpiryInterval != nil { + b.WriteByte(PropSessionExpiryInterval) + writeUint32(*i.SessionExpiryInterval, &b) + } + } + + if p == CONNECT || p == CONNACK || p == AUTH { + if i.AuthMethod != "" { + b.WriteByte(PropAuthMethod) + writeString(i.AuthMethod, &b) + } + + if i.AuthData != nil && len(i.AuthData) > 0 { + b.WriteByte(PropAuthData) + writeBinary(i.AuthData, &b) + } + } + + if p == CONNACK || p == DISCONNECT { + if i.ServerReference != "" { + b.WriteByte(PropServerReference) + writeString(i.ServerReference, &b) + } + } + + if p != CONNECT { + if i.ReasonString != "" { + b.WriteByte(PropReasonString) + writeString(i.ReasonString, &b) + } + } + + for _, v := range i.User { + b.WriteByte(PropUser) + writeString(v.Key, &b) + writeString(v.Value, &b) + } + + return b.Bytes() +} + +// PackBuf will create a bytes.Buffer of the packed properties, it +// will only pack the 
properties appropriate to the packet type p +// even though other properties may exist, it will silently ignore +// them +func (i *Properties) PackBuf(p byte) *bytes.Buffer { + var b bytes.Buffer + + if i == nil { + return nil + } + + if p == PUBLISH { + if i.PayloadFormat != nil { + b.WriteByte(PropPayloadFormat) + b.WriteByte(*i.PayloadFormat) + } + + if i.MessageExpiry != nil { + b.WriteByte(PropMessageExpiry) + writeUint32(*i.MessageExpiry, &b) + } + + if i.ContentType != "" { + b.WriteByte(PropContentType) + writeString(i.ContentType, &b) + } + + if i.ResponseTopic != "" { + b.WriteByte(PropResponseTopic) + writeString(i.ResponseTopic, &b) + } + + if i.CorrelationData != nil && len(i.CorrelationData) > 0 { + b.WriteByte(PropCorrelationData) + writeBinary(i.CorrelationData, &b) + } + + if i.TopicAlias != nil { + b.WriteByte(PropTopicAlias) + writeUint16(*i.TopicAlias, &b) + } + } + + if p == PUBLISH || p == SUBSCRIBE { + if i.SubscriptionIdentifier != nil { + b.WriteByte(PropSubscriptionIdentifier) + encodeVBIdirect(*i.SubscriptionIdentifier, &b) + } + } + + if p == CONNECT || p == CONNACK { + if i.ReceiveMaximum != nil { + b.WriteByte(PropReceiveMaximum) + writeUint16(*i.ReceiveMaximum, &b) + } + + if i.TopicAliasMaximum != nil { + b.WriteByte(PropTopicAliasMaximum) + writeUint16(*i.TopicAliasMaximum, &b) + } + + if i.MaximumQOS != nil { + b.WriteByte(PropMaximumQOS) + b.WriteByte(*i.MaximumQOS) + } + + if i.MaximumPacketSize != nil { + b.WriteByte(PropMaximumPacketSize) + writeUint32(*i.MaximumPacketSize, &b) + } + } + + if p == CONNACK { + if i.AssignedClientID != "" { + b.WriteByte(PropAssignedClientID) + writeString(i.AssignedClientID, &b) + } + + if i.ServerKeepAlive != nil { + b.WriteByte(PropServerKeepAlive) + writeUint16(*i.ServerKeepAlive, &b) + } + + if i.WildcardSubAvailable != nil { + b.WriteByte(PropWildcardSubAvailable) + b.WriteByte(*i.WildcardSubAvailable) + } + + if i.SubIDAvailable != nil { + b.WriteByte(PropSubIDAvailable) + 
b.WriteByte(*i.SubIDAvailable) + } + + if i.SharedSubAvailable != nil { + b.WriteByte(PropSharedSubAvailable) + b.WriteByte(*i.SharedSubAvailable) + } + + if i.RetainAvailable != nil { + b.WriteByte(PropRetainAvailable) + b.WriteByte(*i.RetainAvailable) + } + + if i.ResponseInfo != "" { + b.WriteByte(PropResponseInfo) + writeString(i.ResponseInfo, &b) + } + } + + if p == CONNECT { + if i.RequestProblemInfo != nil { + b.WriteByte(PropRequestProblemInfo) + b.WriteByte(*i.RequestProblemInfo) + } + + if i.WillDelayInterval != nil { + b.WriteByte(PropWillDelayInterval) + writeUint32(*i.WillDelayInterval, &b) + } + + if i.RequestResponseInfo != nil { + b.WriteByte(PropRequestResponseInfo) + b.WriteByte(*i.RequestResponseInfo) + } + } + + if p == CONNECT || p == CONNACK || p == DISCONNECT { + if i.SessionExpiryInterval != nil { + b.WriteByte(PropSessionExpiryInterval) + writeUint32(*i.SessionExpiryInterval, &b) + } + } + + if p == CONNECT || p == CONNACK || p == AUTH { + if i.AuthMethod != "" { + b.WriteByte(PropAuthMethod) + writeString(i.AuthMethod, &b) + } + + if i.AuthData != nil && len(i.AuthData) > 0 { + b.WriteByte(PropAuthData) + writeBinary(i.AuthData, &b) + } + } + + if p == CONNACK || p == DISCONNECT { + if i.ServerReference != "" { + b.WriteByte(PropServerReference) + writeString(i.ServerReference, &b) + } + } + + if p != CONNECT { + if i.ReasonString != "" { + b.WriteByte(PropReasonString) + writeString(i.ReasonString, &b) + } + } + + for _, v := range i.User { + b.WriteByte(PropUser) + writeString(v.Key, &b) + writeString(v.Value, &b) + } + + return &b +} + +// Unpack takes a buffer of bytes and reads out the defined properties +// filling in the appropriate entries in the struct, it returns the number +// of bytes used to store the Prop data and any error in decoding them +func (i *Properties) Unpack(r *bytes.Buffer, p byte) error { + vbi, err := getVBI(r) + if err != nil { + return err + } + size, err := decodeVBI(vbi) + if err != nil { + return err + } + 
if size == 0 { + return nil + } + + buf := bytes.NewBuffer(r.Next(size)) + for { + PropType, err := buf.ReadByte() + if err != nil && err != io.EOF { + return err + } + if err == io.EOF { + break + } + if !ValidateID(p, PropType) { + return fmt.Errorf("invalid Prop type %d for packet %d", PropType, p) + } + switch PropType { + case PropPayloadFormat: + pf, err := buf.ReadByte() + if err != nil { + return err + } + i.PayloadFormat = &pf + case PropMessageExpiry: + pe, err := readUint32(buf) + if err != nil { + return err + } + i.MessageExpiry = &pe + case PropContentType: + ct, err := readString(buf) + if err != nil { + return err + } + i.ContentType = ct + case PropResponseTopic: + tr, err := readString(buf) + if err != nil { + return err + } + i.ResponseTopic = tr + case PropCorrelationData: + cd, err := readBinary(buf) + if err != nil { + return err + } + i.CorrelationData = cd + case PropSubscriptionIdentifier: + si, err := decodeVBI(buf) + if err != nil { + return err + } + i.SubscriptionIdentifier = &si + case PropSessionExpiryInterval: + se, err := readUint32(buf) + if err != nil { + return err + } + i.SessionExpiryInterval = &se + case PropAssignedClientID: + ac, err := readString(buf) + if err != nil { + return err + } + i.AssignedClientID = ac + case PropServerKeepAlive: + sk, err := readUint16(buf) + if err != nil { + return err + } + i.ServerKeepAlive = &sk + case PropAuthMethod: + am, err := readString(buf) + if err != nil { + return err + } + i.AuthMethod = am + case PropAuthData: + ad, err := readBinary(buf) + if err != nil { + return err + } + i.AuthData = ad + case PropRequestProblemInfo: + rp, err := buf.ReadByte() + if err != nil { + return err + } + i.RequestProblemInfo = &rp + case PropWillDelayInterval: + wd, err := readUint32(buf) + if err != nil { + return err + } + i.WillDelayInterval = &wd + case PropRequestResponseInfo: + rp, err := buf.ReadByte() + if err != nil { + return err + } + i.RequestResponseInfo = &rp + case PropResponseInfo: + 
ri, err := readString(buf) + if err != nil { + return err + } + i.ResponseInfo = ri + case PropServerReference: + sr, err := readString(buf) + if err != nil { + return err + } + i.ServerReference = sr + case PropReasonString: + rs, err := readString(buf) + if err != nil { + return err + } + i.ReasonString = rs + case PropReceiveMaximum: + rm, err := readUint16(buf) + if err != nil { + return err + } + i.ReceiveMaximum = &rm + case PropTopicAliasMaximum: + ta, err := readUint16(buf) + if err != nil { + return err + } + i.TopicAliasMaximum = &ta + case PropTopicAlias: + ta, err := readUint16(buf) + if err != nil { + return err + } + i.TopicAlias = &ta + case PropMaximumQOS: + mq, err := buf.ReadByte() + if err != nil { + return err + } + i.MaximumQOS = &mq + case PropRetainAvailable: + ra, err := buf.ReadByte() + if err != nil { + return err + } + i.RetainAvailable = &ra + case PropUser: + k, err := readString(buf) + if err != nil { + return err + } + v, err := readString(buf) + if err != nil { + return err + } + i.User = append(i.User, User{k, v}) + case PropMaximumPacketSize: + mp, err := readUint32(buf) + if err != nil { + return err + } + i.MaximumPacketSize = &mp + case PropWildcardSubAvailable: + ws, err := buf.ReadByte() + if err != nil { + return err + } + i.WildcardSubAvailable = &ws + case PropSubIDAvailable: + si, err := buf.ReadByte() + if err != nil { + return err + } + i.SubIDAvailable = &si + case PropSharedSubAvailable: + ss, err := buf.ReadByte() + if err != nil { + return err + } + i.SharedSubAvailable = &ss + default: + return fmt.Errorf("unknown Prop type %d", PropType) + } + } + + return nil +} + +// ValidProperties is a map of the various properties and the +// PacketTypes that property is valid for. 
+var ValidProperties = map[byte]map[byte]struct{}{ + PropPayloadFormat: {PUBLISH: {}}, + PropMessageExpiry: {PUBLISH: {}}, + PropContentType: {PUBLISH: {}}, + PropResponseTopic: {PUBLISH: {}}, + PropCorrelationData: {PUBLISH: {}}, + PropTopicAlias: {PUBLISH: {}}, + PropSubscriptionIdentifier: {PUBLISH: {}, SUBSCRIBE: {}}, + PropSessionExpiryInterval: {CONNECT: {}, CONNACK: {}, DISCONNECT: {}}, + PropAssignedClientID: {CONNACK: {}}, + PropServerKeepAlive: {CONNACK: {}}, + PropWildcardSubAvailable: {CONNACK: {}}, + PropSubIDAvailable: {CONNACK: {}}, + PropSharedSubAvailable: {CONNACK: {}}, + PropRetainAvailable: {CONNACK: {}}, + PropResponseInfo: {CONNACK: {}}, + PropAuthMethod: {CONNECT: {}, CONNACK: {}, AUTH: {}}, + PropAuthData: {CONNECT: {}, CONNACK: {}, AUTH: {}}, + PropRequestProblemInfo: {CONNECT: {}}, + PropWillDelayInterval: {CONNECT: {}}, + PropRequestResponseInfo: {CONNECT: {}}, + PropServerReference: {CONNACK: {}, DISCONNECT: {}}, + PropReasonString: {CONNACK: {}, PUBACK: {}, PUBREC: {}, PUBREL: {}, PUBCOMP: {}, SUBACK: {}, UNSUBACK: {}, DISCONNECT: {}, AUTH: {}}, + PropReceiveMaximum: {CONNECT: {}, CONNACK: {}}, + PropTopicAliasMaximum: {CONNECT: {}, CONNACK: {}}, + PropMaximumQOS: {CONNECT: {}, CONNACK: {}}, + PropMaximumPacketSize: {CONNECT: {}, CONNACK: {}}, + PropUser: {CONNECT: {}, CONNACK: {}, PUBLISH: {}, PUBACK: {}, PUBREC: {}, PUBREL: {}, PUBCOMP: {}, SUBSCRIBE: {}, UNSUBSCRIBE: {}, SUBACK: {}, UNSUBACK: {}, DISCONNECT: {}, AUTH: {}}, +} + +// ValidateID takes a PacketType and a property name and returns +// a boolean indicating if that property is valid for that +// PacketType +func ValidateID(p byte, i byte) bool { + _, ok := ValidProperties[i][p] + return ok +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/puback.go b/vendor/github.com/eclipse/paho.golang/packets/puback.go new file mode 100644 index 000000000..67f404ce6 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/puback.go @@ -0,0 +1,115 @@ +package packets 
+ +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Puback is the Variable Header definition for a Puback control packet +type Puback struct { + Properties *Properties + PacketID uint16 + ReasonCode byte +} + +// PubackSuccess, etc are the list of valid puback reason codes. +const ( + PubackSuccess = 0x00 + PubackNoMatchingSubscribers = 0x10 + PubackUnspecifiedError = 0x80 + PubackImplementationSpecificError = 0x83 + PubackNotAuthorized = 0x87 + PubackTopicNameInvalid = 0x90 + PubackPacketIdentifierInUse = 0x91 + PubackQuotaExceeded = 0x97 + PubackPayloadFormatInvalid = 0x99 +) + +func (p *Puback) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "PUBACK: PacketID:%d ReasonCode:%X", p.PacketID, p.ReasonCode) + if p.Properties != nil { + fmt.Fprintf(&b, " Properties:\n%s", p.Properties) + } else { + fmt.Fprint(&b, "\n") + } + + return b.String() +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Puback) Unpack(r *bytes.Buffer) error { + var err error + success := r.Len() == 2 + noProps := r.Len() == 3 + p.PacketID, err = readUint16(r) + if err != nil { + return err + } + if !success { + p.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + if !noProps { + err = p.Properties.Unpack(r, PUBACK) + if err != nil { + return err + } + } + } + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Puback) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(p.PacketID, &b) + b.WriteByte(p.ReasonCode) + idvp := p.Properties.Pack(PUBACK) + propLen := encodeVBI(len(idvp)) + n := net.Buffers{b.Bytes(), propLen} + if len(idvp) > 0 { + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Puback) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PUBACK}} + cp.Content = p + + return cp.WriteTo(w) +} + +// Reason 
returns a string representation of the meaning of the ReasonCode +func (p *Puback) Reason() string { + switch p.ReasonCode { + case 0: + return "The message is accepted. Publication of the QoS 1 message proceeds." + case 16: + return "The message is accepted but there are no subscribers. This is sent only by the Server. If the Server knows that there are no matching subscribers, it MAY use this Reason Code instead of 0x00 (Success)." + case 128: + return "The receiver does not accept the publish but either does not want to reveal the reason, or it does not match one of the other values." + case 131: + return "The PUBLISH is valid but the receiver is not willing to accept it." + case 135: + return "The PUBLISH is not authorized." + case 144: + return "The Topic Name is not malformed, but is not accepted by this Client or Server." + case 145: + return "The Packet Identifier is already in use. This might indicate a mismatch in the Session State between the Client and Server." + case 151: + return "An implementation or administrative imposed limit has been exceeded." + case 153: + return "The payload format does not match the specified Payload Format Indicator." + } + + return "" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/pubcomp.go b/vendor/github.com/eclipse/paho.golang/packets/pubcomp.go new file mode 100644 index 000000000..1cdfe61e9 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/pubcomp.go @@ -0,0 +1,95 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Pubcomp is the Variable Header definition for a Pubcomp control packet +type Pubcomp struct { + Properties *Properties + PacketID uint16 + ReasonCode byte +} + +// PubcompSuccess, etc are the list of valid pubcomp reason codes. 
+const ( + PubcompSuccess = 0x00 + PubcompPacketIdentifierNotFound = 0x92 +) + +func (p *Pubcomp) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "PUBCOMP: ReasonCode:%X PacketID:%d", p.ReasonCode, p.PacketID) + if p.Properties != nil { + fmt.Fprintf(&b, " Properties:\n%s", p.Properties) + } else { + fmt.Fprint(&b, "\n") + } + + return b.String() +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Pubcomp) Unpack(r *bytes.Buffer) error { + var err error + success := r.Len() == 2 + noProps := r.Len() == 3 + p.PacketID, err = readUint16(r) + if err != nil { + return err + } + if !success { + p.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + if !noProps { + err = p.Properties.Unpack(r, PUBACK) + if err != nil { + return err + } + } + } + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Pubcomp) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(p.PacketID, &b) + b.WriteByte(p.ReasonCode) + n := net.Buffers{b.Bytes()} + idvp := p.Properties.Pack(PUBCOMP) + propLen := encodeVBI(len(idvp)) + if len(idvp) > 0 { + n = append(n, propLen) + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Pubcomp) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PUBCOMP}} + cp.Content = p + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (p *Pubcomp) Reason() string { + switch p.ReasonCode { + case 0: + return "Success - Packet Identifier released. Publication of QoS 2 message is complete." + case 146: + return "Packet Identifier not found - The Packet Identifier is not known. This is not an error during recovery, but at other times indicates a mismatch between the Session State on the Client and Server." 
+ } + + return "" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/publish.go b/vendor/github.com/eclipse/paho.golang/packets/publish.go new file mode 100644 index 000000000..ef834b7b9 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/publish.go @@ -0,0 +1,80 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net" +) + +// Publish is the Variable Header definition for a publish control packet +type Publish struct { + Payload []byte + Topic string + Properties *Properties + PacketID uint16 + QoS byte + Duplicate bool + Retain bool +} + +func (p *Publish) String() string { + return fmt.Sprintf("PUBLISH: PacketID:%d QOS:%d Topic:%s Duplicate:%t Retain:%t Payload:\n%s\nProperties\n%s", p.PacketID, p.QoS, p.Topic, p.Duplicate, p.Retain, string(p.Payload), p.Properties) +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Publish) Unpack(r *bytes.Buffer) error { + var err error + p.Topic, err = readString(r) + if err != nil { + return err + } + if p.QoS > 0 { + p.PacketID, err = readUint16(r) + if err != nil { + return err + } + } + + err = p.Properties.Unpack(r, PUBLISH) + if err != nil { + return err + } + + p.Payload, err = ioutil.ReadAll(r) + if err != nil { + return err + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Publish) Buffers() net.Buffers { + var b bytes.Buffer + writeString(p.Topic, &b) + if p.QoS > 0 { + _ = writeUint16(p.PacketID, &b) + } + idvp := p.Properties.Pack(PUBLISH) + encodeVBIdirect(len(idvp), &b) + return net.Buffers{b.Bytes(), idvp, p.Payload} + +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Publish) WriteTo(w io.Writer) (int64, error) { + f := p.QoS << 1 + if p.Duplicate { + f |= 1 << 3 + } + if p.Retain { + f |= 1 + } + + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PUBLISH, Flags: f}} + cp.Content = p + + return 
cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/pubrec.go b/vendor/github.com/eclipse/paho.golang/packets/pubrec.go new file mode 100644 index 000000000..c3820191a --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/pubrec.go @@ -0,0 +1,117 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Pubrec is the Variable Header definition for a Pubrec control packet +type Pubrec struct { + Properties *Properties + PacketID uint16 + ReasonCode byte +} + +// PubrecSuccess, etc are the list of valid Pubrec reason codes +const ( + PubrecSuccess = 0x00 + PubrecNoMatchingSubscribers = 0x10 + PubrecUnspecifiedError = 0x80 + PubrecImplementationSpecificError = 0x83 + PubrecNotAuthorized = 0x87 + PubrecTopicNameInvalid = 0x90 + PubrecPacketIdentifierInUse = 0x91 + PubrecQuotaExceeded = 0x97 + PubrecPayloadFormatInvalid = 0x99 +) + +func (p *Pubrec) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "PUBREC: ReasonCode:%X PacketID:%d", p.ReasonCode, p.PacketID) + if p.Properties != nil { + fmt.Fprintf(&b, " Properties:\n%s", p.Properties) + } else { + fmt.Fprint(&b, "\n") + } + + return b.String() +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Pubrec) Unpack(r *bytes.Buffer) error { + var err error + success := r.Len() == 2 + noProps := r.Len() == 3 + p.PacketID, err = readUint16(r) + if err != nil { + return err + } + if !success { + p.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + if !noProps { + err = p.Properties.Unpack(r, PUBACK) + if err != nil { + return err + } + } + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Pubrec) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(p.PacketID, &b) + b.WriteByte(p.ReasonCode) + n := net.Buffers{b.Bytes()} + idvp := p.Properties.Pack(PUBREC) + propLen := encodeVBI(len(idvp)) + if len(idvp) > 0 { + n = 
append(n, propLen) + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Pubrec) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PUBREC}} + cp.Content = p + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (p *Pubrec) Reason() string { + switch p.ReasonCode { + case 0: + return "Success - The message is accepted. Publication of the QoS 2 message proceeds." + case 16: + return "No matching subscribers. - The message is accepted but there are no subscribers. This is sent only by the Server. If the Server knows that case there are no matching subscribers, it MAY use this Reason Code instead of 0x00 (Success)" + case 128: + return "Unspecified error - The receiver does not accept the publish but either does not want to reveal the reason, or it does not match one of the other values." + case 131: + return "Implementation specific error - The PUBLISH is valid but the receiver is not willing to accept it." + case 135: + return "Not authorized - The PUBLISH is not authorized." + case 144: + return "Topic Name invalid - The Topic Name is not malformed, but is not accepted by this Client or Server." + case 145: + return "Packet Identifier in use - The Packet Identifier is already in use. This might indicate a mismatch in the Session State between the Client and Server." + case 151: + return "Quota exceeded - An implementation or administrative imposed limit has been exceeded." + case 153: + return "Payload format invalid - The payload format does not match the one specified in the Payload Format Indicator." 
+ } + + return "" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/pubrel.go b/vendor/github.com/eclipse/paho.golang/packets/pubrel.go new file mode 100644 index 000000000..27c48c240 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/pubrel.go @@ -0,0 +1,77 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Pubrel is the Variable Header definition for a Pubrel control packet +type Pubrel struct { + Properties *Properties + PacketID uint16 + ReasonCode byte +} + +func (p *Pubrel) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "PUBREL: ReasonCode:%X PacketID:%d", p.ReasonCode, p.PacketID) + if p.Properties != nil { + fmt.Fprintf(&b, " Properties:\n%s", p.Properties) + } else { + fmt.Fprint(&b, "\n") + } + + return b.String() +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Pubrel) Unpack(r *bytes.Buffer) error { + var err error + success := r.Len() == 2 + noProps := r.Len() == 3 + p.PacketID, err = readUint16(r) + if err != nil { + return err + } + if !success { + p.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + if !noProps { + err = p.Properties.Unpack(r, PUBACK) + if err != nil { + return err + } + } + } + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Pubrel) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(p.PacketID, &b) + b.WriteByte(p.ReasonCode) + n := net.Buffers{b.Bytes()} + idvp := p.Properties.Pack(PUBREL) + propLen := encodeVBI(len(idvp)) + if len(idvp) > 0 { + n = append(n, propLen) + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Pubrel) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PUBREL, Flags: 2}} + cp.Content = p + + return cp.WriteTo(w) +} diff --git 
a/vendor/github.com/eclipse/paho.golang/packets/suback.go b/vendor/github.com/eclipse/paho.golang/packets/suback.go new file mode 100644 index 000000000..2503aaf1a --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/suback.go @@ -0,0 +1,103 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Suback is the Variable Header definition for a Suback control packet +type Suback struct { + Properties *Properties + Reasons []byte + PacketID uint16 +} + +func (s *Suback) String() string { + return fmt.Sprintf("SUBACK: ReasonCode:%v PacketID:%d Properties:\n%s", s.Reasons, s.PacketID, s.Properties) +} + +// SubackGrantedQoS0, etc are the list of valid suback reason codes. +const ( + SubackGrantedQoS0 = 0x00 + SubackGrantedQoS1 = 0x01 + SubackGrantedQoS2 = 0x02 + SubackUnspecifiederror = 0x80 + SubackImplementationspecificerror = 0x83 + SubackNotauthorized = 0x87 + SubackTopicFilterinvalid = 0x8F + SubackPacketIdentifierinuse = 0x91 + SubackQuotaexceeded = 0x97 + SubackSharedSubscriptionnotsupported = 0x9E + SubackSubscriptionIdentifiersnotsupported = 0xA1 + SubackWildcardsubscriptionsnotsupported = 0xA2 +) + +//Unpack is the implementation of the interface required function for a packet +func (s *Suback) Unpack(r *bytes.Buffer) error { + var err error + s.PacketID, err = readUint16(r) + if err != nil { + return err + } + + err = s.Properties.Unpack(r, SUBACK) + if err != nil { + return err + } + + s.Reasons = r.Bytes() + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (s *Suback) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(s.PacketID, &b) + idvp := s.Properties.Pack(SUBACK) + propLen := encodeVBI(len(idvp)) + return net.Buffers{b.Bytes(), propLen, idvp, s.Reasons} +} + +// WriteTo is the implementation of the interface required function for a packet +func (s *Suback) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: SUBACK}} + 
cp.Content = s + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (s *Suback) Reason(index int) string { + if index >= 0 && index < len(s.Reasons) { + switch s.Reasons[index] { + case 0: + return "Granted QoS 0 - The subscription is accepted and the maximum QoS sent will be QoS 0. This might be a lower QoS than was requested." + case 1: + return "Granted QoS 1 - The subscription is accepted and the maximum QoS sent will be QoS 1. This might be a lower QoS than was requested." + case 2: + return "Granted QoS 2 - The subscription is accepted and any received QoS will be sent to this subscription." + case 128: + return "Unspecified error - The subscription is not accepted and the Server either does not wish to reveal the reason or none of the other Reason Codes apply." + case 131: + return "Implementation specific error - The SUBSCRIBE is valid but the Server does not accept it." + case 135: + return "Not authorized - The Client is not authorized to make this subscription." + case 143: + return "Topic Filter invalid - The Topic Filter is correctly formed but is not allowed for this Client." + case 145: + return "Packet Identifier in use - The specified Packet Identifier is already in use." + case 151: + return "Quota exceeded - An implementation or administrative imposed limit has been exceeded." + case 158: + return "Shared Subscription not supported - The Server does not support Shared Subscriptions for this Client." + case 161: + return "Subscription Identifiers not supported - The Server does not support Subscription Identifiers; the subscription is not accepted." + case 162: + return "Wildcard subscriptions not supported - The Server does not support Wildcard subscription; the subscription is not accepted." 
+ } + } + return "Invalid Reason index" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/subscribe.go b/vendor/github.com/eclipse/paho.golang/packets/subscribe.go new file mode 100644 index 000000000..3f457a28a --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/subscribe.go @@ -0,0 +1,116 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Subscribe is the Variable Header definition for a Subscribe control packet +type Subscribe struct { + Properties *Properties + Subscriptions map[string]SubOptions + PacketID uint16 +} + +func (s *Subscribe) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "SUBSCRIBE: PacketID:%d Subscriptions:\n", s.PacketID) + for sub, o := range s.Subscriptions { + fmt.Fprintf(&b, "\t%s: QOS:%d RetainHandling:%X NoLocal:%t RetainAsPublished:%t\n", sub, o.QoS, o.RetainHandling, o.NoLocal, o.RetainAsPublished) + } + fmt.Fprintf(&b, "Properties:\n%s", s.Properties) + + return b.String() +} + +// SubOptions is the struct representing the options for a subscription +type SubOptions struct { + QoS byte + RetainHandling byte + NoLocal bool + RetainAsPublished bool +} + +// Pack is the implementation of the interface required function for a packet +func (s *SubOptions) Pack() byte { + var ret byte + ret |= s.QoS & 0x03 + if s.NoLocal { + ret |= 1 << 2 + } + if s.RetainAsPublished { + ret |= 1 << 3 + } + ret |= s.RetainHandling & 0x30 + + return ret +} + +// Unpack is the implementation of the interface required function for a packet +func (s *SubOptions) Unpack(r *bytes.Buffer) error { + b, err := r.ReadByte() + if err != nil { + return err + } + + s.QoS = b & 0x03 + s.NoLocal = (b & 1 << 2) == 1 + s.RetainAsPublished = (b & 1 << 3) == 1 + s.RetainHandling = b & 0x30 + + return nil +} + +// Unpack is the implementation of the interface required function for a packet +func (s *Subscribe) Unpack(r *bytes.Buffer) error { + var err error + s.PacketID, err = readUint16(r) + if err != 
nil { + return err + } + + err = s.Properties.Unpack(r, SUBSCRIBE) + if err != nil { + return err + } + + for r.Len() > 0 { + var so SubOptions + t, err := readString(r) + if err != nil { + return err + } + if err = so.Unpack(r); err != nil { + return err + } + s.Subscriptions[t] = so + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (s *Subscribe) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(s.PacketID, &b) + var subs bytes.Buffer + for t, o := range s.Subscriptions { + writeString(t, &subs) + subs.WriteByte(o.Pack()) + } + idvp := s.Properties.Pack(SUBSCRIBE) + propLen := encodeVBI(len(idvp)) + return net.Buffers{b.Bytes(), propLen, idvp, subs.Bytes()} +} + +// WriteTo is the implementation of the interface required function for a packet +func (s *Subscribe) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: SUBSCRIBE, Flags: 2}} + cp.Content = s + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/unsuback.go b/vendor/github.com/eclipse/paho.golang/packets/unsuback.go new file mode 100644 index 000000000..ba5164b9f --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/unsuback.go @@ -0,0 +1,88 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Unsuback is the Variable Header definition for a Unsuback control packet +type Unsuback struct { + Reasons []byte + Properties *Properties + PacketID uint16 +} + +func (u *Unsuback) String() string { + return fmt.Sprintf("UNSUBACK: ReasonCode:%v PacketID:%d Properties:\n%s", u.Reasons, u.PacketID, u.Properties) +} + +// UnsubackSuccess, etc are the list of valid unsuback reason codes. 
+const ( + UnsubackSuccess = 0x00 + UnsubackNoSubscriptionFound = 0x11 + UnsubackUnspecifiedError = 0x80 + UnsubackImplementationSpecificError = 0x83 + UnsubackNotAuthorized = 0x87 + UnsubackTopicFilterInvalid = 0x8F + UnsubackPacketIdentifierInUse = 0x91 +) + +// Unpack is the implementation of the interface required function for a packet +func (u *Unsuback) Unpack(r *bytes.Buffer) error { + var err error + u.PacketID, err = readUint16(r) + if err != nil { + return err + } + + err = u.Properties.Unpack(r, UNSUBACK) + if err != nil { + return err + } + + u.Reasons = r.Bytes() + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (u *Unsuback) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(u.PacketID, &b) + idvp := u.Properties.Pack(UNSUBACK) + propLen := encodeVBI(len(idvp)) + return net.Buffers{b.Bytes(), propLen, idvp, u.Reasons} +} + +// WriteTo is the implementation of the interface required function for a packet +func (u *Unsuback) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: UNSUBACK}} + cp.Content = u + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (u *Unsuback) Reason(index int) string { + if index >= 0 && index < len(u.Reasons) { + switch u.Reasons[index] { + case 0x00: + return "Success - The subscription is deleted" + case 0x11: + return "No subscription found - No matching Topic Filter is being used by the Client." + case 0x80: + return "Unspecified error - The unsubscribe could not be completed and the Server either does not wish to reveal the reason or none of the other Reason Codes apply." + case 0x83: + return "Implementation specific error - The UNSUBSCRIBE is valid but the Server does not accept it." + case 0x87: + return "Not authorized - The Client is not authorized to unsubscribe." 
+ case 0x8F: + return "Topic Filter invalid - The Topic Filter is correctly formed but is not allowed for this Client." + case 0x91: + return "Packet Identifier in use - The specified Packet Identifier is already in use." + } + } + return "Invalid Reason index" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/unsubscribe.go b/vendor/github.com/eclipse/paho.golang/packets/unsubscribe.go new file mode 100644 index 000000000..dc4e2f89e --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/unsubscribe.go @@ -0,0 +1,67 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Unsubscribe is the Variable Header definition for a Unsubscribe control packet +type Unsubscribe struct { + Topics []string + Properties *Properties + PacketID uint16 +} + +func (u *Unsubscribe) String() string { + return fmt.Sprintf("UNSUBSCRIBE: PacketID:%d Topics:%v Properties:\n%s", u.PacketID, u.Topics, u.Properties) +} + +// Unpack is the implementation of the interface required function for a packet +func (u *Unsubscribe) Unpack(r *bytes.Buffer) error { + var err error + u.PacketID, err = readUint16(r) + if err != nil { + return err + } + + err = u.Properties.Unpack(r, UNSUBSCRIBE) + if err != nil { + return err + } + + for { + t, err := readString(r) + if err != nil && err != io.EOF { + return err + } + if err == io.EOF { + break + } + u.Topics = append(u.Topics, t) + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (u *Unsubscribe) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(u.PacketID, &b) + var topics bytes.Buffer + for _, t := range u.Topics { + writeString(t, &topics) + } + idvp := u.Properties.Pack(UNSUBSCRIBE) + propLen := encodeVBI(len(idvp)) + return net.Buffers{b.Bytes(), propLen, idvp, topics.Bytes()} +} + +// WriteTo is the implementation of the interface required function for a packet +func (u *Unsubscribe) WriteTo(w io.Writer) (int64, error) { + cp := 
&ControlPacket{FixedHeader: FixedHeader{Type: UNSUBSCRIBE, Flags: 2}} + cp.Content = u + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/acks_tracker.go b/vendor/github.com/eclipse/paho.golang/paho/acks_tracker.go new file mode 100644 index 000000000..47f11cb67 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/acks_tracker.go @@ -0,0 +1,79 @@ +package paho + +import ( + "errors" + "sync" + + "github.com/eclipse/paho.golang/packets" +) + +var ( + ErrPacketNotFound = errors.New("packet not found") +) + +type acksTracker struct { + mx sync.Mutex + order []packet +} + +func (t *acksTracker) add(pb *packets.Publish) { + t.mx.Lock() + defer t.mx.Unlock() + + for _, v := range t.order { + if v.pb.PacketID == pb.PacketID { + return // already added + } + } + + t.order = append(t.order, packet{pb: pb}) +} + +func (t *acksTracker) markAsAcked(pb *packets.Publish) error { + t.mx.Lock() + defer t.mx.Unlock() + + for k, v := range t.order { + if pb.PacketID == v.pb.PacketID { + t.order[k].acknowledged = true + return nil + } + } + + return ErrPacketNotFound +} + +func (t *acksTracker) flush(do func([]*packets.Publish)) { + t.mx.Lock() + defer t.mx.Unlock() + + var ( + buf []*packets.Publish + ) + for _, v := range t.order { + if v.acknowledged { + buf = append(buf, v.pb) + } else { + break + } + } + + if len(buf) == 0 { + return + } + + do(buf) + t.order = t.order[len(buf):] +} + +// reset should be used upon disconnections +func (t *acksTracker) reset() { + t.mx.Lock() + defer t.mx.Unlock() + t.order = nil +} + +type packet struct { + pb *packets.Publish + acknowledged bool +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/auth.go b/vendor/github.com/eclipse/paho.golang/paho/auth.go new file mode 100644 index 000000000..7d3a3c972 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/auth.go @@ -0,0 +1,8 @@ +package paho + +// Auther is the interface for something that implements the extended authentication +// 
flows in MQTT v5 +type Auther interface { + Authenticate(*Auth) *Auth + Authenticated() +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/client.go b/vendor/github.com/eclipse/paho.golang/paho/client.go new file mode 100644 index 000000000..f41e3d068 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/client.go @@ -0,0 +1,923 @@ +package paho + +import ( + "context" + "errors" + "fmt" + "math" + "net" + "strings" + "sync" + "time" + + "github.com/eclipse/paho.golang/packets" + "golang.org/x/sync/semaphore" +) + +type MQTTVersion byte + +const ( + MQTTv311 MQTTVersion = 4 + MQTTv5 MQTTVersion = 5 +) + +const defaultSendAckInterval = 50 * time.Millisecond + +var ( + ErrManualAcknowledgmentDisabled = errors.New("manual acknowledgments disabled") +) + +type ( + // ClientConfig are the user configurable options for the client, an + // instance of this struct is passed into NewClient(), not all options + // are required to be set, defaults are provided for Persistence, MIDs, + // PingHandler, PacketTimeout and Router. + ClientConfig struct { + ClientID string + // Conn is the connection to broker. + // BEWARE that most wrapped net.Conn implementations like tls.Conn are + // not thread safe for writing. To fix, use packets.NewThreadSafeConn + // wrapper or extend the custom net.Conn struct with sync.Locker. 
+ Conn net.Conn + MIDs MIDService + AuthHandler Auther + PingHandler Pinger + Router Router + Persistence Persistence + PacketTimeout time.Duration + // OnServerDisconnect is called only when a packets.DISCONNECT is received from server + OnServerDisconnect func(*Disconnect) + // OnClientError is for example called on net.Error + OnClientError func(error) + // PublishHook allows a user provided function to be called before + // a Publish packet is sent allowing it to inspect or modify the + // Publish, an example of the utility of this is provided in the + // Topic Alias Handler extension which will automatically assign + // and use topic alias values rather than topic strings. + PublishHook func(*Publish) + // EnableManualAcknowledgment is used to control the acknowledgment of packets manually. + // BEWARE that the MQTT specs require clients to send acknowledgments in the order in which the corresponding + // PUBLISH packets were received. + // Consider the following scenario: the client receives packets 1,2,3,4 + // If you acknowledge 3 first, no ack is actually sent to the server but it's buffered until also 1 and 2 + // are acknowledged. + EnableManualAcknowledgment bool + // SendAcksInterval is used only when EnableManualAcknowledgment is true + // it determines how often the client tries to send a batch of acknowledgments in the right order to the server. + SendAcksInterval time.Duration + } + // Client is the struct representing an MQTT client + Client struct { + mu sync.Mutex + ClientConfig + // raCtx is used for handling the MQTTv5 authentication exchange. 
+ raCtx *CPContext + stop chan struct{} + publishPackets chan *packets.Publish + acksTracker acksTracker + workers sync.WaitGroup + serverProps CommsProperties + clientProps CommsProperties + serverInflight *semaphore.Weighted + clientInflight *semaphore.Weighted + debug Logger + errors Logger + } + + // CommsProperties is a struct of the communication properties that may + // be set by the server in the Connack and that the client needs to be + // aware of for future subscribes/publishes + CommsProperties struct { + MaximumPacketSize uint32 + ReceiveMaximum uint16 + TopicAliasMaximum uint16 + MaximumQoS byte + RetainAvailable bool + WildcardSubAvailable bool + SubIDAvailable bool + SharedSubAvailable bool + } + + caContext struct { + Context context.Context + Return chan *packets.Connack + } +) + +// NewClient is used to create a new default instance of an MQTT client. +// It returns a pointer to the new client instance. +// The default client uses the provided PingHandler, MessageID and +// StandardRouter implementations, and a noop Persistence. +// These should be replaced if desired before the client is connected. +// client.Conn *MUST* be set to an already connected net.Conn before +// Connect() is called. 
+func NewClient(conf ClientConfig) *Client { + c := &Client{ + serverProps: CommsProperties{ + ReceiveMaximum: 65535, + MaximumQoS: 2, + MaximumPacketSize: 0, + TopicAliasMaximum: 0, + RetainAvailable: true, + WildcardSubAvailable: true, + SubIDAvailable: true, + SharedSubAvailable: true, + }, + clientProps: CommsProperties{ + ReceiveMaximum: 65535, + MaximumQoS: 2, + MaximumPacketSize: 0, + TopicAliasMaximum: 0, + }, + ClientConfig: conf, + errors: NOOPLogger{}, + debug: NOOPLogger{}, + } + + if c.Persistence == nil { + c.Persistence = &noopPersistence{} + } + if c.MIDs == nil { + c.MIDs = &MIDs{index: make([]*CPContext, int(midMax))} + } + if c.PacketTimeout == 0 { + c.PacketTimeout = 10 * time.Second + } + if c.Router == nil { + c.Router = NewStandardRouter() + } + if c.PingHandler == nil { + c.PingHandler = DefaultPingerWithCustomFailHandler(func(e error) { + go c.error(e) + }) + } + if c.OnClientError == nil { + c.OnClientError = func(e error) {} + } + + return c +} + +// Connect is used to connect the client to a server. It presumes that +// the Client instance already has a working network connection. +// The function takes a pre-prepared Connect packet, and uses that to +// establish an MQTT connection. Assuming the connection completes +// successfully the rest of the client is initiated and the Connack +// returned. Otherwise the failure Connack (if there is one) is returned +// along with an error indicating the reason for the failure to connect. 
+func (c *Client) Connect(ctx context.Context, cp *Connect) (*Connack, error) { + if c.Conn == nil { + return nil, fmt.Errorf("client connection is nil") + } + + cleanup := func() { + close(c.stop) + close(c.publishPackets) + _ = c.Conn.Close() + c.mu.Unlock() + } + + c.mu.Lock() + c.stop = make(chan struct{}) + + var publishPacketsSize uint16 = math.MaxUint16 + if cp.Properties != nil && cp.Properties.ReceiveMaximum != nil { + publishPacketsSize = *cp.Properties.ReceiveMaximum + } + c.publishPackets = make(chan *packets.Publish, publishPacketsSize) + + keepalive := cp.KeepAlive + c.ClientID = cp.ClientID + if cp.Properties != nil { + if cp.Properties.MaximumPacketSize != nil { + c.clientProps.MaximumPacketSize = *cp.Properties.MaximumPacketSize + } + if cp.Properties.MaximumQOS != nil { + c.clientProps.MaximumQoS = *cp.Properties.MaximumQOS + } + if cp.Properties.ReceiveMaximum != nil { + c.clientProps.ReceiveMaximum = *cp.Properties.ReceiveMaximum + } + if cp.Properties.TopicAliasMaximum != nil { + c.clientProps.TopicAliasMaximum = *cp.Properties.TopicAliasMaximum + } + } + + c.debug.Println("connecting") + connCtx, cf := context.WithTimeout(ctx, c.PacketTimeout) + defer cf() + + ccp := cp.Packet() + ccp.ProtocolName = "MQTT" + ccp.ProtocolVersion = 5 + + c.debug.Println("sending CONNECT") + if _, err := ccp.WriteTo(c.Conn); err != nil { + cleanup() + return nil, err + } + + c.debug.Println("waiting for CONNACK/AUTH") + var ( + caPacket *packets.Connack + caPacketCh = make(chan *packets.Connack) + caPacketErr = make(chan error) + ) + go c.expectConnack(caPacketCh, caPacketErr) + select { + case <-connCtx.Done(): + if ctxErr := connCtx.Err(); ctxErr != nil { + c.debug.Println(fmt.Sprintf("terminated due to context: %v", ctxErr)) + } + cleanup() + return nil, connCtx.Err() + case err := <-caPacketErr: + c.debug.Println(err) + cleanup() + return nil, err + case caPacket = <-caPacketCh: + } + + ca := ConnackFromPacketConnack(caPacket) + + if ca.ReasonCode >= 0x80 { + 
var reason string + c.debug.Println("received an error code in Connack:", ca.ReasonCode) + if ca.Properties != nil { + reason = ca.Properties.ReasonString + } + cleanup() + return ca, fmt.Errorf("failed to connect to server: %s", reason) + } + + // no more possible calls to cleanup(), defer an unlock + defer c.mu.Unlock() + + if ca.Properties != nil { + if ca.Properties.ServerKeepAlive != nil { + keepalive = *ca.Properties.ServerKeepAlive + } + if ca.Properties.AssignedClientID != "" { + c.ClientID = ca.Properties.AssignedClientID + } + if ca.Properties.ReceiveMaximum != nil { + c.serverProps.ReceiveMaximum = *ca.Properties.ReceiveMaximum + } + if ca.Properties.MaximumQoS != nil { + c.serverProps.MaximumQoS = *ca.Properties.MaximumQoS + } + if ca.Properties.MaximumPacketSize != nil { + c.serverProps.MaximumPacketSize = *ca.Properties.MaximumPacketSize + } + if ca.Properties.TopicAliasMaximum != nil { + c.serverProps.TopicAliasMaximum = *ca.Properties.TopicAliasMaximum + } + c.serverProps.RetainAvailable = ca.Properties.RetainAvailable + c.serverProps.WildcardSubAvailable = ca.Properties.WildcardSubAvailable + c.serverProps.SubIDAvailable = ca.Properties.SubIDAvailable + c.serverProps.SharedSubAvailable = ca.Properties.SharedSubAvailable + } + + c.serverInflight = semaphore.NewWeighted(int64(c.serverProps.ReceiveMaximum)) + c.clientInflight = semaphore.NewWeighted(int64(c.clientProps.ReceiveMaximum)) + + c.debug.Println("received CONNACK, starting PingHandler") + c.workers.Add(1) + go func() { + defer c.workers.Done() + defer c.debug.Println("returning from ping handler worker") + c.PingHandler.Start(c.Conn, time.Duration(keepalive)*time.Second) + }() + + c.debug.Println("starting publish packets loop") + c.workers.Add(1) + go func() { + defer c.workers.Done() + defer c.debug.Println("returning from publish packets loop worker") + c.routePublishPackets() + }() + + c.debug.Println("starting incoming") + c.workers.Add(1) + go func() { + defer c.workers.Done() + defer 
c.debug.Println("returning from incoming worker") + c.incoming() + }() + + if c.EnableManualAcknowledgment { + c.debug.Println("starting acking routine") + + c.acksTracker.reset() + sendAcksInterval := defaultSendAckInterval + if c.SendAcksInterval > 0 { + sendAcksInterval = c.SendAcksInterval + } + + c.workers.Add(1) + go func() { + defer c.workers.Done() + defer c.debug.Println("returning from ack tracker routine") + t := time.NewTicker(sendAcksInterval) + for { + select { + case <-c.stop: + return + case <-t.C: + c.acksTracker.flush(func(pbs []*packets.Publish) { + for _, pb := range pbs { + c.ack(pb) + } + }) + } + } + }() + } + + return ca, nil +} + +func (c *Client) Ack(pb *Publish) error { + if !c.EnableManualAcknowledgment { + return ErrManualAcknowledgmentDisabled + } + if pb.QoS == 0 { + return nil + } + return c.acksTracker.markAsAcked(pb.Packet()) +} + +func (c *Client) ack(pb *packets.Publish) { + switch pb.QoS { + case 1: + pa := packets.Puback{ + Properties: &packets.Properties{}, + PacketID: pb.PacketID, + } + c.debug.Println("sending PUBACK") + _, err := pa.WriteTo(c.Conn) + if err != nil { + c.errors.Printf("failed to send PUBACK for %d: %s", pb.PacketID, err) + } + case 2: + pr := packets.Pubrec{ + Properties: &packets.Properties{}, + PacketID: pb.PacketID, + } + c.debug.Printf("sending PUBREC") + _, err := pr.WriteTo(c.Conn) + if err != nil { + c.errors.Printf("failed to send PUBREC for %d: %s", pb.PacketID, err) + } + } +} + +func (c *Client) routePublishPackets() { + for { + select { + case <-c.stop: + return + case pb, open := <-c.publishPackets: + if !open { + return + } + + if !c.ClientConfig.EnableManualAcknowledgment { + c.Router.Route(pb) + c.ack(pb) + continue + } + + if pb.QoS != 0 { + c.acksTracker.add(pb) + } + + c.Router.Route(pb) + } + } +} + +// incoming is the Client function that reads and handles incoming +// packets from the server. 
The function is started as a goroutine +// from Connect(), it exits when it receives a server initiated +// Disconnect, the Stop channel is closed or there is an error reading +// a packet from the network connection +func (c *Client) incoming() { + defer c.debug.Println("client stopping, incoming stopping") + for { + select { + case <-c.stop: + return + default: + recv, err := packets.ReadPacket(c.Conn) + if err != nil { + go c.error(err) + return + } + switch recv.Type { + case packets.CONNACK: + c.debug.Println("received CONNACK") + go c.error(fmt.Errorf("received unexpected CONNACK")) + return + case packets.AUTH: + c.debug.Println("received AUTH") + ap := recv.Content.(*packets.Auth) + switch ap.ReasonCode { + case 0x0: + if c.AuthHandler != nil { + go c.AuthHandler.Authenticated() + } + if c.raCtx != nil { + c.raCtx.Return <- *recv + } + case 0x18: + if c.AuthHandler != nil { + if _, err := c.AuthHandler.Authenticate(AuthFromPacketAuth(ap)).Packet().WriteTo(c.Conn); err != nil { + go c.error(err) + return + } + } + } + case packets.PUBLISH: + pb := recv.Content.(*packets.Publish) + c.debug.Printf("received QoS%d PUBLISH", pb.QoS) + c.mu.Lock() + select { + case <-c.stop: + c.mu.Unlock() + return + default: + c.publishPackets <- pb + c.mu.Unlock() + } + case packets.PUBACK, packets.PUBCOMP, packets.SUBACK, packets.UNSUBACK: + c.debug.Printf("received %s packet with id %d", recv.PacketType(), recv.PacketID()) + if cpCtx := c.MIDs.Get(recv.PacketID()); cpCtx != nil { + cpCtx.Return <- *recv + } else { + c.debug.Println("received a response for a message ID we don't know:", recv.PacketID()) + } + case packets.PUBREC: + c.debug.Println("received PUBREC for", recv.PacketID()) + if cpCtx := c.MIDs.Get(recv.PacketID()); cpCtx == nil { + c.debug.Println("received a PUBREC for a message ID we don't know:", recv.PacketID()) + pl := packets.Pubrel{ + PacketID: recv.Content.(*packets.Pubrec).PacketID, + ReasonCode: 0x92, + } + c.debug.Println("sending PUBREL for", 
pl.PacketID) + _, err := pl.WriteTo(c.Conn) + if err != nil { + c.errors.Printf("failed to send PUBREL for %d: %s", pl.PacketID, err) + } + } else { + pr := recv.Content.(*packets.Pubrec) + if pr.ReasonCode >= 0x80 { + //Received a failure code, shortcut and return + cpCtx.Return <- *recv + } else { + pl := packets.Pubrel{ + PacketID: pr.PacketID, + } + c.debug.Println("sending PUBREL for", pl.PacketID) + _, err := pl.WriteTo(c.Conn) + if err != nil { + c.errors.Printf("failed to send PUBREL for %d: %s", pl.PacketID, err) + } + } + } + case packets.PUBREL: + c.debug.Println("received PUBREL for", recv.PacketID()) + //Auto respond to pubrels unless failure code + pr := recv.Content.(*packets.Pubrel) + if pr.ReasonCode >= 0x80 { + //Received a failure code, continue + continue + } else { + pc := packets.Pubcomp{ + PacketID: pr.PacketID, + } + c.debug.Println("sending PUBCOMP for", pr.PacketID) + _, err := pc.WriteTo(c.Conn) + if err != nil { + c.errors.Printf("failed to send PUBCOMP for %d: %s", pc.PacketID, err) + } + } + case packets.DISCONNECT: + c.debug.Println("received DISCONNECT") + if c.raCtx != nil { + c.raCtx.Return <- *recv + } + go func() { + if c.OnServerDisconnect != nil { + go c.serverDisconnect(DisconnectFromPacketDisconnect(recv.Content.(*packets.Disconnect))) + } else { + go c.error(fmt.Errorf("server initiated disconnect")) + } + }() + return + case packets.PINGRESP: + c.debug.Println("received PINGRESP") + c.PingHandler.PingResp() + } + } + } +} + +func (c *Client) close() { + c.mu.Lock() + defer c.mu.Unlock() + + select { + case <-c.stop: + //already shutting down, do nothing + return + default: + } + + close(c.stop) + close(c.publishPackets) + + c.debug.Println("client stopped") + c.PingHandler.Stop() + c.debug.Println("ping stopped") + _ = c.Conn.Close() + c.debug.Println("conn closed") + c.acksTracker.reset() + c.debug.Println("acks tracker reset") +} + +// error is called to signify that an error situation has occurred, this +// causes the 
client's Stop channel to be closed (if it hasn't already been) +// which results in the other client goroutines terminating. +// It also closes the client network connection. +func (c *Client) error(e error) { + c.debug.Println("error called:", e) + c.close() + c.workers.Wait() + go c.OnClientError(e) +} + +func (c *Client) serverDisconnect(d *Disconnect) { + c.close() + c.workers.Wait() + c.debug.Println("calling OnServerDisconnect") + go c.OnServerDisconnect(d) +} + +// Authenticate is used to initiate a reauthentication of credentials with the +// server. This function sends the initial Auth packet to start the reauthentication +// then relies on the client AuthHandler managing any further requests from the +// server until either a successful Auth packet is passed back, or a Disconnect +// is received. +func (c *Client) Authenticate(ctx context.Context, a *Auth) (*AuthResponse, error) { + c.debug.Println("client initiated reauthentication") + + c.mu.Lock() + if c.raCtx != nil { + c.mu.Unlock() + return nil, fmt.Errorf("previous authentication is still in progress") + } + c.raCtx = &CPContext{ctx, make(chan packets.ControlPacket, 1)} + c.mu.Unlock() + defer func() { + c.mu.Lock() + c.raCtx = nil + c.mu.Unlock() + }() + + c.debug.Println("sending AUTH") + if _, err := a.Packet().WriteTo(c.Conn); err != nil { + return nil, err + } + + var rp packets.ControlPacket + select { + case <-ctx.Done(): + if ctxErr := ctx.Err(); ctxErr != nil { + c.debug.Println(fmt.Sprintf("terminated due to context: %v", ctxErr)) + return nil, ctxErr + } + case rp = <-c.raCtx.Return: + } + + switch rp.Type { + case packets.AUTH: + //If we've received one here it must be successful, the only way + //to abort a reauth is a server initiated disconnect + return AuthResponseFromPacketAuth(rp.Content.(*packets.Auth)), nil + case packets.DISCONNECT: + return AuthResponseFromPacketDisconnect(rp.Content.(*packets.Disconnect)), nil + } + + return nil, fmt.Errorf("error with Auth, didn't receive 
Auth or Disconnect") +} + +// Subscribe is used to send a Subscription request to the MQTT server. +// It is passed a pre-prepared Subscribe packet and blocks waiting for +// a response Suback, or for the timeout to fire. Any response Suback +// is returned from the function, along with any errors. +func (c *Client) Subscribe(ctx context.Context, s *Subscribe) (*Suback, error) { + if !c.serverProps.WildcardSubAvailable { + for t := range s.Subscriptions { + if strings.ContainsAny(t, "#+") { + // Using a wildcard in a subscription when not supported + return nil, fmt.Errorf("cannot subscribe to %s, server does not support wildcards", t) + } + } + } + if !c.serverProps.SubIDAvailable && s.Properties != nil && s.Properties.SubscriptionIdentifier != nil { + return nil, fmt.Errorf("cannot send subscribe with subID set, server does not support subID") + } + if !c.serverProps.SharedSubAvailable { + for t := range s.Subscriptions { + if strings.HasPrefix(t, "$share") { + return nil, fmt.Errorf("cannont subscribe to %s, server does not support shared subscriptions", t) + } + } + } + + c.debug.Printf("subscribing to %+v", s.Subscriptions) + + subCtx, cf := context.WithTimeout(ctx, c.PacketTimeout) + defer cf() + cpCtx := &CPContext{subCtx, make(chan packets.ControlPacket, 1)} + + sp := s.Packet() + + mid, err := c.MIDs.Request(cpCtx) + if err != nil { + return nil, err + } + defer c.MIDs.Free(mid) + sp.PacketID = mid + + c.debug.Println("sending SUBSCRIBE") + if _, err := sp.WriteTo(c.Conn); err != nil { + return nil, err + } + c.debug.Println("waiting for SUBACK") + var sap packets.ControlPacket + + select { + case <-subCtx.Done(): + if ctxErr := subCtx.Err(); ctxErr != nil { + c.debug.Println(fmt.Sprintf("terminated due to context: %v", ctxErr)) + return nil, ctxErr + } + case sap = <-cpCtx.Return: + } + + if sap.Type != packets.SUBACK { + return nil, fmt.Errorf("received %d instead of Suback", sap.Type) + } + c.debug.Println("received SUBACK") + + sa := 
SubackFromPacketSuback(sap.Content.(*packets.Suback)) + switch { + case len(sa.Reasons) == 1: + if sa.Reasons[0] >= 0x80 { + var reason string + c.debug.Println("received an error code in Suback:", sa.Reasons[0]) + if sa.Properties != nil { + reason = sa.Properties.ReasonString + } + return sa, fmt.Errorf("failed to subscribe to topic: %s", reason) + } + default: + for _, code := range sa.Reasons { + if code >= 0x80 { + c.debug.Println("received an error code in Suback:", code) + return sa, fmt.Errorf("at least one requested subscription failed") + } + } + } + + return sa, nil +} + +// Unsubscribe is used to send an Unsubscribe request to the MQTT server. +// It is passed a pre-prepared Unsubscribe packet and blocks waiting for +// a response Unsuback, or for the timeout to fire. Any response Unsuback +// is returned from the function, along with any errors. +func (c *Client) Unsubscribe(ctx context.Context, u *Unsubscribe) (*Unsuback, error) { + c.debug.Printf("unsubscribing from %+v", u.Topics) + unsubCtx, cf := context.WithTimeout(ctx, c.PacketTimeout) + defer cf() + cpCtx := &CPContext{unsubCtx, make(chan packets.ControlPacket, 1)} + + up := u.Packet() + + mid, err := c.MIDs.Request(cpCtx) + if err != nil { + return nil, err + } + defer c.MIDs.Free(mid) + up.PacketID = mid + + c.debug.Println("sending UNSUBSCRIBE") + if _, err := up.WriteTo(c.Conn); err != nil { + return nil, err + } + c.debug.Println("waiting for UNSUBACK") + var uap packets.ControlPacket + + select { + case <-unsubCtx.Done(): + if ctxErr := unsubCtx.Err(); ctxErr != nil { + c.debug.Println(fmt.Sprintf("terminated due to context: %v", ctxErr)) + return nil, ctxErr + } + case uap = <-cpCtx.Return: + } + + if uap.Type != packets.UNSUBACK { + return nil, fmt.Errorf("received %d instead of Unsuback", uap.Type) + } + c.debug.Println("received SUBACK") + + ua := UnsubackFromPacketUnsuback(uap.Content.(*packets.Unsuback)) + switch { + case len(ua.Reasons) == 1: + if ua.Reasons[0] >= 0x80 { + var 
reason string + c.debug.Println("received an error code in Unsuback:", ua.Reasons[0]) + if ua.Properties != nil { + reason = ua.Properties.ReasonString + } + return ua, fmt.Errorf("failed to unsubscribe from topic: %s", reason) + } + default: + for _, code := range ua.Reasons { + if code >= 0x80 { + c.debug.Println("received an error code in Suback:", code) + return ua, fmt.Errorf("at least one requested unsubscribe failed") + } + } + } + + return ua, nil +} + +// Publish is used to send a publication to the MQTT server. +// It is passed a pre-prepared Publish packet and blocks waiting for +// the appropriate response, or for the timeout to fire. +// Any response message is returned from the function, along with any errors. +func (c *Client) Publish(ctx context.Context, p *Publish) (*PublishResponse, error) { + if p.QoS > c.serverProps.MaximumQoS { + return nil, fmt.Errorf("cannot send Publish with QoS %d, server maximum QoS is %d", p.QoS, c.serverProps.MaximumQoS) + } + if p.Properties != nil && p.Properties.TopicAlias != nil { + if c.serverProps.TopicAliasMaximum > 0 && *p.Properties.TopicAlias > c.serverProps.TopicAliasMaximum { + return nil, fmt.Errorf("cannot send publish with TopicAlias %d, server topic alias maximum is %d", *p.Properties.TopicAlias, c.serverProps.TopicAliasMaximum) + } + } + if !c.serverProps.RetainAvailable && p.Retain { + return nil, fmt.Errorf("cannot send Publish with retain flag set, server does not support retained messages") + } + if (p.Properties == nil || p.Properties.TopicAlias == nil) && p.Topic == "" { + return nil, fmt.Errorf("cannot send a publish with no TopicAlias and no Topic set") + } + + if c.ClientConfig.PublishHook != nil { + c.ClientConfig.PublishHook(p) + } + + c.debug.Printf("sending message to %s", p.Topic) + + pb := p.Packet() + + switch p.QoS { + case 0: + c.debug.Println("sending QoS0 message") + if _, err := pb.WriteTo(c.Conn); err != nil { + return nil, err + } + return nil, nil + case 1, 2: + return 
c.publishQoS12(ctx, pb) + } + + return nil, fmt.Errorf("QoS isn't 0, 1 or 2") +} + +func (c *Client) publishQoS12(ctx context.Context, pb *packets.Publish) (*PublishResponse, error) { + c.debug.Println("sending QoS12 message") + pubCtx, cf := context.WithTimeout(ctx, c.PacketTimeout) + defer cf() + if err := c.serverInflight.Acquire(pubCtx, 1); err != nil { + return nil, err + } + defer c.serverInflight.Release(1) + cpCtx := &CPContext{pubCtx, make(chan packets.ControlPacket, 1)} + + mid, err := c.MIDs.Request(cpCtx) + if err != nil { + return nil, err + } + defer c.MIDs.Free(mid) + pb.PacketID = mid + + if _, err := pb.WriteTo(c.Conn); err != nil { + return nil, err + } + var resp packets.ControlPacket + + select { + case <-pubCtx.Done(): + if ctxErr := pubCtx.Err(); ctxErr != nil { + c.debug.Println(fmt.Sprintf("terminated due to context: %v", ctxErr)) + return nil, ctxErr + } + case resp = <-cpCtx.Return: + } + + switch pb.QoS { + case 1: + if resp.Type != packets.PUBACK { + return nil, fmt.Errorf("received %d instead of PUBACK", resp.Type) + } + + pr := PublishResponseFromPuback(resp.Content.(*packets.Puback)) + if pr.ReasonCode >= 0x80 { + c.debug.Println("received an error code in Puback:", pr.ReasonCode) + return pr, fmt.Errorf("error publishing: %s", resp.Content.(*packets.Puback).Reason()) + } + return pr, nil + case 2: + switch resp.Type { + case packets.PUBCOMP: + pr := PublishResponseFromPubcomp(resp.Content.(*packets.Pubcomp)) + return pr, nil + case packets.PUBREC: + c.debug.Printf("received PUBREC for %s (must have errored)", pb.PacketID) + pr := PublishResponseFromPubrec(resp.Content.(*packets.Pubrec)) + return pr, nil + default: + return nil, fmt.Errorf("received %d instead of PUBCOMP", resp.Type) + } + } + + c.debug.Println("ended up with a non QoS1/2 message:", pb.QoS) + return nil, fmt.Errorf("ended up with a non QoS1/2 message: %d", pb.QoS) +} + +func (c *Client) expectConnack(packet chan<- *packets.Connack, errs chan<- error) { + recv, err := 
packets.ReadPacket(c.Conn) + if err != nil { + errs <- err + return + } + switch r := recv.Content.(type) { + case *packets.Connack: + c.debug.Println("received CONNACK") + if r.ReasonCode == packets.ConnackSuccess && r.Properties != nil && r.Properties.AuthMethod != "" { + // Successful connack and AuthMethod is defined, must have successfully authed during connect + go c.AuthHandler.Authenticated() + } + packet <- r + case *packets.Auth: + c.debug.Println("received AUTH") + if c.AuthHandler == nil { + errs <- fmt.Errorf("enhanced authentication flow started but no AuthHandler configured") + return + } + c.debug.Println("sending AUTH") + _, err := c.AuthHandler.Authenticate(AuthFromPacketAuth(r)).Packet().WriteTo(c.Conn) + if err != nil { + errs <- fmt.Errorf("error sending authentication packet: %w", err) + return + } + // go round again, either another AUTH or CONNACK + go c.expectConnack(packet, errs) + default: + errs <- fmt.Errorf("received unexpected packet %v", recv.Type) + } + +} + +// Disconnect is used to send a Disconnect packet to the MQTT server +// Whether or not the attempt to send the Disconnect packet fails +// (and if it does this function returns any error) the network connection +// is closed. 
+func (c *Client) Disconnect(d *Disconnect) error { + c.debug.Println("disconnecting") + _, err := d.Packet().WriteTo(c.Conn) + + c.close() + c.workers.Wait() + + return err +} + +// SetDebugLogger takes an instance of the paho Logger interface +// and sets it to be used by the debug log endpoint +func (c *Client) SetDebugLogger(l Logger) { + c.debug = l +} + +// SetErrorLogger takes an instance of the paho Logger interface +// and sets it to be used by the error log endpoint +func (c *Client) SetErrorLogger(l Logger) { + c.errors = l +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_auth.go b/vendor/github.com/eclipse/paho.golang/paho/cp_auth.go new file mode 100644 index 000000000..6ccef9b47 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_auth.go @@ -0,0 +1,92 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Auth is a representation of the MQTT Auth packet + Auth struct { + Properties *AuthProperties + ReasonCode byte + } + + // AuthProperties is a struct of the properties that can be set + // for a Auth packet + AuthProperties struct { + AuthData []byte + AuthMethod string + ReasonString string + User UserProperties + } +) + +// InitProperties is a function that takes a lower level +// Properties struct and completes the properties of the Auth on +// which it is called +func (a *Auth) InitProperties(p *packets.Properties) { + a.Properties = &AuthProperties{ + AuthMethod: p.AuthMethod, + AuthData: p.AuthData, + ReasonString: p.ReasonString, + User: UserPropertiesFromPacketUser(p.User), + } +} + +// AuthFromPacketAuth takes a packets library Auth and +// returns a paho library Auth +func AuthFromPacketAuth(a *packets.Auth) *Auth { + v := &Auth{ReasonCode: a.ReasonCode} + v.InitProperties(a.Properties) + + return v +} + +// Packet returns a packets library Auth from the paho Auth +// on which it is called +func (a *Auth) Packet() *packets.Auth { + v := &packets.Auth{ReasonCode: a.ReasonCode} + + if 
a.Properties != nil { + v.Properties = &packets.Properties{ + AuthMethod: a.Properties.AuthMethod, + AuthData: a.Properties.AuthData, + ReasonString: a.Properties.ReasonString, + User: a.Properties.User.ToPacketProperties(), + } + } + + return v +} + +// AuthResponse is a represenation of the response to an Auth +// packet +type AuthResponse struct { + Properties *AuthProperties + ReasonCode byte + Success bool +} + +// AuthResponseFromPacketAuth takes a packets library Auth and +// returns a paho library AuthResponse +func AuthResponseFromPacketAuth(a *packets.Auth) *AuthResponse { + return &AuthResponse{ + Success: true, + ReasonCode: a.ReasonCode, + Properties: &AuthProperties{ + ReasonString: a.Properties.ReasonString, + User: UserPropertiesFromPacketUser(a.Properties.User), + }, + } +} + +// AuthResponseFromPacketDisconnect takes a packets library Disconnect and +// returns a paho library AuthResponse +func AuthResponseFromPacketDisconnect(d *packets.Disconnect) *AuthResponse { + return &AuthResponse{ + Success: true, + ReasonCode: d.ReasonCode, + Properties: &AuthProperties{ + ReasonString: d.Properties.ReasonString, + User: UserPropertiesFromPacketUser(d.Properties.User), + }, + } +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_connack.go b/vendor/github.com/eclipse/paho.golang/paho/cp_connack.go new file mode 100644 index 000000000..9c7233618 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_connack.go @@ -0,0 +1,84 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Connack is a representation of the MQTT Connack packet + Connack struct { + Properties *ConnackProperties + ReasonCode byte + SessionPresent bool + } + + // ConnackProperties is a struct of the properties that can be set + // for a Connack packet + ConnackProperties struct { + SessionExpiryInterval *uint32 + AuthData []byte + AuthMethod string + ResponseInfo string + ServerReference string + ReasonString string + AssignedClientID 
string + MaximumPacketSize *uint32 + ReceiveMaximum *uint16 + TopicAliasMaximum *uint16 + ServerKeepAlive *uint16 + MaximumQoS *byte + User UserProperties + WildcardSubAvailable bool + SubIDAvailable bool + SharedSubAvailable bool + RetainAvailable bool + } +) + +// InitProperties is a function that takes a lower level +// Properties struct and completes the properties of the Connack on +// which it is called +func (c *Connack) InitProperties(p *packets.Properties) { + c.Properties = &ConnackProperties{ + AssignedClientID: p.AssignedClientID, + ServerKeepAlive: p.ServerKeepAlive, + WildcardSubAvailable: true, + SubIDAvailable: true, + SharedSubAvailable: true, + RetainAvailable: true, + ResponseInfo: p.ResponseInfo, + SessionExpiryInterval: p.SessionExpiryInterval, + AuthMethod: p.AuthMethod, + AuthData: p.AuthData, + ServerReference: p.ServerReference, + ReasonString: p.ReasonString, + ReceiveMaximum: p.ReceiveMaximum, + TopicAliasMaximum: p.TopicAliasMaximum, + MaximumQoS: p.MaximumQOS, + MaximumPacketSize: p.MaximumPacketSize, + User: UserPropertiesFromPacketUser(p.User), + } + + if p.WildcardSubAvailable != nil { + c.Properties.WildcardSubAvailable = *p.WildcardSubAvailable == 1 + } + if p.SubIDAvailable != nil { + c.Properties.SubIDAvailable = *p.SubIDAvailable == 1 + } + if p.SharedSubAvailable != nil { + c.Properties.SharedSubAvailable = *p.SharedSubAvailable == 1 + } + if p.RetainAvailable != nil { + c.Properties.RetainAvailable = *p.RetainAvailable == 1 + } +} + +// ConnackFromPacketConnack takes a packets library Connack and +// returns a paho library Connack +func ConnackFromPacketConnack(c *packets.Connack) *Connack { + v := &Connack{ + SessionPresent: c.SessionPresent, + ReasonCode: c.ReasonCode, + } + v.InitProperties(c.Properties) + + return v +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_connect.go b/vendor/github.com/eclipse/paho.golang/paho/cp_connect.go new file mode 100644 index 000000000..8d731764d --- /dev/null +++ 
b/vendor/github.com/eclipse/paho.golang/paho/cp_connect.go @@ -0,0 +1,180 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Connect is a representation of the MQTT Connect packet + Connect struct { + Password []byte + Username string + ClientID string + Properties *ConnectProperties + WillMessage *WillMessage + WillProperties *WillProperties + KeepAlive uint16 + CleanStart bool + UsernameFlag bool + PasswordFlag bool + } + + // ConnectProperties is a struct of the properties that can be set + // for a Connect packet + ConnectProperties struct { + AuthData []byte + AuthMethod string + SessionExpiryInterval *uint32 + WillDelayInterval *uint32 + ReceiveMaximum *uint16 + TopicAliasMaximum *uint16 + MaximumQOS *byte + MaximumPacketSize *uint32 + User UserProperties + RequestProblemInfo bool + RequestResponseInfo bool + } +) + +// InitProperties is a function that takes a lower level +// Properties struct and completes the properties of the Connect on +// which it is called +func (c *Connect) InitProperties(p *packets.Properties) { + c.Properties = &ConnectProperties{ + SessionExpiryInterval: p.SessionExpiryInterval, + AuthMethod: p.AuthMethod, + AuthData: p.AuthData, + WillDelayInterval: p.WillDelayInterval, + RequestResponseInfo: false, + RequestProblemInfo: true, + ReceiveMaximum: p.ReceiveMaximum, + TopicAliasMaximum: p.TopicAliasMaximum, + MaximumQOS: p.MaximumQOS, + MaximumPacketSize: p.MaximumPacketSize, + User: UserPropertiesFromPacketUser(p.User), + } + + if p.RequestResponseInfo != nil { + c.Properties.RequestResponseInfo = *p.RequestProblemInfo == 1 + } + if p.RequestProblemInfo != nil { + c.Properties.RequestProblemInfo = *p.RequestProblemInfo == 1 + } +} + +// InitWillProperties is a function that takes a lower level +// Properties struct and completes the properties of the Will in the Connect on +// which it is called +func (c *Connect) InitWillProperties(p *packets.Properties) { + c.WillProperties = &WillProperties{ + 
WillDelayInterval: p.WillDelayInterval, + PayloadFormat: p.PayloadFormat, + MessageExpiry: p.MessageExpiry, + ContentType: p.ContentType, + ResponseTopic: p.ResponseTopic, + CorrelationData: p.CorrelationData, + User: UserPropertiesFromPacketUser(p.User), + } +} + +// ConnectFromPacketConnect takes a packets library Connect and +// returns a paho library Connect +func ConnectFromPacketConnect(p *packets.Connect) *Connect { + v := &Connect{ + UsernameFlag: p.UsernameFlag, + Username: p.Username, + PasswordFlag: p.PasswordFlag, + Password: p.Password, + ClientID: p.ClientID, + CleanStart: p.CleanStart, + KeepAlive: p.KeepAlive, + } + v.InitProperties(p.Properties) + if p.WillFlag { + v.WillMessage = &WillMessage{ + Retain: p.WillRetain, + QoS: p.WillQOS, + Topic: p.WillTopic, + Payload: p.WillMessage, + } + v.InitWillProperties(p.WillProperties) + } + + return v +} + +// Packet returns a packets library Connect from the paho Connect +// on which it is called +func (c *Connect) Packet() *packets.Connect { + v := &packets.Connect{ + UsernameFlag: c.UsernameFlag, + Username: c.Username, + PasswordFlag: c.PasswordFlag, + Password: c.Password, + ClientID: c.ClientID, + CleanStart: c.CleanStart, + KeepAlive: c.KeepAlive, + } + + if c.Properties != nil { + v.Properties = &packets.Properties{ + SessionExpiryInterval: c.Properties.SessionExpiryInterval, + AuthMethod: c.Properties.AuthMethod, + AuthData: c.Properties.AuthData, + WillDelayInterval: c.Properties.WillDelayInterval, + ReceiveMaximum: c.Properties.ReceiveMaximum, + TopicAliasMaximum: c.Properties.TopicAliasMaximum, + MaximumQOS: c.Properties.MaximumQOS, + MaximumPacketSize: c.Properties.MaximumPacketSize, + User: c.Properties.User.ToPacketProperties(), + } + if c.Properties.RequestResponseInfo { + v.Properties.RequestResponseInfo = Byte(1) + } + if !c.Properties.RequestProblemInfo { + v.Properties.RequestProblemInfo = Byte(0) + } + } + + if c.WillMessage != nil { + v.WillFlag = true + v.WillQOS = c.WillMessage.QoS 
+ v.WillTopic = c.WillMessage.Topic + v.WillRetain = c.WillMessage.Retain + v.WillMessage = c.WillMessage.Payload + if c.WillProperties != nil { + v.WillProperties = &packets.Properties{ + WillDelayInterval: c.WillProperties.WillDelayInterval, + PayloadFormat: c.WillProperties.PayloadFormat, + MessageExpiry: c.WillProperties.MessageExpiry, + ContentType: c.WillProperties.ContentType, + ResponseTopic: c.WillProperties.ResponseTopic, + CorrelationData: c.WillProperties.CorrelationData, + User: c.WillProperties.User.ToPacketProperties(), + } + } + } + + return v +} + +type ( + // WillMessage is a representation of the LWT message that can + // be sent with the Connect packet + WillMessage struct { + Retain bool + QoS byte + Topic string + Payload []byte + } + + // WillProperties is a struct of the properties that can be set + // for a Will in a Connect packet + WillProperties struct { + WillDelayInterval *uint32 + PayloadFormat *byte + MessageExpiry *uint32 + ContentType string + ResponseTopic string + CorrelationData []byte + User UserProperties + } +) diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_disconnect.go b/vendor/github.com/eclipse/paho.golang/paho/cp_disconnect.go new file mode 100644 index 000000000..5caa85b14 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_disconnect.go @@ -0,0 +1,58 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Disconnect is a representation of the MQTT Disconnect packet + Disconnect struct { + Properties *DisconnectProperties + ReasonCode byte + } + + // DisconnectProperties is a struct of the properties that can be set + // for a Disconnect packet + DisconnectProperties struct { + ServerReference string + ReasonString string + SessionExpiryInterval *uint32 + User UserProperties + } +) + +// InitProperties is a function that takes a lower level +// Properties struct and completes the properties of the Disconnect on +// which it is called +func (d *Disconnect) 
InitProperties(p *packets.Properties) { + d.Properties = &DisconnectProperties{ + SessionExpiryInterval: p.SessionExpiryInterval, + ServerReference: p.ServerReference, + ReasonString: p.ReasonString, + User: UserPropertiesFromPacketUser(p.User), + } +} + +// DisconnectFromPacketDisconnect takes a packets library Disconnect and +// returns a paho library Disconnect +func DisconnectFromPacketDisconnect(p *packets.Disconnect) *Disconnect { + v := &Disconnect{ReasonCode: p.ReasonCode} + v.InitProperties(p.Properties) + + return v +} + +// Packet returns a packets library Disconnect from the paho Disconnect +// on which it is called +func (d *Disconnect) Packet() *packets.Disconnect { + v := &packets.Disconnect{ReasonCode: d.ReasonCode} + + if d.Properties != nil { + v.Properties = &packets.Properties{ + SessionExpiryInterval: d.Properties.SessionExpiryInterval, + ServerReference: d.Properties.ServerReference, + ReasonString: d.Properties.ReasonString, + User: d.Properties.User.ToPacketProperties(), + } + } + + return v +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_publish.go b/vendor/github.com/eclipse/paho.golang/paho/cp_publish.go new file mode 100644 index 000000000..1bb9654b3 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_publish.go @@ -0,0 +1,123 @@ +package paho + +import ( + "bytes" + "fmt" + + "github.com/eclipse/paho.golang/packets" +) + +type ( + // Publish is a representation of the MQTT Publish packet + Publish struct { + PacketID uint16 + QoS byte + Retain bool + Topic string + Properties *PublishProperties + Payload []byte + } + + // PublishProperties is a struct of the properties that can be set + // for a Publish packet + PublishProperties struct { + CorrelationData []byte + ContentType string + ResponseTopic string + PayloadFormat *byte + MessageExpiry *uint32 + SubscriptionIdentifier *int + TopicAlias *uint16 + User UserProperties + } +) + +// InitProperties is a function that takes a lower level +// Properties 
struct and completes the properties of the Publish on +// which it is called +func (p *Publish) InitProperties(prop *packets.Properties) { + p.Properties = &PublishProperties{ + PayloadFormat: prop.PayloadFormat, + MessageExpiry: prop.MessageExpiry, + ContentType: prop.ContentType, + ResponseTopic: prop.ResponseTopic, + CorrelationData: prop.CorrelationData, + TopicAlias: prop.TopicAlias, + SubscriptionIdentifier: prop.SubscriptionIdentifier, + User: UserPropertiesFromPacketUser(prop.User), + } +} + +// PublishFromPacketPublish takes a packets library Publish and +// returns a paho library Publish +func PublishFromPacketPublish(p *packets.Publish) *Publish { + v := &Publish{ + PacketID: p.PacketID, + QoS: p.QoS, + Retain: p.Retain, + Topic: p.Topic, + Payload: p.Payload, + } + v.InitProperties(p.Properties) + + return v +} + +// Packet returns a packets library Publish from the paho Publish +// on which it is called +func (p *Publish) Packet() *packets.Publish { + v := &packets.Publish{ + PacketID: p.PacketID, + QoS: p.QoS, + Retain: p.Retain, + Topic: p.Topic, + Payload: p.Payload, + } + if p.Properties != nil { + v.Properties = &packets.Properties{ + PayloadFormat: p.Properties.PayloadFormat, + MessageExpiry: p.Properties.MessageExpiry, + ContentType: p.Properties.ContentType, + ResponseTopic: p.Properties.ResponseTopic, + CorrelationData: p.Properties.CorrelationData, + TopicAlias: p.Properties.TopicAlias, + SubscriptionIdentifier: p.Properties.SubscriptionIdentifier, + User: p.Properties.User.ToPacketProperties(), + } + } + + return v +} + +func (p *Publish) String() string { + var b bytes.Buffer + + fmt.Fprintf(&b, "topic: %s qos: %d retain: %t\n", p.Topic, p.QoS, p.Retain) + if p.Properties.PayloadFormat != nil { + fmt.Fprintf(&b, "PayloadFormat: %v\n", p.Properties.PayloadFormat) + } + if p.Properties.MessageExpiry != nil { + fmt.Fprintf(&b, "MessageExpiry: %v\n", p.Properties.MessageExpiry) + } + if p.Properties.ContentType != "" { + fmt.Fprintf(&b, 
"ContentType: %v\n", p.Properties.ContentType) + } + if p.Properties.ResponseTopic != "" { + fmt.Fprintf(&b, "ResponseTopic: %v\n", p.Properties.ResponseTopic) + } + if p.Properties.CorrelationData != nil { + fmt.Fprintf(&b, "CorrelationData: %v\n", p.Properties.CorrelationData) + } + if p.Properties.TopicAlias != nil { + fmt.Fprintf(&b, "TopicAlias: %d\n", p.Properties.TopicAlias) + } + if p.Properties.SubscriptionIdentifier != nil { + fmt.Fprintf(&b, "SubscriptionIdentifier: %v\n", p.Properties.SubscriptionIdentifier) + } + for _, v := range p.Properties.User { + fmt.Fprintf(&b, "User: %s : %s\n", v.Key, v.Value) + } + b.WriteString(string(p.Payload)) + + return b.String() +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_pubresp.go b/vendor/github.com/eclipse/paho.golang/paho/cp_pubresp.go new file mode 100644 index 000000000..0c4e174aa --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_pubresp.go @@ -0,0 +1,55 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // PublishResponse is a generic representation of a response + // to a QoS1 or QoS2 Publish + PublishResponse struct { + Properties *PublishResponseProperties + ReasonCode byte + } + + // PublishResponseProperties is the properties associated with + // a response to a QoS1 or QoS2 Publish + PublishResponseProperties struct { + ReasonString string + User UserProperties + } +) + +// PublishResponseFromPuback takes a packets library Puback and +// returns a paho library PublishResponse +func PublishResponseFromPuback(pa *packets.Puback) *PublishResponse { + return &PublishResponse{ + ReasonCode: pa.ReasonCode, + Properties: &PublishResponseProperties{ + ReasonString: pa.Properties.ReasonString, + User: UserPropertiesFromPacketUser(pa.Properties.User), + }, + } +} + +// PublishResponseFromPubcomp takes a packets library Pubcomp and +// returns a paho library PublishResponse +func PublishResponseFromPubcomp(pc *packets.Pubcomp) *PublishResponse { + return 
&PublishResponse{ + ReasonCode: pc.ReasonCode, + Properties: &PublishResponseProperties{ + ReasonString: pc.Properties.ReasonString, + User: UserPropertiesFromPacketUser(pc.Properties.User), + }, + } +} + +// PublishResponseFromPubrec takes a packets library Pubrec and +// returns a paho library PublishResponse +func PublishResponseFromPubrec(pr *packets.Pubrec) *PublishResponse { + return &PublishResponse{ + ReasonCode: pr.ReasonCode, + Properties: &PublishResponseProperties{ + ReasonString: pr.Properties.ReasonString, + User: UserPropertiesFromPacketUser(pr.Properties.User), + }, + } +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_suback.go b/vendor/github.com/eclipse/paho.golang/paho/cp_suback.go new file mode 100644 index 000000000..c1034c26c --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_suback.go @@ -0,0 +1,41 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Suback is a representation of an MQTT suback packet + Suback struct { + Properties *SubackProperties + Reasons []byte + } + + // SubackProperties is a struct of the properties that can be set + // for a Suback packet + SubackProperties struct { + ReasonString string + User UserProperties + } +) + +// Packet returns a packets library Suback from the paho Suback +// on which it is called +func (s *Suback) Packet() *packets.Suback { + return &packets.Suback{ + Reasons: s.Reasons, + Properties: &packets.Properties{ + User: s.Properties.User.ToPacketProperties(), + }, + } +} + +// SubackFromPacketSuback takes a packets library Suback and +// returns a paho library Suback +func SubackFromPacketSuback(s *packets.Suback) *Suback { + return &Suback{ + Reasons: s.Reasons, + Properties: &SubackProperties{ + ReasonString: s.Properties.ReasonString, + User: UserPropertiesFromPacketUser(s.Properties.User), + }, + } +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_subscribe.go b/vendor/github.com/eclipse/paho.golang/paho/cp_subscribe.go new 
file mode 100644 index 000000000..e111f0cf6 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_subscribe.go @@ -0,0 +1,67 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Subscribe is a representation of a MQTT subscribe packet + Subscribe struct { + Properties *SubscribeProperties + Subscriptions map[string]SubscribeOptions + } + + // SubscribeOptions is the struct representing the options for a subscription + SubscribeOptions struct { + QoS byte + RetainHandling byte + NoLocal bool + RetainAsPublished bool + } +) + +// SubscribeProperties is a struct of the properties that can be set +// for a Subscribe packet +type SubscribeProperties struct { + SubscriptionIdentifier *int + User UserProperties +} + +// InitProperties is a function that takes a packet library +// Properties struct and completes the properties of the Subscribe on +// which it is called +func (s *Subscribe) InitProperties(prop *packets.Properties) { + s.Properties = &SubscribeProperties{ + SubscriptionIdentifier: prop.SubscriptionIdentifier, + User: UserPropertiesFromPacketUser(prop.User), + } +} + +// PacketSubOptionsFromSubscribeOptions returns a map of string to packet +// library SubOptions for the paho Subscribe on which it is called +func (s *Subscribe) PacketSubOptionsFromSubscribeOptions() map[string]packets.SubOptions { + r := make(map[string]packets.SubOptions) + for k, v := range s.Subscriptions { + r[k] = packets.SubOptions{ + QoS: v.QoS, + NoLocal: v.NoLocal, + RetainAsPublished: v.RetainAsPublished, + RetainHandling: v.RetainHandling, + } + } + + return r +} + +// Packet returns a packets library Subscribe from the paho Subscribe +// on which it is called +func (s *Subscribe) Packet() *packets.Subscribe { + v := &packets.Subscribe{Subscriptions: s.PacketSubOptionsFromSubscribeOptions()} + + if s.Properties != nil { + v.Properties = &packets.Properties{ + SubscriptionIdentifier: s.Properties.SubscriptionIdentifier, + User: 
s.Properties.User.ToPacketProperties(), + } + } + + return v +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_unsuback.go b/vendor/github.com/eclipse/paho.golang/paho/cp_unsuback.go new file mode 100644 index 000000000..15ca83885 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_unsuback.go @@ -0,0 +1,41 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Unsuback is a representation of an MQTT Unsuback packet + Unsuback struct { + Reasons []byte + Properties *UnsubackProperties + } + + // UnsubackProperties is a struct of the properties that can be set + // for a Unsuback packet + UnsubackProperties struct { + ReasonString string + User UserProperties + } +) + +// Packet returns a packets library Unsuback from the paho Unsuback +// on which it is called +func (u *Unsuback) Packet() *packets.Unsuback { + return &packets.Unsuback{ + Reasons: u.Reasons, + Properties: &packets.Properties{ + User: u.Properties.User.ToPacketProperties(), + }, + } +} + +// UnsubackFromPacketUnsuback takes a packets library Unsuback and +// returns a paho library Unsuback +func UnsubackFromPacketUnsuback(u *packets.Unsuback) *Unsuback { + return &Unsuback{ + Reasons: u.Reasons, + Properties: &UnsubackProperties{ + ReasonString: u.Properties.ReasonString, + User: UserPropertiesFromPacketUser(u.Properties.User), + }, + } +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_unsubscribe.go b/vendor/github.com/eclipse/paho.golang/paho/cp_unsubscribe.go new file mode 100644 index 000000000..375b917c8 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_unsubscribe.go @@ -0,0 +1,31 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Unsubscribe is a representation of an MQTT unsubscribe packet + Unsubscribe struct { + Topics []string + Properties *UnsubscribeProperties + } + + // UnsubscribeProperties is a struct of the properties that can be set + // for a Unsubscribe packet + 
UnsubscribeProperties struct { + User UserProperties + } +) + +// Packet returns a packets library Unsubscribe from the paho Unsubscribe +// on which it is called +func (u *Unsubscribe) Packet() *packets.Unsubscribe { + v := &packets.Unsubscribe{Topics: u.Topics} + + if u.Properties != nil { + v.Properties = &packets.Properties{ + User: u.Properties.User.ToPacketProperties(), + } + } + + return v +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_utils.go b/vendor/github.com/eclipse/paho.golang/paho/cp_utils.go new file mode 100644 index 000000000..2d7995f5c --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_utils.go @@ -0,0 +1,100 @@ +package paho + +import ( + "github.com/eclipse/paho.golang/packets" +) + +// UserProperty is a struct for the user provided values +// permitted in the properties section +type UserProperty struct { + Key, Value string +} + +// UserProperties is a slice of UserProperty +type UserProperties []UserProperty + +// Add is a helper function for easily adding a new user property +func (u *UserProperties) Add(key, value string) *UserProperties { + *u = append(*u, UserProperty{key, value}) + + return u +} + +// Get returns the first entry in the UserProperties that matches +// key, or an empty string if the key is not found. Note that it is +// permitted to have multiple entries with the same key, use GetAll +// if it is expected to have multiple matches +func (u UserProperties) Get(key string) string { + for _, v := range u { + if v.Key == key { + return v.Value + } + } + + return "" +} + +// GetAll returns a slice of all entries in the UserProperties +// that match key, or a nil slice if none were found. 
+func (u UserProperties) GetAll(key string) []string { + var ret []string + for _, v := range u { + if v.Key == key { + ret = append(ret, v.Value) + } + } + + return ret +} + +// ToPacketProperties converts a UserProperties to a slice +// of packets.User which is used internally in the packets +// library for user properties +func (u UserProperties) ToPacketProperties() []packets.User { + ret := make([]packets.User, len(u)) + for i, v := range u { + ret[i] = packets.User{Key: v.Key, Value: v.Value} + } + + return ret +} + +// UserPropertiesFromPacketUser converts a slice of packets.User +// to an instance of UserProperties for easier consumption within +// the client library +func UserPropertiesFromPacketUser(up []packets.User) UserProperties { + ret := make(UserProperties, len(up)) + for i, v := range up { + ret[i] = UserProperty{v.Key, v.Value} + } + + return ret +} + +// Byte is a helper function that take a byte and returns +// a pointer to a byte of that value +func Byte(b byte) *byte { + return &b +} + +// Uint32 is a helper function that take a uint32 and returns +// a pointer to a uint32 of that value +func Uint32(u uint32) *uint32 { + return &u +} + +// Uint16 is a helper function that take a uint16 and returns +// a pointer to a uint16 of that value +func Uint16(u uint16) *uint16 { + return &u +} + +// BoolToByte is a helper function that take a bool and returns +// a pointer to a byte of value 1 if true or 0 if false +func BoolToByte(b bool) *byte { + var v byte + if b { + v = 1 + } + return &v +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/message_ids.go b/vendor/github.com/eclipse/paho.golang/paho/message_ids.go new file mode 100644 index 000000000..58b03e324 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/message_ids.go @@ -0,0 +1,93 @@ +package paho + +import ( + "context" + "errors" + "sync" + + "github.com/eclipse/paho.golang/packets" +) + +const ( + midMin uint16 = 1 + midMax uint16 = 65535 +) + +// ErrorMidsExhausted is 
returned from Request() when there are no +// free message ids to be used. +var ErrorMidsExhausted = errors.New("all message ids in use") + +// MIDService defines the interface for a struct that handles the +// relationship between message ids and CPContexts +// Request() takes a *CPContext and returns a uint16 that is the +// messageid that should be used by the code that called Request() +// Get() takes a uint16 that is a messageid and returns the matching +// *CPContext that the MIDService has associated with that messageid +// Free() takes a uint16 that is a messageid and instructs the MIDService +// to mark that messageid as available for reuse +// Clear() resets the internal state of the MIDService +type MIDService interface { + Request(*CPContext) (uint16, error) + Get(uint16) *CPContext + Free(uint16) + Clear() +} + +// CPContext is the struct that is used to return responses to +// ControlPackets that have them, eg: the suback to a subscribe. +// The response packet is send down the Return channel and the +// Context is used to track timeouts. +type CPContext struct { + Context context.Context + Return chan packets.ControlPacket +} + +// MIDs is the default MIDService provided by this library. 
+// It uses a map of uint16 to *CPContext to track responses +// to messages with a messageid +type MIDs struct { + sync.Mutex + lastMid uint16 + index []*CPContext +} + +// Request is the library provided MIDService's implementation of +// the required interface function() +func (m *MIDs) Request(c *CPContext) (uint16, error) { + m.Lock() + defer m.Unlock() + for i := uint16(1); i < midMax; i++ { + v := (m.lastMid + i) % midMax + if v == 0 { + continue + } + if inuse := m.index[v]; inuse == nil { + m.index[v] = c + m.lastMid = v + return v, nil + } + } + return 0, ErrorMidsExhausted +} + +// Get is the library provided MIDService's implementation of +// the required interface function() +func (m *MIDs) Get(i uint16) *CPContext { + m.Lock() + defer m.Unlock() + return m.index[i] +} + +// Free is the library provided MIDService's implementation of +// the required interface function() +func (m *MIDs) Free(i uint16) { + m.Lock() + m.index[i] = nil + m.Unlock() +} + +// Clear is the library provided MIDService's implementation of +// the required interface function() +func (m *MIDs) Clear() { + m.index = make([]*CPContext, int(midMax)) +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/noop_persistence.go b/vendor/github.com/eclipse/paho.golang/paho/noop_persistence.go new file mode 100644 index 000000000..d2d15704f --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/noop_persistence.go @@ -0,0 +1,23 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type noopPersistence struct{} + +func (n *noopPersistence) Open() {} + +func (n *noopPersistence) Put(id uint16, cp packets.ControlPacket) {} + +func (n *noopPersistence) Get(id uint16) packets.ControlPacket { + return packets.ControlPacket{} +} + +func (n *noopPersistence) All() []packets.ControlPacket { + return nil +} + +func (n *noopPersistence) Delete(id uint16) {} + +func (n *noopPersistence) Close() {} + +func (n *noopPersistence) Reset() {} diff --git 
a/vendor/github.com/eclipse/paho.golang/paho/persistence.go b/vendor/github.com/eclipse/paho.golang/paho/persistence.go new file mode 100644 index 000000000..f02b846cc --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/persistence.go @@ -0,0 +1,98 @@ +package paho + +import ( + "sync" + + "github.com/eclipse/paho.golang/packets" +) + +// Persistence is an interface of the functions for a struct +// that is used to persist ControlPackets. +// Open() is an initialiser to prepare the Persistence for use +// Put() takes a uint16 which is a messageid and a ControlPacket +// to persist against that messageid +// Get() takes a uint16 which is a messageid and returns the +// persisted ControlPacket from the Persistence for that messageid +// All() returns a slice of all ControlPackets persisted +// Delete() takes a uint16 which is a messageid and deletes the +// associated stored ControlPacket from the Persistence +// Close() closes the Persistence +// Reset() clears the Persistence and prepares it to be reused +type Persistence interface { + Open() + Put(uint16, packets.ControlPacket) + Get(uint16) packets.ControlPacket + All() []packets.ControlPacket + Delete(uint16) + Close() + Reset() +} + +// MemoryPersistence is an implementation of a Persistence +// that stores the ControlPackets in memory using a map +type MemoryPersistence struct { + sync.RWMutex + packets map[uint16]packets.ControlPacket +} + +// Open is the library provided MemoryPersistence's implementation of +// the required interface function() +func (m *MemoryPersistence) Open() { + m.Lock() + m.packets = make(map[uint16]packets.ControlPacket) + m.Unlock() +} + +// Put is the library provided MemoryPersistence's implementation of +// the required interface function() +func (m *MemoryPersistence) Put(id uint16, cp packets.ControlPacket) { + m.Lock() + m.packets[id] = cp + m.Unlock() +} + +// Get is the library provided MemoryPersistence's implementation of +// the required interface function() 
+func (m *MemoryPersistence) Get(id uint16) packets.ControlPacket { + m.RLock() + defer m.RUnlock() + return m.packets[id] +} + +// All is the library provided MemoryPersistence's implementation of +// the required interface function() +func (m *MemoryPersistence) All() []packets.ControlPacket { + m.Lock() + defer m.RUnlock() + ret := make([]packets.ControlPacket, len(m.packets)) + + for _, cp := range m.packets { + ret = append(ret, cp) + } + + return ret +} + +// Delete is the library provided MemoryPersistence's implementation of +// the required interface function() +func (m *MemoryPersistence) Delete(id uint16) { + m.Lock() + delete(m.packets, id) + m.Unlock() +} + +// Close is the library provided MemoryPersistence's implementation of +// the required interface function() +func (m *MemoryPersistence) Close() { + m.Lock() + m.packets = nil + m.Unlock() +} + +// Reset is the library provided MemoryPersistence's implementation of +// the required interface function() +func (m *MemoryPersistence) Reset() { + m.Lock() + m.packets = make(map[uint16]packets.ControlPacket) + m.Unlock() +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/pinger.go b/vendor/github.com/eclipse/paho.golang/paho/pinger.go new file mode 100644 index 000000000..e135d25ac --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/pinger.go @@ -0,0 +1,122 @@ +package paho + +import ( + "fmt" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/eclipse/paho.golang/packets" +) + +// PingFailHandler is a type for the function that is invoked +// when we have sent a Pingreq to the server and not received +// a Pingresp within 1.5x our pingtimeout +type PingFailHandler func(error) + +// Pinger is an interface of the functions for a struct that is +// used to manage sending PingRequests and responding to +// PingResponses +// Start() takes a net.Conn which is a connection over which an +// MQTT session has already been established, and a time.Duration +// of the keepalive setting 
passed to the server when the MQTT +// session was established. +// Stop() is used to stop the Pinger +// PingResp() is the function that is called by the Client when +// a PingResponse is received +// SetDebug() is used to pass in a Logger to be used to log debug +// information, for example sharing a logger with the main client +type Pinger interface { + Start(net.Conn, time.Duration) + Stop() + PingResp() + SetDebug(Logger) +} + +// PingHandler is the library provided default Pinger +type PingHandler struct { + mu sync.Mutex + lastPing time.Time + conn net.Conn + stop chan struct{} + pingFailHandler PingFailHandler + pingOutstanding int32 + debug Logger +} + +// DefaultPingerWithCustomFailHandler returns an instance of the +// default Pinger but with a custom PingFailHandler that is called +// when the client has not received a response to a PingRequest +// within the appropriate amount of time +func DefaultPingerWithCustomFailHandler(pfh PingFailHandler) *PingHandler { + return &PingHandler{ + pingFailHandler: pfh, + debug: NOOPLogger{}, + } +} + +// Start is the library provided Pinger's implementation of +// the required interface function() +func (p *PingHandler) Start(c net.Conn, pt time.Duration) { + p.mu.Lock() + p.conn = c + p.stop = make(chan struct{}) + p.mu.Unlock() + checkTicker := time.NewTicker(pt / 4) + defer checkTicker.Stop() + for { + select { + case <-p.stop: + return + case <-checkTicker.C: + if atomic.LoadInt32(&p.pingOutstanding) > 0 && time.Since(p.lastPing) > (pt+pt>>1) { + p.pingFailHandler(fmt.Errorf("ping resp timed out")) + //ping outstanding and not reset in 1.5 times ping timer + return + } + if time.Since(p.lastPing) >= pt { + //time to send a ping + if _, err := packets.NewControlPacket(packets.PINGREQ).WriteTo(p.conn); err != nil { + if p.pingFailHandler != nil { + p.pingFailHandler(err) + } + return + } + atomic.AddInt32(&p.pingOutstanding, 1) + p.lastPing = time.Now() + p.debug.Println("pingHandler sending ping request") + } + 
} + } +} + +// Stop is the library provided Pinger's implementation of +// the required interface function() +func (p *PingHandler) Stop() { + p.mu.Lock() + defer p.mu.Unlock() + if p.stop == nil { + return + } + p.debug.Println("pingHandler stopping") + select { + case <-p.stop: + //Already stopped, do nothing + default: + close(p.stop) + } +} + +// PingResp is the library provided Pinger's implementation of +// the required interface function() +func (p *PingHandler) PingResp() { + p.debug.Println("pingHandler resetting pingOutstanding") + atomic.StoreInt32(&p.pingOutstanding, 0) +} + +// SetDebug sets the logger l to be used for printing debug +// information for the pinger +func (p *PingHandler) SetDebug(l Logger) { + p.debug = l +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/router.go b/vendor/github.com/eclipse/paho.golang/paho/router.go new file mode 100644 index 000000000..05031596f --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/router.go @@ -0,0 +1,212 @@ +package paho + +import ( + "strings" + "sync" + + "github.com/eclipse/paho.golang/packets" +) + +// MessageHandler is a type for a function that is invoked +// by a Router when it has received a Publish. +type MessageHandler func(*Publish) + +// Router is an interface of the functions for a struct that is +// used to handle invoking MessageHandlers depending on the +// the topic the message was published on. 
+// RegisterHandler() takes a string of the topic, and a MessageHandler +// to be invoked when Publishes are received that match that topic +// UnregisterHandler() takes a string of the topic to remove +// MessageHandlers for +// Route() takes a Publish message and determines which MessageHandlers +// should be invoked +type Router interface { + RegisterHandler(string, MessageHandler) + UnregisterHandler(string) + Route(*packets.Publish) + SetDebugLogger(Logger) +} + +// StandardRouter is a library provided implementation of a Router that +// allows for unique and multiple MessageHandlers per topic +type StandardRouter struct { + sync.RWMutex + subscriptions map[string][]MessageHandler + aliases map[uint16]string + debug Logger +} + +// NewStandardRouter instantiates and returns an instance of a StandardRouter +func NewStandardRouter() *StandardRouter { + return &StandardRouter{ + subscriptions: make(map[string][]MessageHandler), + aliases: make(map[uint16]string), + debug: NOOPLogger{}, + } +} + +// RegisterHandler is the library provided StandardRouter's +// implementation of the required interface function() +func (r *StandardRouter) RegisterHandler(topic string, h MessageHandler) { + r.debug.Println("registering handler for:", topic) + r.Lock() + defer r.Unlock() + + r.subscriptions[topic] = append(r.subscriptions[topic], h) +} + +// UnregisterHandler is the library provided StandardRouter's +// implementation of the required interface function() +func (r *StandardRouter) UnregisterHandler(topic string) { + r.debug.Println("unregistering handler for:", topic) + r.Lock() + defer r.Unlock() + + delete(r.subscriptions, topic) +} + +// Route is the library provided StandardRouter's implementation +// of the required interface function() +func (r *StandardRouter) Route(pb *packets.Publish) { + r.debug.Println("routing message for:", pb.Topic) + r.RLock() + defer r.RUnlock() + + m := PublishFromPacketPublish(pb) + + var topic string + if pb.Properties.TopicAlias != 
nil { + r.debug.Println("message is using topic aliasing") + if pb.Topic != "" { + //Register new alias + r.debug.Printf("registering new topic alias '%d' for topic '%s'", *pb.Properties.TopicAlias, m.Topic) + r.aliases[*pb.Properties.TopicAlias] = pb.Topic + } + if t, ok := r.aliases[*pb.Properties.TopicAlias]; ok { + r.debug.Printf("aliased topic '%d' translates to '%s'", *pb.Properties.TopicAlias, m.Topic) + topic = t + } + } else { + topic = m.Topic + } + + for route, handlers := range r.subscriptions { + if match(route, topic) { + r.debug.Println("found handler for:", route) + for _, handler := range handlers { + handler(m) + } + } + } +} + +// SetDebugLogger sets the logger l to be used for printing debug +// information for the router +func (r *StandardRouter) SetDebugLogger(l Logger) { + r.debug = l +} + +func match(route, topic string) bool { + return route == topic || routeIncludesTopic(route, topic) +} + +func matchDeep(route []string, topic []string) bool { + if len(route) == 0 { + return len(topic) == 0 + } + + if len(topic) == 0 { + return route[0] == "#" + } + + if route[0] == "#" { + return true + } + + if (route[0] == "+") || (route[0] == topic[0]) { + return matchDeep(route[1:], topic[1:]) + } + return false +} + +func routeIncludesTopic(route, topic string) bool { + return matchDeep(routeSplit(route), topicSplit(topic)) +} + +func routeSplit(route string) []string { + if len(route) == 0 { + return nil + } + var result []string + if strings.HasPrefix(route, "$share") { + result = strings.Split(route, "/")[2:] + } else { + result = strings.Split(route, "/") + } + return result +} + +func topicSplit(topic string) []string { + if len(topic) == 0 { + return nil + } + return strings.Split(topic, "/") +} + +// SingleHandlerRouter is a library provided implementation of a Router +// that stores only a single MessageHandler and invokes this MessageHandler +// for all received Publishes +type SingleHandlerRouter struct { + sync.Mutex + aliases 
map[uint16]string + handler MessageHandler + debug Logger +} + +// NewSingleHandlerRouter instantiates and returns an instance of a SingleHandlerRouter +func NewSingleHandlerRouter(h MessageHandler) *SingleHandlerRouter { + return &SingleHandlerRouter{ + aliases: make(map[uint16]string), + handler: h, + debug: NOOPLogger{}, + } +} + +// RegisterHandler is the library provided SingleHandlerRouter's +// implementation of the required interface function() +func (s *SingleHandlerRouter) RegisterHandler(topic string, h MessageHandler) { + s.debug.Println("registering handler for:", topic) + s.handler = h +} + +// UnregisterHandler is the library provided SingleHandlerRouter's +// implementation of the required interface function() +func (s *SingleHandlerRouter) UnregisterHandler(topic string) {} + +// Route is the library provided SingleHandlerRouter's +// implementation of the required interface function() +func (s *SingleHandlerRouter) Route(pb *packets.Publish) { + m := PublishFromPacketPublish(pb) + + s.debug.Println("routing message for:", m.Topic) + + if pb.Properties.TopicAlias != nil { + s.debug.Println("message is using topic aliasing") + if pb.Topic != "" { + //Register new alias + s.debug.Printf("registering new topic alias '%d' for topic '%s'", *pb.Properties.TopicAlias, m.Topic) + s.aliases[*pb.Properties.TopicAlias] = pb.Topic + } + if t, ok := s.aliases[*pb.Properties.TopicAlias]; ok { + s.debug.Printf("aliased topic '%d' translates to '%s'", *pb.Properties.TopicAlias, m.Topic) + m.Topic = t + } + } + s.handler(m) +} + +// SetDebugLogger sets the logger l to be used for printing debug +// information for the router +func (s *SingleHandlerRouter) SetDebugLogger(l Logger) { + s.debug = l +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/trace.go b/vendor/github.com/eclipse/paho.golang/paho/trace.go new file mode 100644 index 000000000..586c92398 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/trace.go @@ -0,0 +1,22 @@ +package paho + 
+type ( + // Logger interface allows implementations to provide to this package any + // object that implements the methods defined in it. + Logger interface { + Println(v ...interface{}) + Printf(format string, v ...interface{}) + } + + // NOOPLogger implements the logger that does not perform any operation + // by default. This allows us to efficiently discard the unwanted messages. + NOOPLogger struct{} +) + +// Println is the library provided NOOPLogger's +// implementation of the required interface function() +func (NOOPLogger) Println(v ...interface{}) {} + +// Printf is the library provided NOOPLogger's +// implementation of the required interface function(){} +func (NOOPLogger) Printf(format string, v ...interface{}) {} diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore new file mode 100644 index 000000000..cd3fcd1ef --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +.idea/ +*.iml diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS new file mode 100644 index 000000000..1931f4006 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/AUTHORS @@ -0,0 +1,9 @@ +# This is the official list of Gorilla WebSocket authors for copyright +# purposes. +# +# Please keep the list sorted. + +Gary Burd +Google LLC (https://opensource.google.com/) +Joachim Bauch + diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE new file mode 100644 index 000000000..9171c9722 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md new file mode 100644 index 000000000..2517a2871 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/README.md @@ -0,0 +1,39 @@ +# Gorilla WebSocket + +[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) +[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket) + +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the +[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. 
+ + +--- + +⚠️ **[The Gorilla WebSocket Package is looking for a new maintainer](https://github.com/gorilla/websocket/issues/370)** + +--- + +### Documentation + +* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc) +* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) +* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) +* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) +* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) + +### Status + +The Gorilla WebSocket package provides a complete and tested implementation of +the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The +package API is stable. + +### Installation + + go get github.com/gorilla/websocket + +### Protocol Compliance + +The Gorilla WebSocket package passes the server tests in the [Autobahn Test +Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn +subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). + diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go new file mode 100644 index 000000000..2efd83555 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client.go @@ -0,0 +1,422 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "context" + "crypto/tls" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptrace" + "net/url" + "strings" + "time" +) + +// ErrBadHandshake is returned when the server response to opening handshake is +// invalid. 
+var ErrBadHandshake = errors.New("websocket: bad handshake") + +var errInvalidCompression = errors.New("websocket: invalid compression negotiation") + +// NewClient creates a new client connection using the given net connection. +// The URL u specifies the host and request URI. Use requestHeader to specify +// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies +// (Cookie). Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etc. +// +// Deprecated: Use Dialer instead. +func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { + d := Dialer{ + ReadBufferSize: readBufSize, + WriteBufferSize: writeBufSize, + NetDial: func(net, addr string) (net.Conn, error) { + return netConn, nil + }, + } + return d.Dial(u.String(), requestHeader) +} + +// A Dialer contains options for connecting to WebSocket server. +// +// It is safe to call Dialer's methods concurrently. +type Dialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, net.Dial is used. + NetDial func(network, addr string) (net.Conn, error) + + // NetDialContext specifies the dial function for creating TCP connections. If + // NetDialContext is nil, NetDial is used. + NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error) + + // NetDialTLSContext specifies the dial function for creating TLS/TCP connections. If + // NetDialTLSContext is nil, NetDialContext is used. + // If NetDialTLSContext is set, Dial assumes the TLS handshake is done there and + // TLSClientConfig is ignored. 
+ NetDialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error) + + // Proxy specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxy func(*http.Request) (*url.URL, error) + + // TLSClientConfig specifies the TLS configuration to use with tls.Client. + // If nil, the default configuration is used. + // If either NetDialTLS or NetDialTLSContext are set, Dial assumes the TLS handshake + // is done there and TLSClientConfig is ignored. + TLSClientConfig *tls.Config + + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer + // size is zero, then a useful default size is used. The I/O buffer sizes + // do not limit the size of the messages that can be sent or received. + ReadBufferSize, WriteBufferSize int + + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. + // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. + WriteBufferPool BufferPool + + // Subprotocols specifies the client's requested subprotocols. + Subprotocols []string + + // EnableCompression specifies if the client should attempt to negotiate + // per message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool + + // Jar specifies the cookie jar. + // If Jar is nil, cookies are not sent in requests and ignored + // in responses. 
+ Jar http.CookieJar +} + +// Dial creates a new client connection by calling DialContext with a background context. +func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + return d.DialContext(context.Background(), urlStr, requestHeader) +} + +var errMalformedURL = errors.New("malformed ws or wss URL") + +func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { + hostPort = u.Host + hostNoPort = u.Host + if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { + hostNoPort = hostNoPort[:i] + } else { + switch u.Scheme { + case "wss": + hostPort += ":443" + case "https": + hostPort += ":443" + default: + hostPort += ":80" + } + } + return hostPort, hostNoPort +} + +// DefaultDialer is a dialer with all fields set to the default values. +var DefaultDialer = &Dialer{ + Proxy: http.ProxyFromEnvironment, + HandshakeTimeout: 45 * time.Second, +} + +// nilDialer is dialer to use when receiver is nil. +var nilDialer = *DefaultDialer + +// DialContext creates a new client connection. Use requestHeader to specify the +// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). +// Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// The context will be used in the request and in the Dialer. +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etcetera. The response body may not contain the entire response and does not +// need to be closed by the application. 
+func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + if d == nil { + d = &nilDialer + } + + challengeKey, err := generateChallengeKey() + if err != nil { + return nil, nil, err + } + + u, err := url.Parse(urlStr) + if err != nil { + return nil, nil, err + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + default: + return nil, nil, errMalformedURL + } + + if u.User != nil { + // User name and password are not allowed in websocket URIs. + return nil, nil, errMalformedURL + } + + req := &http.Request{ + Method: http.MethodGet, + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: u.Host, + } + req = req.WithContext(ctx) + + // Set the cookies present in the cookie jar of the dialer + if d.Jar != nil { + for _, cookie := range d.Jar.Cookies(u) { + req.AddCookie(cookie) + } + } + + // Set the request headers using the capitalization for names and values in + // RFC examples. Although the capitalization shouldn't matter, there are + // servers that depend on it. The Header.Set method is not used because the + // method canonicalizes the header names. 
+ req.Header["Upgrade"] = []string{"websocket"} + req.Header["Connection"] = []string{"Upgrade"} + req.Header["Sec-WebSocket-Key"] = []string{challengeKey} + req.Header["Sec-WebSocket-Version"] = []string{"13"} + if len(d.Subprotocols) > 0 { + req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} + } + for k, vs := range requestHeader { + switch { + case k == "Host": + if len(vs) > 0 { + req.Host = vs[0] + } + case k == "Upgrade" || + k == "Connection" || + k == "Sec-Websocket-Key" || + k == "Sec-Websocket-Version" || + k == "Sec-Websocket-Extensions" || + (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): + return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) + case k == "Sec-Websocket-Protocol": + req.Header["Sec-WebSocket-Protocol"] = vs + default: + req.Header[k] = vs + } + } + + if d.EnableCompression { + req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"} + } + + if d.HandshakeTimeout != 0 { + var cancel func() + ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout) + defer cancel() + } + + // Get network dial function. 
+ var netDial func(network, add string) (net.Conn, error) + + switch u.Scheme { + case "http": + if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial + } + case "https": + if d.NetDialTLSContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialTLSContext(ctx, network, addr) + } + } else if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial + } + default: + return nil, nil, errMalformedURL + } + + if netDial == nil { + netDialer := &net.Dialer{} + netDial = func(network, addr string) (net.Conn, error) { + return netDialer.DialContext(ctx, network, addr) + } + } + + // If needed, wrap the dial function to set the connection deadline. + if deadline, ok := ctx.Deadline(); ok { + forwardDial := netDial + netDial = func(network, addr string) (net.Conn, error) { + c, err := forwardDial(network, addr) + if err != nil { + return nil, err + } + err = c.SetDeadline(deadline) + if err != nil { + c.Close() + return nil, err + } + return c, nil + } + } + + // If needed, wrap the dial function to connect through a proxy. 
+ if d.Proxy != nil { + proxyURL, err := d.Proxy(req) + if err != nil { + return nil, nil, err + } + if proxyURL != nil { + dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) + if err != nil { + return nil, nil, err + } + netDial = dialer.Dial + } + } + + hostPort, hostNoPort := hostPortNoPort(u) + trace := httptrace.ContextClientTrace(ctx) + if trace != nil && trace.GetConn != nil { + trace.GetConn(hostPort) + } + + netConn, err := netDial("tcp", hostPort) + if trace != nil && trace.GotConn != nil { + trace.GotConn(httptrace.GotConnInfo{ + Conn: netConn, + }) + } + if err != nil { + return nil, nil, err + } + + defer func() { + if netConn != nil { + netConn.Close() + } + }() + + if u.Scheme == "https" && d.NetDialTLSContext == nil { + // If NetDialTLSContext is set, assume that the TLS handshake has already been done + + cfg := cloneTLSConfig(d.TLSClientConfig) + if cfg.ServerName == "" { + cfg.ServerName = hostNoPort + } + tlsConn := tls.Client(netConn, cfg) + netConn = tlsConn + + if trace != nil && trace.TLSHandshakeStart != nil { + trace.TLSHandshakeStart() + } + err := doHandshake(ctx, tlsConn, cfg) + if trace != nil && trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(tlsConn.ConnectionState(), err) + } + + if err != nil { + return nil, nil, err + } + } + + conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil) + + if err := req.Write(netConn); err != nil { + return nil, nil, err + } + + if trace != nil && trace.GotFirstResponseByte != nil { + if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 { + trace.GotFirstResponseByte() + } + } + + resp, err := http.ReadResponse(conn.br, req) + if err != nil { + return nil, nil, err + } + + if d.Jar != nil { + if rc := resp.Cookies(); len(rc) > 0 { + d.Jar.SetCookies(u, rc) + } + } + + if resp.StatusCode != 101 || + !tokenListContainsValue(resp.Header, "Upgrade", "websocket") || + !tokenListContainsValue(resp.Header, "Connection", "upgrade") || + 
resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { + // Before closing the network connection on return from this + // function, slurp up some of the response to aid application + // debugging. + buf := make([]byte, 1024) + n, _ := io.ReadFull(resp.Body, buf) + resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + return nil, resp, ErrBadHandshake + } + + for _, ext := range parseExtensions(resp.Header) { + if ext[""] != "permessage-deflate" { + continue + } + _, snct := ext["server_no_context_takeover"] + _, cnct := ext["client_no_context_takeover"] + if !snct || !cnct { + return nil, resp, errInvalidCompression + } + conn.newCompressionWriter = compressNoContextTakeover + conn.newDecompressionReader = decompressNoContextTakeover + break + } + + resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) + conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") + + netConn.SetDeadline(time.Time{}) + netConn = nil // to avoid close in defer. + return conn, resp, nil +} + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return cfg.Clone() +} diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go new file mode 100644 index 000000000..813ffb1e8 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -0,0 +1,148 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "compress/flate" + "errors" + "io" + "strings" + "sync" +) + +const ( + minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 + maxCompressionLevel = flate.BestCompression + defaultCompressionLevel = 1 +) + +var ( + flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool + flateReaderPool = sync.Pool{New: func() interface{} { + return flate.NewReader(nil) + }} +) + +func decompressNoContextTakeover(r io.Reader) io.ReadCloser { + const tail = + // Add four bytes as specified in RFC + "\x00\x00\xff\xff" + + // Add final block to squelch unexpected EOF error from flate reader. + "\x01\x00\x00\xff\xff" + + fr, _ := flateReaderPool.Get().(io.ReadCloser) + fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) + return &flateReadWrapper{fr} +} + +func isValidCompressionLevel(level int) bool { + return minCompressionLevel <= level && level <= maxCompressionLevel +} + +func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { + p := &flateWriterPools[level-minCompressionLevel] + tw := &truncWriter{w: w} + fw, _ := p.Get().(*flate.Writer) + if fw == nil { + fw, _ = flate.NewWriter(tw, level) + } else { + fw.Reset(tw) + } + return &flateWriteWrapper{fw: fw, tw: tw, p: p} +} + +// truncWriter is an io.Writer that writes all but the last four bytes of the +// stream to another io.Writer. +type truncWriter struct { + w io.WriteCloser + n int + p [4]byte +} + +func (w *truncWriter) Write(p []byte) (int, error) { + n := 0 + + // fill buffer first for simplicity. 
+ if w.n < len(w.p) { + n = copy(w.p[w.n:], p) + p = p[n:] + w.n += n + if len(p) == 0 { + return n, nil + } + } + + m := len(p) + if m > len(w.p) { + m = len(w.p) + } + + if nn, err := w.w.Write(w.p[:m]); err != nil { + return n + nn, err + } + + copy(w.p[:], w.p[m:]) + copy(w.p[len(w.p)-m:], p[len(p)-m:]) + nn, err := w.w.Write(p[:len(p)-m]) + return n + nn, err +} + +type flateWriteWrapper struct { + fw *flate.Writer + tw *truncWriter + p *sync.Pool +} + +func (w *flateWriteWrapper) Write(p []byte) (int, error) { + if w.fw == nil { + return 0, errWriteClosed + } + return w.fw.Write(p) +} + +func (w *flateWriteWrapper) Close() error { + if w.fw == nil { + return errWriteClosed + } + err1 := w.fw.Flush() + w.p.Put(w.fw) + w.fw = nil + if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { + return errors.New("websocket: internal error, unexpected bytes at end of flate stream") + } + err2 := w.tw.w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +type flateReadWrapper struct { + fr io.ReadCloser +} + +func (r *flateReadWrapper) Read(p []byte) (int, error) { + if r.fr == nil { + return 0, io.ErrClosedPipe + } + n, err := r.fr.Read(p) + if err == io.EOF { + // Preemptively place the reader back in the pool. This helps with + // scenarios where the application does not call NextReader() soon after + // this final read. + r.Close() + } + return n, err +} + +func (r *flateReadWrapper) Close() error { + if r.fr == nil { + return io.ErrClosedPipe + } + err := r.fr.Close() + flateReaderPool.Put(r.fr) + r.fr = nil + return err +} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go new file mode 100644 index 000000000..331eebc85 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -0,0 +1,1230 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "bufio" + "encoding/binary" + "errors" + "io" + "io/ioutil" + "math/rand" + "net" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" +) + +const ( + // Frame header byte 0 bits from Section 5.2 of RFC 6455 + finalBit = 1 << 7 + rsv1Bit = 1 << 6 + rsv2Bit = 1 << 5 + rsv3Bit = 1 << 4 + + // Frame header byte 1 bits from Section 5.2 of RFC 6455 + maskBit = 1 << 7 + + maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask + maxControlFramePayloadSize = 125 + + writeWait = time.Second + + defaultReadBufferSize = 4096 + defaultWriteBufferSize = 4096 + + continuationFrame = 0 + noFrame = -1 +) + +// Close codes defined in RFC 6455, section 11.7. +const ( + CloseNormalClosure = 1000 + CloseGoingAway = 1001 + CloseProtocolError = 1002 + CloseUnsupportedData = 1003 + CloseNoStatusReceived = 1005 + CloseAbnormalClosure = 1006 + CloseInvalidFramePayloadData = 1007 + ClosePolicyViolation = 1008 + CloseMessageTooBig = 1009 + CloseMandatoryExtension = 1010 + CloseInternalServerErr = 1011 + CloseServiceRestart = 1012 + CloseTryAgainLater = 1013 + CloseTLSHandshake = 1015 +) + +// The message types are defined in RFC 6455, section 11.8. +const ( + // TextMessage denotes a text data message. The text message payload is + // interpreted as UTF-8 encoded text data. + TextMessage = 1 + + // BinaryMessage denotes a binary data message. + BinaryMessage = 2 + + // CloseMessage denotes a close control message. The optional message + // payload contains a numeric code and text. Use the FormatCloseMessage + // function to format a close message payload. + CloseMessage = 8 + + // PingMessage denotes a ping control message. The optional message payload + // is UTF-8 encoded text. + PingMessage = 9 + + // PongMessage denotes a pong control message. The optional message payload + // is UTF-8 encoded text. 
+ PongMessage = 10 +) + +// ErrCloseSent is returned when the application writes a message to the +// connection after sending a close message. +var ErrCloseSent = errors.New("websocket: close sent") + +// ErrReadLimit is returned when reading a message that is larger than the +// read limit set for the connection. +var ErrReadLimit = errors.New("websocket: read limit exceeded") + +// netError satisfies the net Error interface. +type netError struct { + msg string + temporary bool + timeout bool +} + +func (e *netError) Error() string { return e.msg } +func (e *netError) Temporary() bool { return e.temporary } +func (e *netError) Timeout() bool { return e.timeout } + +// CloseError represents a close message. +type CloseError struct { + // Code is defined in RFC 6455, section 11.7. + Code int + + // Text is the optional text payload. + Text string +} + +func (e *CloseError) Error() string { + s := []byte("websocket: close ") + s = strconv.AppendInt(s, int64(e.Code), 10) + switch e.Code { + case CloseNormalClosure: + s = append(s, " (normal)"...) + case CloseGoingAway: + s = append(s, " (going away)"...) + case CloseProtocolError: + s = append(s, " (protocol error)"...) + case CloseUnsupportedData: + s = append(s, " (unsupported data)"...) + case CloseNoStatusReceived: + s = append(s, " (no status)"...) + case CloseAbnormalClosure: + s = append(s, " (abnormal closure)"...) + case CloseInvalidFramePayloadData: + s = append(s, " (invalid payload data)"...) + case ClosePolicyViolation: + s = append(s, " (policy violation)"...) + case CloseMessageTooBig: + s = append(s, " (message too big)"...) + case CloseMandatoryExtension: + s = append(s, " (mandatory extension missing)"...) + case CloseInternalServerErr: + s = append(s, " (internal server error)"...) + case CloseTLSHandshake: + s = append(s, " (TLS handshake error)"...) + } + if e.Text != "" { + s = append(s, ": "...) + s = append(s, e.Text...) 
+ } + return string(s) +} + +// IsCloseError returns boolean indicating whether the error is a *CloseError +// with one of the specified codes. +func IsCloseError(err error, codes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range codes { + if e.Code == code { + return true + } + } + } + return false +} + +// IsUnexpectedCloseError returns boolean indicating whether the error is a +// *CloseError with a code not in the list of expected codes. +func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range expectedCodes { + if e.Code == code { + return false + } + } + return true + } + return false +} + +var ( + errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} + errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} + errBadWriteOpCode = errors.New("websocket: bad write message type") + errWriteClosed = errors.New("websocket: write closed") + errInvalidControlFrame = errors.New("websocket: invalid control frame") +) + +func newMaskKey() [4]byte { + n := rand.Uint32() + return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} +} + +func hideTempErr(err error) error { + if e, ok := err.(net.Error); ok && e.Temporary() { + err = &netError{msg: e.Error(), timeout: e.Timeout()} + } + return err +} + +func isControl(frameType int) bool { + return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage +} + +func isData(frameType int) bool { + return frameType == TextMessage || frameType == BinaryMessage +} + +var validReceivedCloseCodes = map[int]bool{ + // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number + + CloseNormalClosure: true, + CloseGoingAway: true, + CloseProtocolError: true, + CloseUnsupportedData: true, + CloseNoStatusReceived: false, + CloseAbnormalClosure: false, + CloseInvalidFramePayloadData: true, + 
ClosePolicyViolation: true, + CloseMessageTooBig: true, + CloseMandatoryExtension: true, + CloseInternalServerErr: true, + CloseServiceRestart: true, + CloseTryAgainLater: true, + CloseTLSHandshake: false, +} + +func isValidReceivedCloseCode(code int) bool { + return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) +} + +// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this +// interface. The type of the value stored in a pool is not specified. +type BufferPool interface { + // Get gets a value from the pool or returns nil if the pool is empty. + Get() interface{} + // Put adds a value to the pool. + Put(interface{}) +} + +// writePoolData is the type added to the write buffer pool. This wrapper is +// used to prevent applications from peeking at and depending on the values +// added to the pool. +type writePoolData struct{ buf []byte } + +// The Conn type represents a WebSocket connection. +type Conn struct { + conn net.Conn + isServer bool + subprotocol string + + // Write fields + mu chan struct{} // used as mutex to protect write to conn + writeBuf []byte // frame is constructed in this buffer. + writePool BufferPool + writeBufSize int + writeDeadline time.Time + writer io.WriteCloser // the current writer returned to the application + isWriting bool // for best-effort concurrent write detection + + writeErrMu sync.Mutex + writeErr error + + enableWriteCompression bool + compressionLevel int + newCompressionWriter func(io.WriteCloser, int) io.WriteCloser + + // Read fields + reader io.ReadCloser // the current reader returned to the application + readErr error + br *bufio.Reader + // bytes remaining in current frame. + // set setReadRemaining to safely update this value and prevent overflow + readRemaining int64 + readFinal bool // true the current message has more frames. + readLength int64 // Message size. + readLimit int64 // Maximum message size. 
+ readMaskPos int + readMaskKey [4]byte + handlePong func(string) error + handlePing func(string) error + handleClose func(int, string) error + readErrCount int + messageReader *messageReader // the current low-level reader + + readDecompress bool // whether last read frame had RSV1 set + newDecompressionReader func(io.Reader) io.ReadCloser +} + +func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn { + + if br == nil { + if readBufferSize == 0 { + readBufferSize = defaultReadBufferSize + } else if readBufferSize < maxControlFramePayloadSize { + // must be large enough for control frame + readBufferSize = maxControlFramePayloadSize + } + br = bufio.NewReaderSize(conn, readBufferSize) + } + + if writeBufferSize <= 0 { + writeBufferSize = defaultWriteBufferSize + } + writeBufferSize += maxFrameHeaderSize + + if writeBuf == nil && writeBufferPool == nil { + writeBuf = make([]byte, writeBufferSize) + } + + mu := make(chan struct{}, 1) + mu <- struct{}{} + c := &Conn{ + isServer: isServer, + br: br, + conn: conn, + mu: mu, + readFinal: true, + writeBuf: writeBuf, + writePool: writeBufferPool, + writeBufSize: writeBufferSize, + enableWriteCompression: true, + compressionLevel: defaultCompressionLevel, + } + c.SetCloseHandler(nil) + c.SetPingHandler(nil) + c.SetPongHandler(nil) + return c +} + +// setReadRemaining tracks the number of bytes remaining on the connection. If n +// overflows, an ErrReadLimit is returned. +func (c *Conn) setReadRemaining(n int64) error { + if n < 0 { + return ErrReadLimit + } + + c.readRemaining = n + return nil +} + +// Subprotocol returns the negotiated protocol for the connection. +func (c *Conn) Subprotocol() string { + return c.subprotocol +} + +// Close closes the underlying network connection without sending or waiting +// for a close message. 
+func (c *Conn) Close() error { + return c.conn.Close() +} + +// LocalAddr returns the local network address. +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. +func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +// Write methods + +func (c *Conn) writeFatal(err error) error { + err = hideTempErr(err) + c.writeErrMu.Lock() + if c.writeErr == nil { + c.writeErr = err + } + c.writeErrMu.Unlock() + return err +} + +func (c *Conn) read(n int) ([]byte, error) { + p, err := c.br.Peek(n) + if err == io.EOF { + err = errUnexpectedEOF + } + c.br.Discard(len(p)) + return p, err +} + +func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error { + <-c.mu + defer func() { c.mu <- struct{}{} }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + if len(buf1) == 0 { + _, err = c.conn.Write(buf0) + } else { + err = c.writeBufs(buf0, buf1) + } + if err != nil { + return c.writeFatal(err) + } + if frameType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return nil +} + +func (c *Conn) writeBufs(bufs ...[]byte) error { + b := net.Buffers(bufs) + _, err := b.WriteTo(c.conn) + return err +} + +// WriteControl writes a control message with the given deadline. The allowed +// message types are CloseMessage, PingMessage and PongMessage. +func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { + if !isControl(messageType) { + return errBadWriteOpCode + } + if len(data) > maxControlFramePayloadSize { + return errInvalidControlFrame + } + + b0 := byte(messageType) | finalBit + b1 := byte(len(data)) + if !c.isServer { + b1 |= maskBit + } + + buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) + buf = append(buf, b0, b1) + + if c.isServer { + buf = append(buf, data...) + } else { + key := newMaskKey() + buf = append(buf, key[:]...) 
+ buf = append(buf, data...) + maskBytes(key, 0, buf[6:]) + } + + d := 1000 * time.Hour + if !deadline.IsZero() { + d = deadline.Sub(time.Now()) + if d < 0 { + return errWriteTimeout + } + } + + timer := time.NewTimer(d) + select { + case <-c.mu: + timer.Stop() + case <-timer.C: + return errWriteTimeout + } + defer func() { c.mu <- struct{}{} }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + _, err = c.conn.Write(buf) + if err != nil { + return c.writeFatal(err) + } + if messageType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return err +} + +// beginMessage prepares a connection and message writer for a new message. +func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { + // Close previous writer if not already closed by the application. It's + // probably better to return an error in this situation, but we cannot + // change this without breaking existing applications. + if c.writer != nil { + c.writer.Close() + c.writer = nil + } + + if !isControl(messageType) && !isData(messageType) { + return errBadWriteOpCode + } + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + mw.c = c + mw.frameType = messageType + mw.pos = maxFrameHeaderSize + + if c.writeBuf == nil { + wpd, ok := c.writePool.Get().(writePoolData) + if ok { + c.writeBuf = wpd.buf + } else { + c.writeBuf = make([]byte, c.writeBufSize) + } + } + return nil +} + +// NextWriter returns a writer for the next message to send. The writer's Close +// method flushes the complete message to the network. +// +// There can be at most one open writer on a connection. NextWriter closes the +// previous writer if the application has not already done so. +// +// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and +// PongMessage) are supported. 
+func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { + return nil, err + } + c.writer = &mw + if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { + w := c.newCompressionWriter(c.writer, c.compressionLevel) + mw.compress = true + c.writer = w + } + return c.writer, nil +} + +type messageWriter struct { + c *Conn + compress bool // whether next call to flushFrame should set RSV1 + pos int // end of data in writeBuf. + frameType int // type of the current frame. + err error +} + +func (w *messageWriter) endMessage(err error) error { + if w.err != nil { + return err + } + c := w.c + w.err = err + c.writer = nil + if c.writePool != nil { + c.writePool.Put(writePoolData{buf: c.writeBuf}) + c.writeBuf = nil + } + return err +} + +// flushFrame writes buffered data and extra as a frame to the network. The +// final argument indicates that this is the last frame in the message. +func (w *messageWriter) flushFrame(final bool, extra []byte) error { + c := w.c + length := w.pos - maxFrameHeaderSize + len(extra) + + // Check for invalid control frames. + if isControl(w.frameType) && + (!final || length > maxControlFramePayloadSize) { + return w.endMessage(errInvalidControlFrame) + } + + b0 := byte(w.frameType) + if final { + b0 |= finalBit + } + if w.compress { + b0 |= rsv1Bit + } + w.compress = false + + b1 := byte(0) + if !c.isServer { + b1 |= maskBit + } + + // Assume that the frame starts at beginning of c.writeBuf. + framePos := 0 + if c.isServer { + // Adjust up if mask not included in the header. 
+ framePos = 4 + } + + switch { + case length >= 65536: + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 127 + binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) + case length > 125: + framePos += 6 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 126 + binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) + default: + framePos += 8 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | byte(length) + } + + if !c.isServer { + key := newMaskKey() + copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) + if len(extra) > 0 { + return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode"))) + } + } + + // Write the buffers to the connection with best-effort detection of + // concurrent writes. See the concurrency section in the package + // documentation for more info. + + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + + err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) + + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + + if err != nil { + return w.endMessage(err) + } + + if final { + w.endMessage(errWriteClosed) + return nil + } + + // Setup for next frame. + w.pos = maxFrameHeaderSize + w.frameType = continuationFrame + return nil +} + +func (w *messageWriter) ncopy(max int) (int, error) { + n := len(w.c.writeBuf) - w.pos + if n <= 0 { + if err := w.flushFrame(false, nil); err != nil { + return 0, err + } + n = len(w.c.writeBuf) - w.pos + } + if n > max { + n = max + } + return n, nil +} + +func (w *messageWriter) Write(p []byte) (int, error) { + if w.err != nil { + return 0, w.err + } + + if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { + // Don't buffer large messages. 
+ err := w.flushFrame(false, p) + if err != nil { + return 0, err + } + return len(p), nil + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) WriteString(p string) (int, error) { + if w.err != nil { + return 0, w.err + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if w.err != nil { + return 0, w.err + } + for { + if w.pos == len(w.c.writeBuf) { + err = w.flushFrame(false, nil) + if err != nil { + break + } + } + var n int + n, err = r.Read(w.c.writeBuf[w.pos:]) + w.pos += n + nn += int64(n) + if err != nil { + if err == io.EOF { + err = nil + } + break + } + } + return nn, err +} + +func (w *messageWriter) Close() error { + if w.err != nil { + return w.err + } + return w.flushFrame(true, nil) +} + +// WritePreparedMessage writes prepared message into connection. +func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { + frameType, frameData, err := pm.frame(prepareKey{ + isServer: c.isServer, + compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), + compressionLevel: c.compressionLevel, + }) + if err != nil { + return err + } + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + err = c.write(frameType, c.writeDeadline, frameData, nil) + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + return err +} + +// WriteMessage is a helper method for getting a writer using NextWriter, +// writing the message and closing the writer. 
+func (c *Conn) WriteMessage(messageType int, data []byte) error { + + if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { + // Fast path with no allocations and single frame. + + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { + return err + } + n := copy(c.writeBuf[mw.pos:], data) + mw.pos += n + data = data[n:] + return mw.flushFrame(true, data) + } + + w, err := c.NextWriter(messageType) + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + return w.Close() +} + +// SetWriteDeadline sets the write deadline on the underlying network +// connection. After a write has timed out, the websocket state is corrupt and +// all future writes will return an error. A zero value for t means writes will +// not time out. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = t + return nil +} + +// Read methods + +func (c *Conn) advanceFrame() (int, error) { + // 1. Skip remainder of previous frame. + + if c.readRemaining > 0 { + if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { + return noFrame, err + } + } + + // 2. Read and parse first two bytes of frame header. + // To aid debugging, collect and report all errors in the first two bytes + // of the header. 
+ + var errors []string + + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + frameType := int(p[0] & 0xf) + final := p[0]&finalBit != 0 + rsv1 := p[0]&rsv1Bit != 0 + rsv2 := p[0]&rsv2Bit != 0 + rsv3 := p[0]&rsv3Bit != 0 + mask := p[1]&maskBit != 0 + c.setReadRemaining(int64(p[1] & 0x7f)) + + c.readDecompress = false + if rsv1 { + if c.newDecompressionReader != nil { + c.readDecompress = true + } else { + errors = append(errors, "RSV1 set") + } + } + + if rsv2 { + errors = append(errors, "RSV2 set") + } + + if rsv3 { + errors = append(errors, "RSV3 set") + } + + switch frameType { + case CloseMessage, PingMessage, PongMessage: + if c.readRemaining > maxControlFramePayloadSize { + errors = append(errors, "len > 125 for control") + } + if !final { + errors = append(errors, "FIN not set on control") + } + case TextMessage, BinaryMessage: + if !c.readFinal { + errors = append(errors, "data before FIN") + } + c.readFinal = final + case continuationFrame: + if c.readFinal { + errors = append(errors, "continuation after FIN") + } + c.readFinal = final + default: + errors = append(errors, "bad opcode "+strconv.Itoa(frameType)) + } + + if mask != c.isServer { + errors = append(errors, "bad MASK") + } + + if len(errors) > 0 { + return noFrame, c.handleProtocolError(strings.Join(errors, ", ")) + } + + // 3. Read and parse frame length as per + // https://tools.ietf.org/html/rfc6455#section-5.2 + // + // The length of the "Payload data", in bytes: if 0-125, that is the payload + // length. + // - If 126, the following 2 bytes interpreted as a 16-bit unsigned + // integer are the payload length. + // - If 127, the following 8 bytes interpreted as + // a 64-bit unsigned integer (the most significant bit MUST be 0) are the + // payload length. Multibyte length quantities are expressed in network byte + // order. 
+ + switch c.readRemaining { + case 126: + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + if err := c.setReadRemaining(int64(binary.BigEndian.Uint16(p))); err != nil { + return noFrame, err + } + case 127: + p, err := c.read(8) + if err != nil { + return noFrame, err + } + + if err := c.setReadRemaining(int64(binary.BigEndian.Uint64(p))); err != nil { + return noFrame, err + } + } + + // 4. Handle frame masking. + + if mask { + c.readMaskPos = 0 + p, err := c.read(len(c.readMaskKey)) + if err != nil { + return noFrame, err + } + copy(c.readMaskKey[:], p) + } + + // 5. For text and binary messages, enforce read limit and return. + + if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { + + c.readLength += c.readRemaining + // Don't allow readLength to overflow in the presence of a large readRemaining + // counter. + if c.readLength < 0 { + return noFrame, ErrReadLimit + } + + if c.readLimit > 0 && c.readLength > c.readLimit { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) + return noFrame, ErrReadLimit + } + + return frameType, nil + } + + // 6. Read control frame payload. + + var payload []byte + if c.readRemaining > 0 { + payload, err = c.read(int(c.readRemaining)) + c.setReadRemaining(0) + if err != nil { + return noFrame, err + } + if c.isServer { + maskBytes(c.readMaskKey, 0, payload) + } + } + + // 7. Process control frame payload. 
+ + switch frameType { + case PongMessage: + if err := c.handlePong(string(payload)); err != nil { + return noFrame, err + } + case PingMessage: + if err := c.handlePing(string(payload)); err != nil { + return noFrame, err + } + case CloseMessage: + closeCode := CloseNoStatusReceived + closeText := "" + if len(payload) >= 2 { + closeCode = int(binary.BigEndian.Uint16(payload)) + if !isValidReceivedCloseCode(closeCode) { + return noFrame, c.handleProtocolError("bad close code " + strconv.Itoa(closeCode)) + } + closeText = string(payload[2:]) + if !utf8.ValidString(closeText) { + return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") + } + } + if err := c.handleClose(closeCode, closeText); err != nil { + return noFrame, err + } + return noFrame, &CloseError{Code: closeCode, Text: closeText} + } + + return frameType, nil +} + +func (c *Conn) handleProtocolError(message string) error { + data := FormatCloseMessage(CloseProtocolError, message) + if len(data) > maxControlFramePayloadSize { + data = data[:maxControlFramePayloadSize] + } + c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)) + return errors.New("websocket: " + message) +} + +// NextReader returns the next data message received from the peer. The +// returned messageType is either TextMessage or BinaryMessage. +// +// There can be at most one open reader on a connection. NextReader discards +// the previous message if the application has not already consumed it. +// +// Applications must break out of the application's read loop when this method +// returns a non-nil error value. Errors returned from this method are +// permanent. Once this method returns a non-nil error, all subsequent calls to +// this method return the same error. +func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { + // Close previous reader, only relevant for decompression. 
+ if c.reader != nil { + c.reader.Close() + c.reader = nil + } + + c.messageReader = nil + c.readLength = 0 + + for c.readErr == nil { + frameType, err := c.advanceFrame() + if err != nil { + c.readErr = hideTempErr(err) + break + } + + if frameType == TextMessage || frameType == BinaryMessage { + c.messageReader = &messageReader{c} + c.reader = c.messageReader + if c.readDecompress { + c.reader = c.newDecompressionReader(c.reader) + } + return frameType, c.reader, nil + } + } + + // Applications that do handle the error returned from this method spin in + // tight loop on connection failure. To help application developers detect + // this error, panic on repeated reads to the failed connection. + c.readErrCount++ + if c.readErrCount >= 1000 { + panic("repeated read on failed websocket connection") + } + + return noFrame, nil, c.readErr +} + +type messageReader struct{ c *Conn } + +func (r *messageReader) Read(b []byte) (int, error) { + c := r.c + if c.messageReader != r { + return 0, io.EOF + } + + for c.readErr == nil { + + if c.readRemaining > 0 { + if int64(len(b)) > c.readRemaining { + b = b[:c.readRemaining] + } + n, err := c.br.Read(b) + c.readErr = hideTempErr(err) + if c.isServer { + c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) + } + rem := c.readRemaining + rem -= int64(n) + c.setReadRemaining(rem) + if c.readRemaining > 0 && c.readErr == io.EOF { + c.readErr = errUnexpectedEOF + } + return n, c.readErr + } + + if c.readFinal { + c.messageReader = nil + return 0, io.EOF + } + + frameType, err := c.advanceFrame() + switch { + case err != nil: + c.readErr = hideTempErr(err) + case frameType == TextMessage || frameType == BinaryMessage: + c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") + } + } + + err := c.readErr + if err == io.EOF && c.messageReader == r { + err = errUnexpectedEOF + } + return 0, err +} + +func (r *messageReader) Close() error { + return nil +} + +// ReadMessage is a helper method 
for getting a reader using NextReader and +// reading from that reader to a buffer. +func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { + var r io.Reader + messageType, r, err = c.NextReader() + if err != nil { + return messageType, nil, err + } + p, err = ioutil.ReadAll(r) + return messageType, p, err +} + +// SetReadDeadline sets the read deadline on the underlying network connection. +// After a read has timed out, the websocket connection state is corrupt and +// all future reads will return an error. A zero value for t means reads will +// not time out. +func (c *Conn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a +// message exceeds the limit, the connection sends a close message to the peer +// and returns ErrReadLimit to the application. +func (c *Conn) SetReadLimit(limit int64) { + c.readLimit = limit +} + +// CloseHandler returns the current close handler +func (c *Conn) CloseHandler() func(code int, text string) error { + return c.handleClose +} + +// SetCloseHandler sets the handler for close messages received from the peer. +// The code argument to h is the received close code or CloseNoStatusReceived +// if the close message is empty. The default close handler sends a close +// message back to the peer. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// close messages as described in the section on Control Messages above. +// +// The connection read methods return a CloseError when a close message is +// received. Most applications should handle close messages as part of their +// normal error handling. Applications should only set a close handler when the +// application must perform some action before sending a close message back to +// the peer. 
+func (c *Conn) SetCloseHandler(h func(code int, text string) error) { + if h == nil { + h = func(code int, text string) error { + message := FormatCloseMessage(code, "") + c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) + return nil + } + } + c.handleClose = h +} + +// PingHandler returns the current ping handler +func (c *Conn) PingHandler() func(appData string) error { + return c.handlePing +} + +// SetPingHandler sets the handler for ping messages received from the peer. +// The appData argument to h is the PING message application data. The default +// ping handler sends a pong to the peer. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// ping messages as described in the section on Control Messages above. +func (c *Conn) SetPingHandler(h func(appData string) error) { + if h == nil { + h = func(message string) error { + err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) + if err == ErrCloseSent { + return nil + } else if e, ok := err.(net.Error); ok && e.Temporary() { + return nil + } + return err + } + } + c.handlePing = h +} + +// PongHandler returns the current pong handler +func (c *Conn) PongHandler() func(appData string) error { + return c.handlePong +} + +// SetPongHandler sets the handler for pong messages received from the peer. +// The appData argument to h is the PONG message application data. The default +// pong handler does nothing. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// pong messages as described in the section on Control Messages above. +func (c *Conn) SetPongHandler(h func(appData string) error) { + if h == nil { + h = func(string) error { return nil } + } + c.handlePong = h +} + +// UnderlyingConn returns the internal net.Conn. 
This can be used to further +// modifications to connection specific flags. +func (c *Conn) UnderlyingConn() net.Conn { + return c.conn +} + +// EnableWriteCompression enables and disables write compression of +// subsequent text and binary messages. This function is a noop if +// compression was not negotiated with the peer. +func (c *Conn) EnableWriteCompression(enable bool) { + c.enableWriteCompression = enable +} + +// SetCompressionLevel sets the flate compression level for subsequent text and +// binary messages. This function is a noop if compression was not negotiated +// with the peer. See the compress/flate package for a description of +// compression levels. +func (c *Conn) SetCompressionLevel(level int) error { + if !isValidCompressionLevel(level) { + return errors.New("websocket: invalid compression level") + } + c.compressionLevel = level + return nil +} + +// FormatCloseMessage formats closeCode and text as a WebSocket close message. +// An empty message is returned for code CloseNoStatusReceived. +func FormatCloseMessage(closeCode int, text string) []byte { + if closeCode == CloseNoStatusReceived { + // Return empty message because it's illegal to send + // CloseNoStatusReceived. Return non-nil value in case application + // checks for nil. + return []byte{} + } + buf := make([]byte, 2+len(text)) + binary.BigEndian.PutUint16(buf, uint16(closeCode)) + copy(buf[2:], text) + return buf +} diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go new file mode 100644 index 000000000..8db0cef95 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/doc.go @@ -0,0 +1,227 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements the WebSocket protocol defined in RFC 6455. +// +// Overview +// +// The Conn type represents a WebSocket connection. 
A server application calls +// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn: +// +// var upgrader = websocket.Upgrader{ +// ReadBufferSize: 1024, +// WriteBufferSize: 1024, +// } +// +// func handler(w http.ResponseWriter, r *http.Request) { +// conn, err := upgrader.Upgrade(w, r, nil) +// if err != nil { +// log.Println(err) +// return +// } +// ... Use conn to send and receive messages. +// } +// +// Call the connection's WriteMessage and ReadMessage methods to send and +// receive messages as a slice of bytes. This snippet of code shows how to echo +// messages using these methods: +// +// for { +// messageType, p, err := conn.ReadMessage() +// if err != nil { +// log.Println(err) +// return +// } +// if err := conn.WriteMessage(messageType, p); err != nil { +// log.Println(err) +// return +// } +// } +// +// In above snippet of code, p is a []byte and messageType is an int with value +// websocket.BinaryMessage or websocket.TextMessage. +// +// An application can also send and receive messages using the io.WriteCloser +// and io.Reader interfaces. To send a message, call the connection NextWriter +// method to get an io.WriteCloser, write the message to the writer and close +// the writer when done. To receive a message, call the connection NextReader +// method to get an io.Reader and read until io.EOF is returned. This snippet +// shows how to echo messages using the NextWriter and NextReader methods: +// +// for { +// messageType, r, err := conn.NextReader() +// if err != nil { +// return +// } +// w, err := conn.NextWriter(messageType) +// if err != nil { +// return err +// } +// if _, err := io.Copy(w, r); err != nil { +// return err +// } +// if err := w.Close(); err != nil { +// return err +// } +// } +// +// Data Messages +// +// The WebSocket protocol distinguishes between text and binary data messages. +// Text messages are interpreted as UTF-8 encoded text. 
The interpretation of +// binary messages is left to the application. +// +// This package uses the TextMessage and BinaryMessage integer constants to +// identify the two data message types. The ReadMessage and NextReader methods +// return the type of the received message. The messageType argument to the +// WriteMessage and NextWriter methods specifies the type of a sent message. +// +// It is the application's responsibility to ensure that text messages are +// valid UTF-8 encoded text. +// +// Control Messages +// +// The WebSocket protocol defines three types of control messages: close, ping +// and pong. Call the connection WriteControl, WriteMessage or NextWriter +// methods to send a control message to the peer. +// +// Connections handle received close messages by calling the handler function +// set with the SetCloseHandler method and by returning a *CloseError from the +// NextReader, ReadMessage or the message Read method. The default close +// handler sends a close message to the peer. +// +// Connections handle received ping messages by calling the handler function +// set with the SetPingHandler method. The default ping handler sends a pong +// message to the peer. +// +// Connections handle received pong messages by calling the handler function +// set with the SetPongHandler method. The default pong handler does nothing. +// If an application sends ping messages, then the application should set a +// pong handler to receive the corresponding pong. +// +// The control message handler functions are called from the NextReader, +// ReadMessage and message reader Read methods. The default close and ping +// handlers can block these methods for a short time when the handler writes to +// the connection. +// +// The application must read the connection to process close, ping and pong +// messages sent from the peer. 
If the application is not otherwise interested +// in messages from the peer, then the application should start a goroutine to +// read and discard messages from the peer. A simple example is: +// +// func readLoop(c *websocket.Conn) { +// for { +// if _, _, err := c.NextReader(); err != nil { +// c.Close() +// break +// } +// } +// } +// +// Concurrency +// +// Connections support one concurrent reader and one concurrent writer. +// +// Applications are responsible for ensuring that no more than one goroutine +// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, +// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and +// that no more than one goroutine calls the read methods (NextReader, +// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) +// concurrently. +// +// The Close and WriteControl methods can be called concurrently with all other +// methods. +// +// Origin Considerations +// +// Web browsers allow Javascript applications to open a WebSocket connection to +// any host. It's up to the server to enforce an origin policy using the Origin +// request header sent by the browser. +// +// The Upgrader calls the function specified in the CheckOrigin field to check +// the origin. If the CheckOrigin function returns false, then the Upgrade +// method fails the WebSocket handshake with HTTP status 403. +// +// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail +// the handshake if the Origin request header is present and the Origin host is +// not equal to the Host request header. +// +// The deprecated package-level Upgrade function does not perform origin +// checking. The application is responsible for checking the Origin header +// before calling the Upgrade function. +// +// Buffers +// +// Connections buffer network input and output to reduce the number +// of system calls when reading or writing messages. 
+// +// Write buffers are also used for constructing WebSocket frames. See RFC 6455, +// Section 5 for a discussion of message framing. A WebSocket frame header is +// written to the network each time a write buffer is flushed to the network. +// Decreasing the size of the write buffer can increase the amount of framing +// overhead on the connection. +// +// The buffer sizes in bytes are specified by the ReadBufferSize and +// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default +// size of 4096 when a buffer size field is set to zero. The Upgrader reuses +// buffers created by the HTTP server when a buffer size field is set to zero. +// The HTTP server buffers have a size of 4096 at the time of this writing. +// +// The buffer sizes do not limit the size of a message that can be read or +// written by a connection. +// +// Buffers are held for the lifetime of the connection by default. If the +// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the +// write buffer only when writing a message. +// +// Applications should tune the buffer sizes to balance memory use and +// performance. Increasing the buffer size uses more memory, but can reduce the +// number of system calls to read or write the network. In the case of writing, +// increasing the buffer size can reduce the number of frame headers written to +// the network. +// +// Some guidelines for setting buffer parameters are: +// +// Limit the buffer sizes to the maximum expected message size. Buffers larger +// than the largest message do not provide any benefit. +// +// Depending on the distribution of message sizes, setting the buffer size to +// a value less than the maximum expected message size can greatly reduce memory +// use with a small impact on performance. 
Here's an example: If 99% of the +// messages are smaller than 256 bytes and the maximum message size is 512 +// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls +// than a buffer size of 512 bytes. The memory savings is 50%. +// +// A write buffer pool is useful when the application has a modest number +// writes over a large number of connections. when buffers are pooled, a larger +// buffer size has a reduced impact on total memory use and has the benefit of +// reducing system calls and frame overhead. +// +// Compression EXPERIMENTAL +// +// Per message compression extensions (RFC 7692) are experimentally supported +// by this package in a limited capacity. Setting the EnableCompression option +// to true in Dialer or Upgrader will attempt to negotiate per message deflate +// support. +// +// var upgrader = websocket.Upgrader{ +// EnableCompression: true, +// } +// +// If compression was successfully negotiated with the connection's peer, any +// message received in compressed form will be automatically decompressed. +// All Read methods will return uncompressed bytes. +// +// Per message compression of messages written to a connection can be enabled +// or disabled by calling the corresponding Conn method: +// +// conn.EnableWriteCompression(false) +// +// Currently this package does not support compression with "context takeover". +// This means that messages must be compressed and decompressed in isolation, +// without retaining sliding window or dictionary state across messages. For +// more details refer to RFC 7692. +// +// Use of compression is experimental and may result in decreased performance. +package websocket diff --git a/vendor/github.com/gorilla/websocket/join.go b/vendor/github.com/gorilla/websocket/join.go new file mode 100644 index 000000000..c64f8c829 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/join.go @@ -0,0 +1,42 @@ +// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "io" + "strings" +) + +// JoinMessages concatenates received messages to create a single io.Reader. +// The string term is appended to each message. The returned reader does not +// support concurrent calls to the Read method. +func JoinMessages(c *Conn, term string) io.Reader { + return &joinReader{c: c, term: term} +} + +type joinReader struct { + c *Conn + term string + r io.Reader +} + +func (r *joinReader) Read(p []byte) (int, error) { + if r.r == nil { + var err error + _, r.r, err = r.c.NextReader() + if err != nil { + return 0, err + } + if r.term != "" { + r.r = io.MultiReader(r.r, strings.NewReader(r.term)) + } + } + n, err := r.r.Read(p) + if err == io.EOF { + err = nil + r.r = nil + } + return n, err +} diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go new file mode 100644 index 000000000..dc2c1f641 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/json.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "encoding/json" + "io" +) + +// WriteJSON writes the JSON encoding of v as a message. +// +// Deprecated: Use c.WriteJSON instead. +func WriteJSON(c *Conn, v interface{}) error { + return c.WriteJSON(v) +} + +// WriteJSON writes the JSON encoding of v as a message. +// +// See the documentation for encoding/json Marshal for details about the +// conversion of Go values to JSON. 
+func (c *Conn) WriteJSON(v interface{}) error { + w, err := c.NextWriter(TextMessage) + if err != nil { + return err + } + err1 := json.NewEncoder(w).Encode(v) + err2 := w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// Deprecated: Use c.ReadJSON instead. +func ReadJSON(c *Conn, v interface{}) error { + return c.ReadJSON(v) +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// See the documentation for the encoding/json Unmarshal function for details +// about the conversion of JSON to a Go value. +func (c *Conn) ReadJSON(v interface{}) error { + _, r, err := c.NextReader() + if err != nil { + return err + } + err = json.NewDecoder(r).Decode(v) + if err == io.EOF { + // One value is expected in the message. + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go new file mode 100644 index 000000000..d0742bf2a --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -0,0 +1,55 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +//go:build !appengine +// +build !appengine + +package websocket + +import "unsafe" + +const wordSize = int(unsafe.Sizeof(uintptr(0))) + +func maskBytes(key [4]byte, pos int, b []byte) int { + // Mask one byte at a time for small buffers. + if len(b) < 2*wordSize { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 + } + + // Mask one byte at a time to word boundary. + if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { + n = wordSize - n + for i := range b[:n] { + b[i] ^= key[pos&3] + pos++ + } + b = b[n:] + } + + // Create aligned word size key. 
+ var k [wordSize]byte + for i := range k { + k[i] = key[(pos+i)&3] + } + kw := *(*uintptr)(unsafe.Pointer(&k)) + + // Mask one word at a time. + n := (len(b) / wordSize) * wordSize + for i := 0; i < n; i += wordSize { + *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw + } + + // Mask one byte at a time for remaining bytes. + b = b[n:] + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go new file mode 100644 index 000000000..36250ca7c --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask_safe.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +//go:build appengine +// +build appengine + +package websocket + +func maskBytes(key [4]byte, pos int, b []byte) int { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go new file mode 100644 index 000000000..c854225e9 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/prepared.go @@ -0,0 +1,102 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "net" + "sync" + "time" +) + +// PreparedMessage caches on the wire representations of a message payload. +// Use PreparedMessage to efficiently send a message payload to multiple +// connections. PreparedMessage is especially useful when compression is used +// because the CPU and memory expensive compression operation can be executed +// once for a given set of compression options. 
+type PreparedMessage struct { + messageType int + data []byte + mu sync.Mutex + frames map[prepareKey]*preparedFrame +} + +// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. +type prepareKey struct { + isServer bool + compress bool + compressionLevel int +} + +// preparedFrame contains data in wire representation. +type preparedFrame struct { + once sync.Once + data []byte +} + +// NewPreparedMessage returns an initialized PreparedMessage. You can then send +// it to connection using WritePreparedMessage method. Valid wire +// representation will be calculated lazily only once for a set of current +// connection options. +func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { + pm := &PreparedMessage{ + messageType: messageType, + frames: make(map[prepareKey]*preparedFrame), + data: data, + } + + // Prepare a plain server frame. + _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) + if err != nil { + return nil, err + } + + // To protect against caller modifying the data argument, remember the data + // copied to the plain server frame. + pm.data = frameData[len(frameData)-len(data):] + return pm, nil +} + +func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { + pm.mu.Lock() + frame, ok := pm.frames[key] + if !ok { + frame = &preparedFrame{} + pm.frames[key] = frame + } + pm.mu.Unlock() + + var err error + frame.once.Do(func() { + // Prepare a frame using a 'fake' connection. + // TODO: Refactor code in conn.go to allow more direct construction of + // the frame. 
+ mu := make(chan struct{}, 1) + mu <- struct{}{} + var nc prepareConn + c := &Conn{ + conn: &nc, + mu: mu, + isServer: key.isServer, + compressionLevel: key.compressionLevel, + enableWriteCompression: true, + writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize), + } + if key.compress { + c.newCompressionWriter = compressNoContextTakeover + } + err = c.WriteMessage(pm.messageType, pm.data) + frame.data = nc.buf.Bytes() + }) + return pm.messageType, frame.data, err +} + +type prepareConn struct { + buf bytes.Buffer + net.Conn +} + +func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) } +func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go new file mode 100644 index 000000000..e0f466b72 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/proxy.go @@ -0,0 +1,77 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "bufio" + "encoding/base64" + "errors" + "net" + "net/http" + "net/url" + "strings" +) + +type netDialerFunc func(network, addr string) (net.Conn, error) + +func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { + return fn(network, addr) +} + +func init() { + proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { + return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil + }) +} + +type httpProxyDialer struct { + proxyURL *url.URL + forwardDial func(network, addr string) (net.Conn, error) +} + +func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) { + hostPort, _ := hostPortNoPort(hpd.proxyURL) + conn, err := hpd.forwardDial(network, hostPort) + if err != nil { + return nil, err + } + + connectHeader := make(http.Header) + if user := hpd.proxyURL.User; user != nil { + proxyUser := user.Username() + if proxyPassword, passwordSet := user.Password(); passwordSet { + credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) + connectHeader.Set("Proxy-Authorization", "Basic "+credential) + } + } + + connectReq := &http.Request{ + Method: http.MethodConnect, + URL: &url.URL{Opaque: addr}, + Host: addr, + Header: connectHeader, + } + + if err := connectReq.Write(conn); err != nil { + conn.Close() + return nil, err + } + + // Read response. It's OK to use and discard buffered reader here becaue + // the remote server does not speak until spoken to. 
+ br := bufio.NewReader(conn) + resp, err := http.ReadResponse(br, connectReq) + if err != nil { + conn.Close() + return nil, err + } + + if resp.StatusCode != 200 { + conn.Close() + f := strings.SplitN(resp.Status, " ", 2) + return nil, errors.New(f[1]) + } + return conn, nil +} diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go new file mode 100644 index 000000000..24d53b38a --- /dev/null +++ b/vendor/github.com/gorilla/websocket/server.go @@ -0,0 +1,365 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "errors" + "io" + "net/http" + "net/url" + "strings" + "time" +) + +// HandshakeError describes an error with the handshake from the peer. +type HandshakeError struct { + message string +} + +func (e HandshakeError) Error() string { return e.message } + +// Upgrader specifies parameters for upgrading an HTTP connection to a +// WebSocket connection. +// +// It is safe to call Upgrader's methods concurrently. +type Upgrader struct { + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer + // size is zero, then buffers allocated by the HTTP server are used. The + // I/O buffer sizes do not limit the size of the messages that can be sent + // or received. + ReadBufferSize, WriteBufferSize int + + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. + // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. 
+ WriteBufferPool BufferPool + + // Subprotocols specifies the server's supported protocols in order of + // preference. If this field is not nil, then the Upgrade method negotiates a + // subprotocol by selecting the first match in this list with a protocol + // requested by the client. If there's no match, then no protocol is + // negotiated (the Sec-Websocket-Protocol header is not included in the + // handshake response). + Subprotocols []string + + // Error specifies the function for generating HTTP error responses. If Error + // is nil, then http.Error is used to generate the HTTP response. + Error func(w http.ResponseWriter, r *http.Request, status int, reason error) + + // CheckOrigin returns true if the request Origin header is acceptable. If + // CheckOrigin is nil, then a safe default is used: return false if the + // Origin request header is present and the origin host is not equal to + // request Host header. + // + // A CheckOrigin function should carefully validate the request origin to + // prevent cross-site request forgery. + CheckOrigin func(r *http.Request) bool + + // EnableCompression specify if the server should attempt to negotiate per + // message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool +} + +func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { + err := HandshakeError{reason} + if u.Error != nil { + u.Error(w, r, status, err) + } else { + w.Header().Set("Sec-Websocket-Version", "13") + http.Error(w, http.StatusText(status), status) + } + return nil, err +} + +// checkSameOrigin returns true if the origin is not set or is equal to the request host. 
+func checkSameOrigin(r *http.Request) bool { + origin := r.Header["Origin"] + if len(origin) == 0 { + return true + } + u, err := url.Parse(origin[0]) + if err != nil { + return false + } + return equalASCIIFold(u.Host, r.Host) +} + +func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { + if u.Subprotocols != nil { + clientProtocols := Subprotocols(r) + for _, serverProtocol := range u.Subprotocols { + for _, clientProtocol := range clientProtocols { + if clientProtocol == serverProtocol { + return clientProtocol + } + } + } + } else if responseHeader != nil { + return responseHeader.Get("Sec-Websocket-Protocol") + } + return "" +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie). To specify +// subprotocols supported by the server, set Upgrader.Subprotocols directly. +// +// If the upgrade fails, then Upgrade replies to the client with an HTTP error +// response. 
+func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { + const badHandshake = "websocket: the client is not using the websocket protocol: " + + if !tokenListContainsValue(r.Header, "Connection", "upgrade") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header") + } + + if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header") + } + + if r.Method != http.MethodGet { + return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET") + } + + if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") + } + + if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported") + } + + checkOrigin := u.CheckOrigin + if checkOrigin == nil { + checkOrigin = checkSameOrigin + } + if !checkOrigin(r) { + return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin") + } + + challengeKey := r.Header.Get("Sec-Websocket-Key") + if challengeKey == "" { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank") + } + + subprotocol := u.selectSubprotocol(r, responseHeader) + + // Negotiate PMCE + var compress bool + if u.EnableCompression { + for _, ext := range parseExtensions(r.Header) { + if ext[""] != "permessage-deflate" { + continue + } + compress = true + break + } + } + + h, ok := w.(http.Hijacker) + if !ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does 
not implement http.Hijacker") + } + var brw *bufio.ReadWriter + netConn, brw, err := h.Hijack() + if err != nil { + return u.returnError(w, r, http.StatusInternalServerError, err.Error()) + } + + if brw.Reader.Buffered() > 0 { + netConn.Close() + return nil, errors.New("websocket: client sent data before handshake is complete") + } + + var br *bufio.Reader + if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 { + // Reuse hijacked buffered reader as connection reader. + br = brw.Reader + } + + buf := bufioWriterBuffer(netConn, brw.Writer) + + var writeBuf []byte + if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 { + // Reuse hijacked write buffer as connection buffer. + writeBuf = buf + } + + c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf) + c.subprotocol = subprotocol + + if compress { + c.newCompressionWriter = compressNoContextTakeover + c.newDecompressionReader = decompressNoContextTakeover + } + + // Use larger of hijacked buffer and connection write buffer for header. + p := buf + if len(c.writeBuf) > len(p) { + p = c.writeBuf + } + p = p[:0] + + p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) + p = append(p, computeAcceptKey(challengeKey)...) + p = append(p, "\r\n"...) + if c.subprotocol != "" { + p = append(p, "Sec-WebSocket-Protocol: "...) + p = append(p, c.subprotocol...) + p = append(p, "\r\n"...) + } + if compress { + p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) + } + for k, vs := range responseHeader { + if k == "Sec-Websocket-Protocol" { + continue + } + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + for i := 0; i < len(v); i++ { + b := v[i] + if b <= 31 { + // prevent response splitting. + b = ' ' + } + p = append(p, b) + } + p = append(p, "\r\n"...) 
+ } + } + p = append(p, "\r\n"...) + + // Clear deadlines set by HTTP server. + netConn.SetDeadline(time.Time{}) + + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) + } + if _, err = netConn.Write(p); err != nil { + netConn.Close() + return nil, err + } + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Time{}) + } + + return c, nil +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// Deprecated: Use websocket.Upgrader instead. +// +// Upgrade does not perform origin checking. The application is responsible for +// checking the Origin header before calling Upgrade. An example implementation +// of the same origin policy check is: +// +// if req.Header.Get("Origin") != "http://"+req.Host { +// http.Error(w, "Origin not allowed", http.StatusForbidden) +// return +// } +// +// If the endpoint supports subprotocols, then the application is responsible +// for negotiating the protocol used on the connection. Use the Subprotocols() +// function to get the subprotocols requested by the client. Use the +// Sec-Websocket-Protocol response header to specify the subprotocol selected +// by the application. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// negotiated subprotocol (Sec-Websocket-Protocol). +// +// The connection buffers IO to the underlying network connection. The +// readBufSize and writeBufSize parameters specify the size of the buffers to +// use. Messages can be larger than the buffers. +// +// If the request is not a valid WebSocket handshake, then Upgrade returns an +// error of type HandshakeError. Applications should handle this error by +// replying to the client with an HTTP error response. 
+func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { + u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} + u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { + // don't return errors to maintain backwards compatibility + } + u.CheckOrigin = func(r *http.Request) bool { + // allow all connections by default + return true + } + return u.Upgrade(w, r, responseHeader) +} + +// Subprotocols returns the subprotocols requested by the client in the +// Sec-Websocket-Protocol header. +func Subprotocols(r *http.Request) []string { + h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) + if h == "" { + return nil + } + protocols := strings.Split(h, ",") + for i := range protocols { + protocols[i] = strings.TrimSpace(protocols[i]) + } + return protocols +} + +// IsWebSocketUpgrade returns true if the client requested upgrade to the +// WebSocket protocol. +func IsWebSocketUpgrade(r *http.Request) bool { + return tokenListContainsValue(r.Header, "Connection", "upgrade") && + tokenListContainsValue(r.Header, "Upgrade", "websocket") +} + +// bufioReaderSize size returns the size of a bufio.Reader. +func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int { + // This code assumes that peek on a reset reader returns + // bufio.Reader.buf[:0]. + // TODO: Use bufio.Reader.Size() after Go 1.10 + br.Reset(originalReader) + if p, err := br.Peek(0); err == nil { + return cap(p) + } + return 0 +} + +// writeHook is an io.Writer that records the last slice passed to it vio +// io.Writer.Write. +type writeHook struct { + p []byte +} + +func (wh *writeHook) Write(p []byte) (int, error) { + wh.p = p + return len(p), nil +} + +// bufioWriterBuffer grabs the buffer from a bufio.Writer. 
+func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte { + // This code assumes that bufio.Writer.buf[:1] is passed to the + // bufio.Writer's underlying writer. + var wh writeHook + bw.Reset(&wh) + bw.WriteByte(0) + bw.Flush() + + bw.Reset(originalWriter) + + return wh.p[:cap(wh.p)] +} diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go new file mode 100644 index 000000000..a62b68ccb --- /dev/null +++ b/vendor/github.com/gorilla/websocket/tls_handshake.go @@ -0,0 +1,21 @@ +//go:build go1.17 +// +build go1.17 + +package websocket + +import ( + "context" + "crypto/tls" +) + +func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.HandshakeContext(ctx); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/tls_handshake_116.go b/vendor/github.com/gorilla/websocket/tls_handshake_116.go new file mode 100644 index 000000000..e1b2b44f6 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/tls_handshake_116.go @@ -0,0 +1,21 @@ +//go:build !go1.17 +// +build !go1.17 + +package websocket + +import ( + "context" + "crypto/tls" +) + +func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.Handshake(); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go new file mode 100644 index 000000000..7bf2f66c6 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/util.go @@ -0,0 +1,283 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "io" + "net/http" + "strings" + "unicode/utf8" +) + +var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +func computeAcceptKey(challengeKey string) string { + h := sha1.New() + h.Write([]byte(challengeKey)) + h.Write(keyGUID) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func generateChallengeKey() (string, error) { + p := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, p); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(p), nil +} + +// Token octets per RFC 2616. +var isTokenOctet = [256]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +// skipSpace returns a slice of the string s with all leading RFC 2616 linear +// whitespace removed. 
+func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if b := s[i]; b != ' ' && b != '\t' { + break + } + } + return s[i:] +} + +// nextToken returns the leading RFC 2616 token of s and the string following +// the token. +func nextToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if !isTokenOctet[s[i]] { + break + } + } + return s[:i], s[i:] +} + +// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616 +// and the string following the token or quoted string. +func nextTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return nextToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} + +// equalASCIIFold returns true if s is equal to t with ASCII case folding as +// defined in RFC 4790. +func equalASCIIFold(s, t string) bool { + for s != "" && t != "" { + sr, size := utf8.DecodeRuneInString(s) + s = s[size:] + tr, size := utf8.DecodeRuneInString(t) + t = t[size:] + if sr == tr { + continue + } + if 'A' <= sr && sr <= 'Z' { + sr = sr + 'a' - 'A' + } + if 'A' <= tr && tr <= 'Z' { + tr = tr + 'a' - 'A' + } + if sr != tr { + return false + } + } + return s == t +} + +// tokenListContainsValue returns true if the 1#token header with the given +// name contains a token equal to value with ASCII case folding. 
+func tokenListContainsValue(header http.Header, name string, value string) bool { +headers: + for _, s := range header[name] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + s = skipSpace(s) + if s != "" && s[0] != ',' { + continue headers + } + if equalASCIIFold(t, value) { + return true + } + if s == "" { + continue headers + } + s = s[1:] + } + } + return false +} + +// parseExtensions parses WebSocket extensions from a header. +func parseExtensions(header http.Header) []map[string]string { + // From RFC 6455: + // + // Sec-WebSocket-Extensions = extension-list + // extension-list = 1#extension + // extension = extension-token *( ";" extension-param ) + // extension-token = registered-token + // registered-token = token + // extension-param = token [ "=" (token | quoted-string) ] + // ;When using the quoted-string syntax variant, the value + // ;after quoted-string unescaping MUST conform to the + // ;'token' ABNF. + + var result []map[string]string +headers: + for _, s := range header["Sec-Websocket-Extensions"] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + ext := map[string]string{"": t} + for { + s = skipSpace(s) + if !strings.HasPrefix(s, ";") { + break + } + var k string + k, s = nextToken(skipSpace(s[1:])) + if k == "" { + continue headers + } + s = skipSpace(s) + var v string + if strings.HasPrefix(s, "=") { + v, s = nextTokenOrQuoted(skipSpace(s[1:])) + s = skipSpace(s) + } + if s != "" && s[0] != ',' && s[0] != ';' { + continue headers + } + ext[k] = v + } + if s != "" && s[0] != ',' { + continue headers + } + result = append(result, ext) + if s == "" { + continue headers + } + s = s[1:] + } + } + return result +} diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go new file mode 100644 index 000000000..2e668f6b8 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/x_net_proxy.go @@ -0,0 
+1,473 @@ +// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. +//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy + +// Package proxy provides support for a variety of protocols to proxy network +// data. +// + +package websocket + +import ( + "errors" + "io" + "net" + "net/url" + "os" + "strconv" + "strings" + "sync" +) + +type proxy_direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var proxy_Direct = proxy_direct{} + +func (proxy_direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type proxy_PerHost struct { + def, bypass proxy_Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { + return &proxy_PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. 
+func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *proxy_PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. 
+func (p *proxy_PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *proxy_PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. +func (p *proxy_PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} + +// A Dialer is a means to establish a connection. +type proxy_Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type proxy_Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. 
+func proxy_FromEnvironment() proxy_Dialer { + allProxy := proxy_allProxyEnv.Get() + if len(allProxy) == 0 { + return proxy_Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return proxy_Direct + } + proxy, err := proxy_FromURL(proxyURL, proxy_Direct) + if err != nil { + return proxy_Direct + } + + noProxy := proxy_noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := proxy_NewPerHost(proxy, proxy_Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { + if proxy_proxySchemes == nil { + proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) + } + proxy_proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. +func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { + var auth *proxy_Auth + if u.User != nil { + auth = new(proxy_Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return proxy_SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. 
+ if proxy_proxySchemes != nil { + if f, ok := proxy_proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + proxy_allProxyEnv = &proxy_envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + proxy_noProxyEnv = &proxy_envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type proxy_envOnce struct { + names []string + once sync.Once + val string +} + +func (e *proxy_envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *proxy_envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address +// with an optional username and password. See RFC 1928 and RFC 1929. +func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { + s := &proxy_socks5{ + network: network, + addr: addr, + forward: forward, + } + if auth != nil { + s.user = auth.User + s.password = auth.Password + } + + return s, nil +} + +type proxy_socks5 struct { + user, password string + network, addr string + forward proxy_Dialer +} + +const proxy_socks5Version = 5 + +const ( + proxy_socks5AuthNone = 0 + proxy_socks5AuthPassword = 2 +) + +const proxy_socks5Connect = 1 + +const ( + proxy_socks5IP4 = 1 + proxy_socks5Domain = 3 + proxy_socks5IP6 = 4 +) + +var proxy_socks5Errors = []string{ + "", + "general failure", + "connection forbidden", + "network unreachable", + "host unreachable", + "connection refused", + "TTL expired", + "command not supported", + "address type not supported", +} + +// Dial connects to the address addr on the given network via the SOCKS5 proxy. 
+func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) + } + + conn, err := s.forward.Dial(s.network, s.addr) + if err != nil { + return nil, err + } + if err := s.connect(conn, addr); err != nil { + conn.Close() + return nil, err + } + return conn, nil +} + +// connect takes an existing connection to a socks5 proxy server, +// and commands the server to extend that connection to target, +// which must be a canonical address with a host and port. +func (s *proxy_socks5) connect(conn net.Conn, target string) error { + host, portStr, err := net.SplitHostPort(target) + if err != nil { + return err + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return errors.New("proxy: failed to parse port number: " + portStr) + } + if port < 1 || port > 0xffff { + return errors.New("proxy: port number out of range: " + portStr) + } + + // the size here is just an estimate + buf := make([]byte, 0, 6+len(host)) + + buf = append(buf, proxy_socks5Version) + if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { + buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) + } else { + buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) + } + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + if buf[0] != 5 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + } + if buf[1] == 0xff { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + } + + // See RFC 1929 + if buf[1] == 
proxy_socks5AuthPassword { + buf = buf[:0] + buf = append(buf, 1 /* password protocol version */) + buf = append(buf, uint8(len(s.user))) + buf = append(buf, s.user...) + buf = append(buf, uint8(len(s.password))) + buf = append(buf, s.password...) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if buf[1] != 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + } + } + + buf = buf[:0] + buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) + + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + buf = append(buf, proxy_socks5IP4) + ip = ip4 + } else { + buf = append(buf, proxy_socks5IP6) + } + buf = append(buf, ip...) + } else { + if len(host) > 255 { + return errors.New("proxy: destination host name too long: " + host) + } + buf = append(buf, proxy_socks5Domain) + buf = append(buf, byte(len(host))) + buf = append(buf, host...) 
+ } + buf = append(buf, byte(port>>8), byte(port)) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:4]); err != nil { + return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + failure := "unknown error" + if int(buf[1]) < len(proxy_socks5Errors) { + failure = proxy_socks5Errors[buf[1]] + } + + if len(failure) > 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + } + + bytesToDiscard := 0 + switch buf[3] { + case proxy_socks5IP4: + bytesToDiscard = net.IPv4len + case proxy_socks5IP6: + bytesToDiscard = net.IPv6len + case proxy_socks5Domain: + _, err := io.ReadFull(conn, buf[:1]) + if err != nil { + return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + bytesToDiscard = int(buf[0]) + default: + return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + } + + if cap(buf) < bytesToDiscard { + buf = make([]byte, bytesToDiscard) + } else { + buf = buf[:bytesToDiscard] + } + if _, err := io.ReadFull(conn, buf); err != nil { + return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + // Also need to discard the port number + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + return nil +} diff --git a/vendor/github.com/mattn/go-colorable/LICENSE b/vendor/github.com/mattn/go-colorable/LICENSE new file mode 100644 index 000000000..91b5cef30 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any 
person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md new file mode 100644 index 000000000..ca0483711 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/README.md @@ -0,0 +1,48 @@ +# go-colorable + +[![Build Status](https://github.com/mattn/go-colorable/workflows/test/badge.svg)](https://github.com/mattn/go-colorable/actions?query=workflow%3Atest) +[![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable) +[![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) + +Colorable writer for windows. + +For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.) 
+This package is possible to handle escape sequence for ansi color on windows. + +## Too Bad! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png) + + +## So Good! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) + +## Usage + +```go +logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) +logrus.SetOutput(colorable.NewColorableStdout()) + +logrus.Info("succeeded") +logrus.Warn("not correct") +logrus.Error("something error") +logrus.Fatal("panic") +``` + +You can compile above code on non-windows OSs. + +## Installation + +``` +$ go get github.com/mattn/go-colorable +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go new file mode 100644 index 000000000..416d1bbbf --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -0,0 +1,38 @@ +//go:build appengine +// +build appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} + +// EnableColorsStdout enable colors if possible. 
+func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go new file mode 100644 index 000000000..766d94603 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -0,0 +1,38 @@ +//go:build !windows && !appengine +// +build !windows,!appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} + +// EnableColorsStdout enable colors if possible. 
+func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go new file mode 100644 index 000000000..1846ad5ab --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -0,0 +1,1047 @@ +//go:build windows && !appengine +// +build windows,!appengine + +package colorable + +import ( + "bytes" + "io" + "math" + "os" + "strconv" + "strings" + "sync" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) + commonLvbUnderscore = 0x8000 + + cENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 +) + +const ( + genericRead = 0x80000000 + genericWrite = 0x40000000 +) + +const ( + consoleTextmodeBuffer = 0x1 +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type consoleCursorInfo struct { + size dword + visible int32 +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + 
procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") + procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") + procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") + procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") +) + +// Writer provides colorable Writer to the console +type Writer struct { + out io.Writer + handle syscall.Handle + althandle syscall.Handle + oldattr word + oldpos coord + rest bytes.Buffer + mutex sync.Mutex +} + +// NewColorable returns new instance of Writer which handles escape sequence from File. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var mode uint32 + if r, _, _ := procGetConsoleMode.Call(file.Fd(), uintptr(unsafe.Pointer(&mode))); r != 0 && mode&cENABLE_VIRTUAL_TERMINAL_PROCESSING != 0 { + return file + } + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} + } + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. 
+func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 
0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, 
+ 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +// `\033]0;TITLESTR\007` +func doTitleSequence(er *bytes.Reader) error { + var c byte + var err error + + c, err = er.ReadByte() + if err != nil { + return err + } + if c != '0' && c != '2' { + return nil + } + c, err = er.ReadByte() + if err != nil { + return err + } + if c != ';' { + return nil + } + title := make([]byte, 0, 80) + for { + c, err = er.ReadByte() + if err != nil { + return err + } + if c == 0x07 || c == '\n' { + break + } + title = append(title, c) + } + if len(title) > 0 { + title8, err := syscall.UTF16PtrFromString(string(title)) + if err == nil { + procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8))) + } + } + return nil +} + +// returns Atoi(s) unless s == "" in which case it returns def +func atoiWithDefault(s string, def int) (int, error) { + if s == "" { + return def, nil + } + return strconv.Atoi(s) +} + +// Write writes data on console +func (w *Writer) Write(data []byte) (n int, err error) { + w.mutex.Lock() + defer w.mutex.Unlock() + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + handle := w.handle + + var er *bytes.Reader + if w.rest.Len() > 0 { + var rest bytes.Buffer + w.rest.WriteTo(&rest) + w.rest.Reset() + rest.Write(data) + er = bytes.NewReader(rest.Bytes()) + } else { + er = bytes.NewReader(data) + } + var plaintext bytes.Buffer +loop: + for { + c1, err := er.ReadByte() + if err != nil { + plaintext.WriteTo(w.out) + break loop + } + if c1 != 0x1b { + plaintext.WriteByte(c1) + continue + } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } + c2, err := er.ReadByte() + if 
err != nil { + break loop + } + + switch c2 { + case '>': + continue + case ']': + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 { + break loop + } + er = bytes.NewReader(w.rest.Bytes()[2:]) + err := doTitleSequence(er) + if err != nil { + break loop + } + w.rest.Reset() + continue + // https://github.com/mattn/go-colorable/issues/27 + case '7': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + continue + case '8': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + continue + case 0x5b: + // execute part after switch + default: + continue + } + + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + + var buf bytes.Buffer + var m byte + for i, c := range w.rest.Bytes()[2:] { + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + er = bytes.NewReader(w.rest.Bytes()[2+i+1:]) + w.rest.Reset() + break + } + buf.Write([]byte(string(c))) + } + if m == 0 { + break loop + } + + switch m { + case 'A': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + 
procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + if csbi.cursorPosition.x < 0 { + csbi.cursorPosition.x = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n < 1 { + n = 1 + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n - 1) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H', 'f': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + if buf.Len() > 0 { + token := strings.Split(buf.String(), ";") + switch len(token) { + case 1: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + csbi.cursorPosition.y = short(n1 - 1) + case 2: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2 - 1) + 
csbi.cursorPosition.y = short(n1 - 1) + } + } else { + csbi.cursorPosition.y = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + var count, written dword + var cursor coord + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var count, written dword + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) + } + 
procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'X': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var written dword + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i++ { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case n == 4: + attr |= commonLvbUnderscore + case (1 <= n && n <= 3) || n == 5: + attr |= foregroundIntensity + case n == 7 || n == 27: + attr = + (attr &^ (foregroundMask | backgroundMask)) | + ((attr & foregroundMask) << 4) | + ((attr & backgroundMask) >> 4) + case n == 22: + attr &^= foregroundIntensity + case n == 24: + attr &^= commonLvbUnderscore + case 30 <= n && n <= 37: + attr &= backgroundMask + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. 
+ if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256%len(n256foreAttr)] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= foregroundRed + } + if g > 127 { + attr |= foregroundGreen + } + if b > 127 { + attr |= foregroundBlue + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr &= foregroundMask + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256%len(n256backAttr)] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= backgroundRed + } + if g > 127 { + attr |= backgroundGreen + } + if b > 127 { + attr |= backgroundBlue + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. 
+ attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr)) + } + } + case 'h': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle == 0 { + h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0) + w.althandle = syscall.Handle(h) + if w.althandle != 0 { + handle = w.althandle + } + } + } + case 'l': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle != 0 { + syscall.CloseHandle(w.althandle) + w.althandle = 0 + handle = w.handle + } + } + case 's': + 
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + case 'u': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + } + } + + return len(data), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + {0x000000, false, false, false, false}, + {0x000080, false, false, true, false}, + {0x008000, false, true, false, false}, + {0x008080, false, true, true, false}, + {0x800000, true, false, false, false}, + {0x800080, true, false, true, false}, + {0x808000, true, true, false, false}, + {0xc0c0c0, true, true, true, false}, + {0x808080, false, false, false, true}, + {0x0000ff, false, false, true, true}, + {0x00ff00, false, true, false, true}, + {0x00ffff, false, true, true, true}, + {0xff0000, true, false, false, true}, + {0xff00ff, true, false, true, true}, + {0xffff00, true, true, false, true}, + {0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min 
+ if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} + +// EnableColorsStdout enable colors if possible. 
+func EnableColorsStdout(enabled *bool) func() { + var mode uint32 + h := os.Stdout.Fd() + if r, _, _ := procGetConsoleMode.Call(h, uintptr(unsafe.Pointer(&mode))); r != 0 { + if r, _, _ = procSetConsoleMode.Call(h, uintptr(mode|cENABLE_VIRTUAL_TERMINAL_PROCESSING)); r != 0 { + if enabled != nil { + *enabled = true + } + return func() { + procSetConsoleMode.Call(h, uintptr(mode)) + } + } + } + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/go.test.sh b/vendor/github.com/mattn/go-colorable/go.test.sh new file mode 100644 index 000000000..012162b07 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go new file mode 100644 index 000000000..05d6f74bf --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -0,0 +1,57 @@ +package colorable + +import ( + "bytes" + "io" +) + +// NonColorable holds writer but removes escape sequence. +type NonColorable struct { + out io.Writer +} + +// NewNonColorable returns new instance of Writer which removes escape sequence from Writer. 
+func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +// Write writes data on console +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewReader(data) + var plaintext bytes.Buffer +loop: + for { + c1, err := er.ReadByte() + if err != nil { + plaintext.WriteTo(w.out) + break loop + } + if c1 != 0x1b { + plaintext.WriteByte(c1) + continue + } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + if c2 != 0x5b { + continue + } + + for { + c, err := er.ReadByte() + if err != nil { + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + } + } + + return len(data), nil +} diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 000000000..65dc692b6 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md new file mode 100644 index 000000000..38418353e --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -0,0 +1,50 @@ +# go-isatty + +[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) +[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty) +[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) { + fmt.Println("Is Cygwin/MSYS2 Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +## License + +MIT + +## Author + +Yasuhiro Matsumoto (a.k.a mattn) + +## Thanks + +* k-takata: base idea for IsCygwinTerminal + + https://github.com/k-takata/go-iscygpty diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go new file mode 100644 index 000000000..17d4f90eb --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements interface to isatty +package isatty diff --git a/vendor/github.com/mattn/go-isatty/go.test.sh 
b/vendor/github.com/mattn/go-isatty/go.test.sh new file mode 100644 index 000000000..012162b07 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 000000000..39bbcf00f --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,19 @@ +//go:build (darwin || freebsd || openbsd || netbsd || dragonfly) && !appengine +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go new file mode 100644 index 000000000..31503226f --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -0,0 +1,16 @@ +//go:build appengine || js || nacl || wasm +// +build appengine js nacl wasm + +package isatty + +// IsTerminal returns true if the file descriptor is terminal which +// is always false on js and appengine classic which is a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go new file mode 100644 index 000000000..bae7f9bb3 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_plan9.go @@ -0,0 +1,23 @@ +//go:build plan9 +// +build plan9 + +package isatty + +import ( + "syscall" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + path, err := syscall.Fd2path(int(fd)) + if err != nil { + return false + } + return path == "/dev/cons" || path == "/mnt/term/dev/cons" +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 000000000..0c3acf2dc --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,21 @@ +//go:build solaris && !appengine +// +build solaris,!appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: https://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libc/port/gen/isatty.c +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermio(int(fd), unix.TCGETA) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go new file mode 100644 index 000000000..67787657f --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -0,0 +1,19 @@ +//go:build (linux || aix || zos) && !appengine +// +build linux aix zos +// +build !appengine + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go new file mode 100644 index 000000000..8e3c99171 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -0,0 +1,125 @@ +//go:build windows && !appengine +// +build windows,!appengine + +package isatty + +import ( + "errors" + "strings" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + objectNameInfo uintptr = 1 + fileNameInfo = 2 + fileTypePipe = 3 +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + ntdll = syscall.NewLazyDLL("ntdll.dll") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = kernel32.NewProc("GetFileType") + procNtQueryObject = ntdll.NewProc("NtQueryObject") +) + +func init() { + // Check if GetFileInformationByHandleEx is available. + if procGetFileInformationByHandleEx.Find() != nil { + procGetFileInformationByHandleEx = nil + } +} + +// IsTerminal return true if the file descriptor is terminal. 
+func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// Check pipe name is used for cygwin/msys2 pty. +// Cygwin/MSYS2 PTY has a name like: +// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master +func isCygwinPipeName(name string) bool { + token := strings.Split(name, "-") + if len(token) < 5 { + return false + } + + if token[0] != `\msys` && + token[0] != `\cygwin` && + token[0] != `\Device\NamedPipe\msys` && + token[0] != `\Device\NamedPipe\cygwin` { + return false + } + + if token[1] == "" { + return false + } + + if !strings.HasPrefix(token[2], "pty") { + return false + } + + if token[3] != `from` && token[3] != `to` { + return false + } + + if token[4] != "master" { + return false + } + + return true +} + +// getFileNameByHandle use the undocomented ntdll NtQueryObject to get file full name from file handler +// since GetFileInformationByHandleEx is not available under windows Vista and still some old fashion +// guys are using Windows XP, this is a workaround for those guys, it will also work on system from +// Windows vista to 10 +// see https://stackoverflow.com/a/18792477 for details +func getFileNameByHandle(fd uintptr) (string, error) { + if procNtQueryObject == nil { + return "", errors.New("ntdll.dll: NtQueryObject not supported") + } + + var buf [4 + syscall.MAX_PATH]uint16 + var result int + r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5, + fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0) + if r != 0 { + return "", e + } + return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. 
+func IsCygwinTerminal(fd uintptr) bool { + if procGetFileInformationByHandleEx == nil { + name, err := getFileNameByHandle(fd) + if err != nil { + return false + } + return isCygwinPipeName(name) + } + + // Cygwin/msys's pty is a pipe. + ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) + if ft != fileTypePipe || e != 0 { + return false + } + + var buf [2 + syscall.MAX_PATH]uint16 + r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), + 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), + uintptr(len(buf)*2), 0, 0) + if r == 0 || e != 0 { + return false + } + + l := *(*uint32)(unsafe.Pointer(&buf)) + return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/.gitignore b/vendor/github.com/mochi-mqtt/server/v2/.gitignore new file mode 100644 index 000000000..21b4810e3 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/.gitignore @@ -0,0 +1,4 @@ +cmd/mqtt +.DS_Store +*.db +.idea \ No newline at end of file diff --git a/vendor/github.com/mochi-mqtt/server/v2/.golangci.yml b/vendor/github.com/mochi-mqtt/server/v2/.golangci.yml new file mode 100644 index 000000000..908393664 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/.golangci.yml @@ -0,0 +1,103 @@ +linters: + disable-all: false + fix: false # Fix found issues (if it's supported by the linter). 
+ enable: + # - asasalint + # - asciicheck + # - bidichk + # - bodyclose + # - containedctx + # - contextcheck + #- cyclop + # - deadcode + - decorder + # - depguard + # - dogsled + # - dupl + - durationcheck + # - errchkjson + # - errname + - errorlint + # - execinquery + # - exhaustive + # - exhaustruct + # - exportloopref + #- forcetypeassert + #- forbidigo + #- funlen + #- gci + # - gochecknoglobals + # - gochecknoinits + # - gocognit + # - goconst + # - gocritic + - gocyclo + - godot + # - godox + # - goerr113 + # - gofmt + # - gofumpt + # - goheader + - goimports + # - golint + # - gomnd + # - gomoddirectives + # - gomodguard + # - goprintffuncname + - gosec + - gosimple + - govet + # - grouper + # - ifshort + - importas + - ineffassign + # - interfacebloat + # - interfacer + # - ireturn + # - lll + # - maintidx + # - makezero + - maligned + - misspell + # - nakedret + # - nestif + # - nilerr + # - nilnil + # - nlreturn + # - noctx + # - nolintlint + # - nonamedreturns + # - nosnakecase + # - nosprintfhostport + # - paralleltest + # - prealloc + # - predeclared + # - promlinter + - reassign + # - revive + # - rowserrcheck + # - scopelint + # - sqlclosecheck + # - staticcheck + # - structcheck + # - stylecheck + # - tagliatelle + # - tenv + # - testpackage + # - thelper + - tparallel + # - typecheck + - unconvert + - unparam + - unused + - usestdlibvars + # - varcheck + # - varnamelen + - wastedassign + - whitespace + # - wrapcheck + # - wsl + disable: + - errcheck + + diff --git a/vendor/github.com/mochi-mqtt/server/v2/Dockerfile b/vendor/github.com/mochi-mqtt/server/v2/Dockerfile new file mode 100644 index 000000000..de71c8bab --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/Dockerfile @@ -0,0 +1,31 @@ +FROM golang:1.19.0-alpine3.15 AS builder + +RUN apk update +RUN apk add git + +WORKDIR /app + +COPY go.mod ./ +COPY go.sum ./ +RUN go mod download + +COPY . 
./ + +RUN go build -o /app/mochi ./cmd + + +FROM alpine + +WORKDIR / +COPY --from=builder /app/mochi . + +# tcp +EXPOSE 1883 + +# websockets +EXPOSE 1882 + +# dashboard +EXPOSE 8080 + +ENTRYPOINT [ "/mochi" ] diff --git a/vendor/github.com/mochi-mqtt/server/v2/LICENSE.md b/vendor/github.com/mochi-mqtt/server/v2/LICENSE.md new file mode 100644 index 000000000..25718aceb --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/LICENSE.md @@ -0,0 +1,23 @@ + +The MIT License (MIT) + +Copyright (c) 2023 Mochi-MQTT Organisation +Copyright (c) 2019, 2022, 2023 Jonathan Blake (mochi-co) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mochi-mqtt/server/v2/README.md b/vendor/github.com/mochi-mqtt/server/v2/README.md new file mode 100644 index 000000000..dcce56cb5 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/README.md @@ -0,0 +1,424 @@ + +

+ +![build status](https://github.com/mochi-mqtt/server/actions/workflows/build.yml/badge.svg) +[![Coverage Status](https://coveralls.io/repos/github/mochi-mqtt/server/badge.svg?branch=master&v2)](https://coveralls.io/github/mochi-mqtt/server?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/mochi-mqtt/server)](https://goreportcard.com/report/github.com/mochi-mqtt/server/v2) +[![Go Reference](https://pkg.go.dev/badge/github.com/mochi-mqtt/server.svg)](https://pkg.go.dev/github.com/mochi-mqtt/server/v2) +[![contributions welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/mochi-mqtt/server/issues) + +

+ +# Mochi MQTT Broker +## The fully compliant, embeddable high-performance Go MQTT v5 (and v3.1.1) broker server +Mochi MQTT is an embeddable [fully compliant](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html) MQTT v5 broker server written in Go, designed for the development of telemetry and internet-of-things projects. The server can be used either as a standalone binary or embedded as a library in your own applications, and has been designed to be as lightweight and fast as possible, with great care taken to ensure the quality and maintainability of the project. + +### What is MQTT? +MQTT stands for [MQ Telemetry Transport](https://en.wikipedia.org/wiki/MQTT). It is a publish/subscribe, extremely simple and lightweight messaging protocol, designed for constrained devices and low-bandwidth, high-latency or unreliable networks ([Learn more](https://mqtt.org/faq)). Mochi MQTT fully implements version 5.0.0 of the MQTT protocol. + +### When is this repo updated? +Unless it's a critical issue, new releases typically go out over the weekend. + +## What's new in Version 2? +Version 2.0.0 takes all the great things we loved about Mochi MQTT v1.0.0, learns from the mistakes, and improves on the things we wished we'd had. It's a total from-scratch rewrite, designed to fully implement MQTT v5 as a first-class feature. + +Don't forget to use the new v2 import paths: +```go +import "github.com/mochi-mqtt/server/v2" +``` + +- Full MQTTv5 Feature Compliance, compatibility for MQTT v3.1.1 and v3.0.0: + - User and MQTTv5 Packet Properties + - Topic Aliases + - Shared Subscriptions + - Subscription Options and Subscription Identifiers + - Message Expiry + - Client Session Expiry + - Send and Receive QoS Flow Control Quotas + - Server-side Disconnect and Auth Packets + - Will Delay Intervals + - Plus all the original MQTT features of Mochi MQTT v1, such as Full QoS(0,1,2), $SYS topics, retained messages, etc. 
+- Developer-centric: + - Most core broker code is now exported and accessible, for total developer control. + - Full-featured and flexible Hook-based interfacing system to provide easy 'plugin' development. + - Direct Packet Injection using special inline client, or masquerade as existing clients. +- Performant and Stable: + - Our classic trie-based Topic-Subscription model. + - Client-specific write buffers to avoid issues with slow-reading or irregular client behaviour. + - Passes all [Paho Interoperability Tests](https://github.com/eclipse/paho.mqtt.testing/tree/master/interoperability) for MQTT v5 and MQTT v3. + - Over a thousand carefully considered unit test scenarios. +- TCP, Websocket (including SSL/TLS), and $SYS Dashboard listeners. +- Built-in Redis, Badger, and Bolt Persistence using Hooks (but you can also make your own). +- Built-in Rule-based Authentication and ACL Ledger using Hooks (also make your own). + +> There is no upgrade path from v1.0.0. Please review the documentation and this readme to get a sense of the changes required (e.g. the v1 events system, auth, and persistence have all been replaced with the new hooks system). + +### Compatibility Notes +Because of the overlap between the v5 specification and previous versions of mqtt, the server can accept both v5 and v3 clients, but note that in cases where both v5 an v3 clients are connected, properties and features provided for v5 clients will be downgraded for v3 clients (such as user properties). + +Support for MQTT v3.0.0 and v3.1.1 is considered hybrid-compatibility. Where not specifically restricted in the v3 specification, more modern and safety-first v5 behaviours are used instead - such as expiry for inflight and retained messages, and clients - and quality-of-service flow control limits. + +## Roadmap +- Please [open an issue](https://github.com/mochi-mqtt/server/issues) to request new features or event hooks! +- Cluster support. +- Enhanced Metrics support. 
+- File-based server configuration (supporting docker). + +## Quick Start +### Running the Broker with Go +Mochi MQTT can be used as a standalone broker. Simply checkout this repository and run the [cmd/main.go](cmd/main.go) entrypoint in the [cmd](cmd) folder which will expose tcp (:1883), websocket (:1882), and dashboard (:8080) listeners. + +``` +cd cmd +go build -o mqtt && ./mqtt +``` + +### Using Docker +A simple Dockerfile is provided for running the [cmd/main.go](cmd/main.go) Websocket, TCP, and Stats server: + +```sh +docker build -t mochi:latest . +docker run -p 1883:1883 -p 1882:1882 -p 8080:8080 mochi:latest +``` + +## Developing with Mochi MQTT +### Importing as a package +Importing Mochi MQTT as a package requires just a few lines of code to get started. +``` go +import ( + "log" + + "github.com/mochi-mqtt/server/v2" + "github.com/mochi-mqtt/server/v2/hooks/auth" + "github.com/mochi-mqtt/server/v2/listeners" +) + +func main() { + // Create the new MQTT Server. + server := mqtt.New(nil) + + // Allow all connections. + _ = server.AddHook(new(auth.AllowHook), nil) + + // Create a TCP listener on a standard port. + tcp := listeners.NewTCP("t1", ":1883", nil) + err := server.AddListener(tcp) + if err != nil { + log.Fatal(err) + } + + err = server.Serve() + if err != nil { + log.Fatal(err) + } +} +``` + +Examples of running the broker with various configurations can be found in the [examples](examples) folder. + +#### Network Listeners +The server comes with a variety of pre-packaged network listeners which allow the broker to accept connections on different protocols. 
The current listeners are: + +| Listener | Usage | +|------------------------------|----------------------------------------------------------------------------------------------| +| listeners.NewTCP | A TCP listener | +| listeners.NewUnixSock | A Unix Socket listener | +| listeners.NewNet | A net.Listener listener | +| listeners.NewWebsocket | A Websocket listener | +| listeners.NewHTTPStats | An HTTP $SYS info dashboard | +| listeners.NewHTTPHealthCheck | An HTTP healthcheck listener to provide health check responses for e.g. cloud infrastructure | + +> Use the `listeners.Listener` interface to develop new listeners. If you do, please let us know! + +A `*listeners.Config` may be passed to configure TLS. + +Examples of usage can be found in the [examples](examples) folder or [cmd/main.go](cmd/main.go). + +### Server Options and Capabilities +A number of configurable options are available which can be used to alter the behaviour or restrict access to certain features in the server. + +```go +server := mqtt.New(&mqtt.Options{ + Capabilities: mqtt.Capabilities{ + MaximumSessionExpiryInterval: 3600, + Compatibilities: mqtt.Compatibilities{ + ObscureNotAuthorized: true, + }, + }, + ClientNetWriteBufferSize: 4096, + ClientNetReadBufferSize: 4096, + SysTopicResendInterval: 10, +}) +``` + +Review the mqtt.Options, mqtt.Capabilities, and mqtt.Compatibilities structs for a comprehensive list of options. `ClientNetWriteBufferSize` and `ClientNetReadBufferSize` can be configured to adjust memory usage per client, based on your needs. + + +## Event Hooks +A universal event hooks system allows developers to hook into various parts of the server and client life cycle to add and modify functionality of the broker. These universal hooks are used to provide everything from authentication, persistent storage, to debugging tools. + +Hooks are stackable - you can add multiple hooks to a server, and they will be run in the order they were added. 
Some hooks modify values, and these modified values will be passed to the subsequent hooks before being returned to the runtime code. + +| Type | Import | Info | +| -- | -- | -- | +| Access Control | [mochi-mqtt/server/hooks/auth . AllowHook](hooks/auth/allow_all.go) | Allow access to all connecting clients and read/write to all topics. | +| Access Control | [mochi-mqtt/server/hooks/auth . Auth](hooks/auth/auth.go) | Rule-based access control ledger. | +| Persistence | [mochi-mqtt/server/hooks/storage/bolt](hooks/storage/bolt/bolt.go) | Persistent storage using [BoltDB](https://dbdb.io/db/boltdb) (deprecated). | +| Persistence | [mochi-mqtt/server/hooks/storage/badger](hooks/storage/badger/badger.go) | Persistent storage using [BadgerDB](https://github.com/dgraph-io/badger). | +| Persistence | [mochi-mqtt/server/hooks/storage/redis](hooks/storage/redis/redis.go) | Persistent storage using [Redis](https://redis.io). | +| Debugging | [mochi-mqtt/server/hooks/debug](hooks/debug/debug.go) | Additional debugging output to visualise packet flow. | + +Many of the internal server functions are now exposed to developers, so you can make your own Hooks by using the above as examples. If you do, please [Open an issue](https://github.com/mochi-mqtt/server/issues) and let everyone know! + +### Access Control +#### Allow Hook +By default, Mochi MQTT uses a DENY-ALL access control rule. To allow connections, this must overwritten using an Access Control hook. The simplest of these hooks is the `auth.AllowAll` hook, which provides ALLOW-ALL rules to all connections, subscriptions, and publishing. It's also the simplest hook to use: + +```go +server := mqtt.New(nil) +_ = server.AddHook(new(auth.AllowHook), nil) +``` + +> Don't do this if you are exposing your server to the internet or untrusted networks - it should really be used for development, testing, and debugging only. 
+ +#### Auth Ledger +The Auth Ledger hook provides a sophisticated mechanism for defining access rules in a struct format. Auth ledger rules come in two forms: Auth rules (connection), and ACL rules (publish subscribe). + +Auth rules have 4 optional criteria and an assertion flag: +| Criteria | Usage | +| -- | -- | +| Client | client id of the connecting client | +| Username | username of the connecting client | +| Password | password of the connecting client | +| Remote | the remote address or ip of the client | +| Allow | true (allow this user) or false (deny this user) | + +ACL rules have 3 optional criteria and an filter match: +| Criteria | Usage | +| -- | -- | +| Client | client id of the connecting client | +| Username | username of the connecting client | +| Remote | the remote address or ip of the client | +| Filters | an array of filters to match | + +Rules are processed in index order (0,1,2,3), returning on the first matching rule. See [hooks/auth/ledger.go](hooks/auth/ledger.go) to review the structs. 
+ +```go +server := mqtt.New(nil) +err := server.AddHook(new(auth.Hook), &auth.Options{ + Ledger: &auth.Ledger{ + Auth: auth.AuthRules{ // Auth disallows all by default + {Username: "peach", Password: "password1", Allow: true}, + {Username: "melon", Password: "password2", Allow: true}, + {Remote: "127.0.0.1:*", Allow: true}, + {Remote: "localhost:*", Allow: true}, + }, + ACL: auth.ACLRules{ // ACL allows all by default + {Remote: "127.0.0.1:*"}, // local superuser allow all + { + // user melon can read and write to their own topic + Username: "melon", Filters: auth.Filters{ + "melon/#": auth.ReadWrite, + "updates/#": auth.WriteOnly, // can write to updates, but can't read updates from others + }, + }, + { + // Otherwise, no clients have publishing permissions + Filters: auth.Filters{ + "#": auth.ReadOnly, + "updates/#": auth.Deny, + }, + }, + }, + } +}) +``` + +The ledger can also be stored as JSON or YAML and loaded using the Data field: +```go +err = server.AddHook(new(auth.Hook), &auth.Options{ + Data: data, // build ledger from byte slice: yaml or json +}) +``` +See [examples/auth/encoded/main.go](examples/auth/encoded/main.go) for more information. + +### Persistent Storage +#### Redis +A basic Redis storage hook is available which provides persistence for the broker. It can be added to the server in the same fashion as any other hook, with several options. It uses github.com/go-redis/redis/v8 under the hook, and is completely configurable through the Options value. +```go +err := server.AddHook(new(redis.Hook), &redis.Options{ + Options: &rv8.Options{ + Addr: "localhost:6379", // default redis address + Password: "", // your password + DB: 0, // your redis db + }, +}) +if err != nil { + log.Fatal(err) +} +``` +For more information on how the redis hook works, or how to use it, see the [examples/persistence/redis/main.go](examples/persistence/redis/main.go) or [hooks/storage/redis](hooks/storage/redis) code. 
+ +#### Badger DB +There's also a BadgerDB storage hook if you prefer file based storage. It can be added and configured in much the same way as the other hooks (with somewhat fewer options). +```go +err := server.AddHook(new(badger.Hook), &badger.Options{ + Path: badgerPath, +}) +if err != nil { + log.Fatal(err) +} +``` +For more information on how the badger hook works, or how to use it, see the [examples/persistence/badger/main.go](examples/persistence/badger/main.go) or [hooks/storage/badger](hooks/storage/badger) code. + +There is also a BoltDB hook which has been deprecated in favour of Badger, but if you need it, check [examples/persistence/bolt/main.go](examples/persistence/bolt/main.go). + +## Developing with Event Hooks +Many hooks are available for interacting with the broker and client lifecycle. +The function signatures for all the hooks and `mqtt.Hook` interface can be found in [hooks.go](hooks.go). + +> The most flexible event hooks are OnPacketRead, OnPacketEncode, and OnPacketSent - these hooks can be used to control and modify all incoming and outgoing packets. + +| Function | Usage | +|------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| OnStarted | Called when the server has successfully started. | +| OnStopped | Called when the server has successfully stopped. | +| OnConnectAuthenticate | Called when a user attempts to authenticate with the server. An implementation of this method MUST be used to allow or deny access to the server (see hooks/auth/allow_all or basic). It can be used in custom hooks to check connecting users against an existing user database. Returns true if allowed. | +| OnACLCheck | Called when a user attempts to publish or subscribe to a topic filter. 
As above. | +| OnSysInfoTick | Called when the $SYS topic values are published out. | +| OnConnect | Called when a new client connects, may return an error or packet code to halt the client connection process. | +| OnSessionEstablish | Called immediately after a new client connects and authenticates and immediately before the session is established and CONNACK is sent. +| OnSessionEstablished | Called when a new client successfully establishes a session (after OnConnect) | +| OnDisconnect | Called when a client is disconnected for any reason. | +| OnAuthPacket | Called when an auth packet is received. It is intended to allow developers to create their own mqtt v5 Auth Packet handling mechanisms. Allows packet modification. | +| OnPacketRead | Called when a packet is received from a client. Allows packet modification. | +| OnPacketEncode | Called immediately before a packet is encoded to be sent to a client. Allows packet modification. | +| OnPacketSent | Called when a packet has been sent to a client. | +| OnPacketProcessed | Called when a packet has been received and successfully handled by the broker. | +| OnSubscribe | Called when a client subscribes to one or more filters. Allows packet modification. | +| OnSubscribed | Called when a client successfully subscribes to one or more filters. | +| OnSelectSubscribers | Called when subscribers have been collected for a topic, but before shared subscription subscribers have been selected. Allows receipient modification. | +| OnUnsubscribe | Called when a client unsubscribes from one or more filters. Allows packet modification. | +| OnUnsubscribed | Called when a client successfully unsubscribes from one or more filters. | +| OnPublish | Called when a client publishes a message. Allows packet modification. | +| OnPublished | Called when a client has published a message to subscribers. 
| +| OnPublishDropped | Called when a message to a client is dropped before delivery, such as if the client is taking too long to respond. | +| OnRetainMessage | Called when a published message is retained. | +| OnRetainPublished | Called when a retained message is published to a client. | +| OnQosPublish | Called when a publish packet with Qos >= 1 is issued to a subscriber. | +| OnQosComplete | Called when the Qos flow for a message has been completed. | +| OnQosDropped | Called when an inflight message expires before completion. | +| OnPacketIDExhausted | Called when a client runs out of unused packet ids to assign. | +| OnWill | Called when a client disconnects and intends to issue a will message. Allows packet modification. | +| OnWillSent | Called when an LWT message has been issued from a disconnecting client. | +| OnClientExpired | Called when a client session has expired and should be deleted. | +| OnRetainedExpired | Called when a retained message has expired and should be deleted. | +| StoredClients | Returns clients, eg. from a persistent store. | +| StoredSubscriptions | Returns client subscriptions, eg. from a persistent store. | +| StoredInflightMessages | Returns inflight messages, eg. from a persistent store. | +| StoredRetainedMessages | Returns retained messages, eg. from a persistent store. | +| StoredSysInfo | Returns stored system info values, eg. from a persistent store. | + +If you are building a persistent storage hook, see the existing persistent hooks for inspiration and patterns. If you are building an auth hook, you will need `OnACLCheck` and `OnConnectAuthenticate`. + + +### Direct Publish +To publish a basic message to a topic from within the embedding application, you can use the `server.Publish(topic string, payload []byte, retain bool, qos byte) error` method. 
+ +```go +err := server.Publish("direct/publish", []byte("packet scheduled message"), false, 0) +``` +> The Qos byte in this case is only used to set the upper qos limit available for subscribers, as per MQTT v5 spec. + +### Packet Injection +If you want more control, or want to set specific MQTT v5 properties and other values you can create your own publish packets from a client of your choice. This method allows you to inject MQTT packets (not just publish) directly into the runtime as though they had been received by a specific client. Most of the time you'll want to use the special client flag `inline=true`, as it has unique privileges: it bypasses all ACL and topic validation checks, meaning it can even publish to $SYS topics. + +Packet injection can be used for any MQTT packet, including ping requests, subscriptions, etc. And because the Clients structs and methods are now exported, you can even inject packets on behalf of a connected client (if you have very custom requirements). + +```go +cl := server.NewClient(nil, "local", "inline", true) +server.InjectPacket(cl, packets.Packet{ + FixedHeader: packets.FixedHeader{ + Type: packets.Publish, + }, + TopicName: "direct/publish", + Payload: []byte("scheduled message"), +}) +``` + +> MQTT packets still need to be correctly formed, so refer to [the test packets catalogue](packets/tpackets.go) and [MQTTv5 Specification](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html) for inspiration. + +See the [hooks example](examples/hooks/main.go) to see this feature in action. + + + +### Testing +#### Unit Tests +Mochi MQTT tests over a thousand scenarios with thoughtfully hand written unit tests to ensure each function does exactly what we expect. You can run the tests using go: +``` +go test --cover ./... 
+``` + +#### Paho Interoperability Test +You can check the broker against the [Paho Interoperability Test](https://github.com/eclipse/paho.mqtt.testing/tree/master/interoperability) by starting the broker using `examples/paho/main.go`, and then running the mqtt v5 and v3 tests with `python3 client_test5.py` from the _interoperability_ folder. + +> Note that there are currently a number of outstanding issues regarding false negatives in the paho suite, and as such, certain compatibility modes are enabled in the `paho/main.go` example. + + +## Performance Benchmarks +Mochi MQTT performance is comparable with popular brokers such as Mosquitto, EMQX, and others. + +Performance benchmarks were tested using [MQTT-Stresser](https://github.com/inovex/mqtt-stresser) on an Apple MacBook Air M2, using `cmd/main.go` default settings. Taking into account bursts of high and low throughput, the median scores are the most useful. Higher is better. + +> The values presented in the benchmark are not representative of true messages per second throughput. They rely on an unusual calculation by mqtt-stresser, but are usable as they are consistent across all brokers. +> Benchmarks are provided as a general performance expectation guideline only. Comparisons are performed using out-of-the-box default configurations. 
+ +`mqtt-stresser -broker tcp://localhost:1883 -num-clients=2 -num-messages=10000` +| Broker | publish fastest | median | slowest | receive fastest | median | slowest | +| -- | -- | -- | -- | -- | -- | -- | +| Mochi v2.2.10 | 124,772 | 125,456 | 124,614 | 314,461 | 313,186 | 311,910 | +| [Mosquitto v2.0.15](https://github.com/eclipse/mosquitto) | 155,920 | 155,919 | 155,918 | 185,485 | 185,097 | 184,709 | +| [EMQX v5.0.11](https://github.com/emqx/emqx) | 156,945 | 156,257 | 155,568 | 17,918 | 17,783 | 17,649 | +| [Rumqtt v0.21.0](https://github.com/bytebeamio/rumqtt) | 112,208 | 108,480 | 104,753 | 135,784 | 126,446 | 117,108 | + +`mqtt-stresser -broker tcp://localhost:1883 -num-clients=10 -num-messages=10000` +| Broker | publish fastest | median | slowest | receive fastest | median | slowest | +| -- | -- | -- | -- | -- | -- | -- | +| Mochi v2.2.10 | 41,825 | 31,663| 23,008 | 144,058 | 65,903 | 37,618 | +| Mosquitto v2.0.15 | 42,729 | 38,633 | 29,879 | 23,241 | 19,714 | 18,806 | +| EMQX v5.0.11 | 21,553 | 17,418 | 14,356 | 4,257 | 3,980 | 3,756 | +| Rumqtt v0.21.0 | 42,213 | 23,153 | 20,814 | 49,465 | 36,626 | 19,283 | + +Million Message Challenge (hit the server with 1 million messages immediately): + +`mqtt-stresser -broker tcp://localhost:1883 -num-clients=100 -num-messages=10000` +| Broker | publish fastest | median | slowest | receive fastest | median | slowest | +| -- | -- | -- | -- | -- | -- | -- | +| Mochi v2.2.10 | 13,532 | 4,425 | 2,344 | 52,120 | 7,274 | 2,701 | +| Mosquitto v2.0.15 | 3,826 | 3,395 | 3,032 | 1,200 | 1,150 | 1,118 | +| EMQX v5.0.11 | 4,086 | 2,432 | 2,274 | 434 | 333 | 311 | +| Rumqtt v0.21.0 | 78,972 | 5,047 | 3,804 | 4,286 | 3,249 | 2,027 | + +> Not sure what's going on with EMQX here, perhaps the docker out-of-the-box settings are not optimal, so take it with a pinch of salt as we know for a fact it's a solid piece of software. + +## Contribution Guidelines +Contributions and feedback are both welcomed and encouraged! 
[Open an issue](https://github.com/mochi-mqtt/server/issues) to report a bug, ask a question, or make a feature request. If you open a pull request, please try to follow the following guidelines: +- Try to maintain test coverage where reasonably possible. +- Clearly state what the PR does and why. +- Remember to add your SPDX FileContributor tag to files where you have made a meaningful contribution. + +[SPDX Annotations](https://spdx.dev) are used to clearly indicate the license, copyright, and contributions of each file in a machine-readable format. If you are adding a new file to the repository, please ensure it has the following SPDX header: +```go +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2023 mochi-mqtt +// SPDX-FileContributor: Your name or alias + +package name +``` + +Please ensure to add a new `SPDX-FileContributor` line for each contributor to the file. Refer to other files for examples. Please remember to do this, your contributions to this project are valuable and appreciated - it's important to receive credit! + +## Stargazers over time 🥰 +[![Stargazers over time](https://starchart.cc/mochi-mqtt/server.svg)](https://starchart.cc/mochi-mqtt/server) +Are you using Mochi MQTT in a project? 
[Let us know!](https://github.com/mochi-mqtt/server/issues) + diff --git a/vendor/github.com/mochi-mqtt/server/v2/clients.go b/vendor/github.com/mochi-mqtt/server/v2/clients.go new file mode 100644 index 000000000..75fe8685c --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/clients.go @@ -0,0 +1,574 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2023 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package mqtt + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/rs/xid" + + "github.com/mochi-mqtt/server/v2/packets" +) + +const ( + defaultKeepalive uint16 = 10 // the default connection keepalive value in seconds + defaultClientProtocolVersion byte = 4 // the default mqtt protocol version of connecting clients (if somehow unspecified). +) + +// ReadFn is the function signature for the function used for reading and processing new packets. +type ReadFn func(*Client, packets.Packet) error + +// Clients contains a map of the clients known by the broker. +type Clients struct { + internal map[string]*Client // clients known by the broker, keyed on client id. + sync.RWMutex +} + +// NewClients returns an instance of Clients. +func NewClients() *Clients { + return &Clients{ + internal: make(map[string]*Client), + } +} + +// Add adds a new client to the clients map, keyed on client id. +func (cl *Clients) Add(val *Client) { + cl.Lock() + defer cl.Unlock() + cl.internal[val.ID] = val +} + +// GetAll returns all the clients. +func (cl *Clients) GetAll() map[string]*Client { + cl.RLock() + defer cl.RUnlock() + m := map[string]*Client{} + for k, v := range cl.internal { + m[k] = v + } + return m +} + +// Get returns the value of a client if it exists. +func (cl *Clients) Get(id string) (*Client, bool) { + cl.RLock() + defer cl.RUnlock() + val, ok := cl.internal[id] + return val, ok +} + +// Len returns the length of the clients map. 
+func (cl *Clients) Len() int { + cl.RLock() + defer cl.RUnlock() + val := len(cl.internal) + return val +} + +// Delete removes a client from the internal map. +func (cl *Clients) Delete(id string) { + cl.Lock() + defer cl.Unlock() + delete(cl.internal, id) +} + +// GetByListener returns clients matching a listener id. +func (cl *Clients) GetByListener(id string) []*Client { + cl.RLock() + defer cl.RUnlock() + clients := make([]*Client, 0, cl.Len()) + for _, client := range cl.internal { + if client.Net.Listener == id && !client.Closed() { + clients = append(clients, client) + } + } + return clients +} + +// Client contains information about a client known by the broker. +type Client struct { + Properties ClientProperties // client properties + State ClientState // the operational state of the client. + Net ClientConnection // network connection state of the clinet + ID string // the client id. + ops *ops // ops provides a reference to server ops. + sync.RWMutex // mutex +} + +// ClientConnection contains the connection transport and metadata for the client. +type ClientConnection struct { + Conn net.Conn // the net.Conn used to establish the connection + bconn *bufio.ReadWriter // a buffered net.Conn for reading packets + Remote string // the remote address of the client + Listener string // listener id of the client + Inline bool // client is an inline programmetic client +} + +// ClientProperties contains the properties which define the client behaviour. +type ClientProperties struct { + Props packets.Properties + Will Will + Username []byte + ProtocolVersion byte + Clean bool +} + +// Will contains the last will and testament details for a client connection. +type Will struct { + Payload []byte // - + User []packets.UserProperty // - + TopicName string // - + Flag uint32 // 0,1 + WillDelayInterval uint32 // - + Qos byte // - + Retain bool // - +} + +// State tracks the state of the client. 
+type ClientState struct { + TopicAliases TopicAliases // a map of topic aliases + stopCause atomic.Value // reason for stopping + Inflight *Inflight // a map of in-flight qos messages + Subscriptions *Subscriptions // a map of the subscription filters a client maintains + disconnected int64 // the time the client disconnected in unix time, for calculating expiry + outbound chan *packets.Packet // queue for pending outbound packets + endOnce sync.Once // only end once + isTakenOver uint32 // used to identify orphaned clients + packetID uint32 // the current highest packetID + open context.Context // indicate that the client is open for packet exchange + cancelOpen context.CancelFunc // cancel function for open context + outboundQty int32 // number of messages currently in the outbound queue + Keepalive uint16 // the number of seconds the connection can wait + ServerKeepalive bool // keepalive was set by the server +} + +// newClient returns a new instance of Client. This is almost exclusively used by Server +// for creating new clients, but it lives here because it's not dependent. 
+func newClient(c net.Conn, o *ops) *Client { + ctx, cancel := context.WithCancel(context.Background()) + cl := &Client{ + State: ClientState{ + Inflight: NewInflights(), + Subscriptions: NewSubscriptions(), + TopicAliases: NewTopicAliases(o.options.Capabilities.TopicAliasMaximum), + open: ctx, + cancelOpen: cancel, + Keepalive: defaultKeepalive, + outbound: make(chan *packets.Packet, o.options.Capabilities.MaximumClientWritesPending), + }, + Properties: ClientProperties{ + ProtocolVersion: defaultClientProtocolVersion, // default protocol version + }, + ops: o, + } + + if c != nil { + cl.Net = ClientConnection{ + Conn: c, + bconn: bufio.NewReadWriter( + bufio.NewReaderSize(c, o.options.ClientNetReadBufferSize), + bufio.NewWriterSize(c, o.options.ClientNetWriteBufferSize), + ), + Remote: c.RemoteAddr().String(), + } + } + + return cl +} + +// WriteLoop ranges over pending outbound messages and writes them to the client connection. +func (cl *Client) WriteLoop() { + for { + select { + case pk := <-cl.State.outbound: + if err := cl.WritePacket(*pk); err != nil { + cl.ops.log.Debug().Err(err).Str("client", cl.ID).Interface("packet", pk).Msg("failed publishing packet") + } + atomic.AddInt32(&cl.State.outboundQty, -1) + case <-cl.State.open.Done(): + return + } + } +} + +// ParseConnect parses the connect parameters and properties for a client. 
+func (cl *Client) ParseConnect(lid string, pk packets.Packet) { + cl.Net.Listener = lid + + cl.Properties.ProtocolVersion = pk.ProtocolVersion + cl.Properties.Username = pk.Connect.Username + cl.Properties.Clean = pk.Connect.Clean + cl.Properties.Props = pk.Properties.Copy(false) + + cl.State.Keepalive = pk.Connect.Keepalive // [MQTT-3.2.2-22] + cl.State.Inflight.ResetReceiveQuota(int32(cl.ops.options.Capabilities.ReceiveMaximum)) // server receive max per client + cl.State.Inflight.ResetSendQuota(int32(cl.Properties.Props.ReceiveMaximum)) // client receive max + cl.State.TopicAliases.Outbound = NewOutboundTopicAliases(cl.Properties.Props.TopicAliasMaximum) + + cl.ID = pk.Connect.ClientIdentifier + if cl.ID == "" { + cl.ID = xid.New().String() // [MQTT-3.1.3-6] [MQTT-3.1.3-7] + cl.Properties.Props.AssignedClientID = cl.ID + } + + if pk.Connect.WillFlag { + cl.Properties.Will = Will{ + Qos: pk.Connect.WillQos, + Retain: pk.Connect.WillRetain, + Payload: pk.Connect.WillPayload, + TopicName: pk.Connect.WillTopic, + WillDelayInterval: pk.Connect.WillProperties.WillDelayInterval, + User: pk.Connect.WillProperties.User, + } + if pk.Properties.SessionExpiryIntervalFlag && + pk.Properties.SessionExpiryInterval < pk.Connect.WillProperties.WillDelayInterval { + cl.Properties.Will.WillDelayInterval = pk.Properties.SessionExpiryInterval + } + if pk.Connect.WillFlag { + cl.Properties.Will.Flag = 1 // atomic for checking + } + } +} + +// refreshDeadline refreshes the read/write deadline for the net.Conn connection. +func (cl *Client) refreshDeadline(keepalive uint16) { + var expiry time.Time // nil time can be used to disable deadline if keepalive = 0 + if keepalive > 0 { + expiry = time.Now().Add(time.Duration(keepalive+(keepalive/2)) * time.Second) // [MQTT-3.1.2-22] + } + + if cl.Net.Conn != nil { + _ = cl.Net.Conn.SetDeadline(expiry) // [MQTT-3.1.2-22] + } +} + +// NextPacketID returns the next available (unused) packet id for the client. 
+// If no unused packet ids are available, an error is returned and the client +// should be disconnected. +func (cl *Client) NextPacketID() (i uint32, err error) { + cl.Lock() + defer cl.Unlock() + + i = atomic.LoadUint32(&cl.State.packetID) + started := i + overflowed := false + for { + if overflowed && i == started { + return 0, packets.ErrQuotaExceeded + } + + if i >= cl.ops.options.Capabilities.maximumPacketID { + overflowed = true + i = 0 + continue + } + + i++ + + if _, ok := cl.State.Inflight.Get(uint16(i)); !ok { + atomic.StoreUint32(&cl.State.packetID, i) + return i, nil + } + } +} + +// ResendInflightMessages attempts to resend any pending inflight messages to connected clients. +func (cl *Client) ResendInflightMessages(force bool) error { + if cl.State.Inflight.Len() == 0 { + return nil + } + + for _, tk := range cl.State.Inflight.GetAll(false) { + if tk.FixedHeader.Type == packets.Publish { + tk.FixedHeader.Dup = true // [MQTT-3.3.1-1] [MQTT-3.3.1-3] + } + + cl.ops.hooks.OnQosPublish(cl, tk, tk.Created, 0) + err := cl.WritePacket(tk) + if err != nil { + return err + } + + if tk.FixedHeader.Type == packets.Puback || tk.FixedHeader.Type == packets.Pubcomp { + if ok := cl.State.Inflight.Delete(tk.PacketID); ok { + cl.ops.hooks.OnQosComplete(cl, tk) + atomic.AddInt64(&cl.ops.info.Inflight, -1) + } + } + } + + return nil +} + +// ClearInflights deletes all inflight messages for the client, eg. for a disconnected user with a clean session. 
+func (cl *Client) ClearInflights(now, maximumExpiry int64) []uint16 { + deleted := []uint16{} + for _, tk := range cl.State.Inflight.GetAll(false) { + if (tk.Expiry > 0 && tk.Expiry < now) || tk.Created+maximumExpiry < now { + if ok := cl.State.Inflight.Delete(tk.PacketID); ok { + cl.ops.hooks.OnQosDropped(cl, tk) + atomic.AddInt64(&cl.ops.info.Inflight, -1) + deleted = append(deleted, tk.PacketID) + } + } + } + + return deleted +} + +// Read reads incoming packets from the connected client and transforms them into +// packets to be handled by the packetHandler. +func (cl *Client) Read(packetHandler ReadFn) error { + var err error + + for { + if cl.Closed() { + return nil + } + + cl.refreshDeadline(cl.State.Keepalive) + fh := new(packets.FixedHeader) + err = cl.ReadFixedHeader(fh) + if err != nil { + return err + } + + pk, err := cl.ReadPacket(fh) + if err != nil { + return err + } + + err = packetHandler(cl, pk) // Process inbound packet. + if err != nil { + return err + } + } +} + +// Stop instructs the client to shut down all processing goroutines and disconnect. +func (cl *Client) Stop(err error) { + cl.State.endOnce.Do(func() { + + if cl.Net.Conn != nil { + _ = cl.Net.Conn.Close() // omit close error + } + + if err != nil { + cl.State.stopCause.Store(err) + } + + if cl.State.cancelOpen != nil { + cl.State.cancelOpen() + } + + atomic.StoreInt64(&cl.State.disconnected, time.Now().Unix()) + }) +} + +// StopCause returns the reason the client connection was stopped, if any. +func (cl *Client) StopCause() error { + if cl.State.stopCause.Load() == nil { + return nil + } + return cl.State.stopCause.Load().(error) +} + +// Closed returns true if client connection is closed. +func (cl *Client) Closed() bool { + return cl.State.open == nil || cl.State.open.Err() != nil +} + +// ReadFixedHeader reads in the values of the next packet's fixed header. 
+func (cl *Client) ReadFixedHeader(fh *packets.FixedHeader) error { + if cl.Net.bconn == nil { + return ErrConnectionClosed + } + + b, err := cl.Net.bconn.ReadByte() + if err != nil { + return err + } + + err = fh.Decode(b) + if err != nil { + return err + } + + var bu int + fh.Remaining, bu, err = packets.DecodeLength(cl.Net.bconn) + if err != nil { + return err + } + + if cl.ops.options.Capabilities.MaximumPacketSize > 0 && uint32(fh.Remaining+1) > cl.ops.options.Capabilities.MaximumPacketSize { + return packets.ErrPacketTooLarge // [MQTT-3.2.2-15] + } + + atomic.AddInt64(&cl.ops.info.BytesReceived, int64(bu+1)) + return nil +} + +// ReadPacket reads the remaining buffer into an MQTT packet. +func (cl *Client) ReadPacket(fh *packets.FixedHeader) (pk packets.Packet, err error) { + atomic.AddInt64(&cl.ops.info.PacketsReceived, 1) + + pk.ProtocolVersion = cl.Properties.ProtocolVersion // inherit client protocol version for decoding + pk.FixedHeader = *fh + p := make([]byte, pk.FixedHeader.Remaining) + n, err := io.ReadFull(cl.Net.bconn, p) + if err != nil { + return pk, err + } + + atomic.AddInt64(&cl.ops.info.BytesReceived, int64(n)) + + // Decode the remaining packet values using a fresh copy of the bytes, + // otherwise the next packet will change the data of this one. + px := append([]byte{}, p[:]...) 
+ switch pk.FixedHeader.Type { + case packets.Connect: + err = pk.ConnectDecode(px) + case packets.Disconnect: + err = pk.DisconnectDecode(px) + case packets.Connack: + err = pk.ConnackDecode(px) + case packets.Publish: + err = pk.PublishDecode(px) + if err == nil { + atomic.AddInt64(&cl.ops.info.MessagesReceived, 1) + } + case packets.Puback: + err = pk.PubackDecode(px) + case packets.Pubrec: + err = pk.PubrecDecode(px) + case packets.Pubrel: + err = pk.PubrelDecode(px) + case packets.Pubcomp: + err = pk.PubcompDecode(px) + case packets.Subscribe: + err = pk.SubscribeDecode(px) + case packets.Suback: + err = pk.SubackDecode(px) + case packets.Unsubscribe: + err = pk.UnsubscribeDecode(px) + case packets.Unsuback: + err = pk.UnsubackDecode(px) + case packets.Pingreq: + case packets.Pingresp: + case packets.Auth: + err = pk.AuthDecode(px) + default: + err = fmt.Errorf("invalid packet type; %v", pk.FixedHeader.Type) + } + + if err != nil { + return pk, err + } + + pk, err = cl.ops.hooks.OnPacketRead(cl, pk) + return +} + +// WritePacket encodes and writes a packet to the client. 
+func (cl *Client) WritePacket(pk packets.Packet) error { + if cl.Closed() { + return ErrConnectionClosed + } + + if cl.Net.Conn == nil { + return nil + } + + if pk.Expiry > 0 { + pk.Properties.MessageExpiryInterval = uint32(pk.Expiry - time.Now().Unix()) // [MQTT-3.3.2-6] + } + + pk.ProtocolVersion = cl.Properties.ProtocolVersion + if pk.Mods.MaxSize == 0 { // NB we use this statement to embed client packet sizes in tests + pk.Mods.MaxSize = cl.Properties.Props.MaximumPacketSize + } + + if cl.Properties.Props.RequestProblemInfoFlag && cl.Properties.Props.RequestProblemInfo == 0x0 { + pk.Mods.DisallowProblemInfo = true // [MQTT-3.1.2-29] strict, no problem info on any packet if set + } + + if pk.FixedHeader.Type != packets.Connack || cl.Properties.Props.RequestResponseInfo == 0x1 || cl.ops.options.Capabilities.Compatibilities.AlwaysReturnResponseInfo { + pk.Mods.AllowResponseInfo = true // [MQTT-3.1.2-28] we need to know which properties we can encode + } + + pk = cl.ops.hooks.OnPacketEncode(cl, pk) + + var err error + buf := new(bytes.Buffer) + switch pk.FixedHeader.Type { + case packets.Connect: + err = pk.ConnectEncode(buf) + case packets.Connack: + err = pk.ConnackEncode(buf) + case packets.Publish: + err = pk.PublishEncode(buf) + case packets.Puback: + err = pk.PubackEncode(buf) + case packets.Pubrec: + err = pk.PubrecEncode(buf) + case packets.Pubrel: + err = pk.PubrelEncode(buf) + case packets.Pubcomp: + err = pk.PubcompEncode(buf) + case packets.Subscribe: + err = pk.SubscribeEncode(buf) + case packets.Suback: + err = pk.SubackEncode(buf) + case packets.Unsubscribe: + err = pk.UnsubscribeEncode(buf) + case packets.Unsuback: + err = pk.UnsubackEncode(buf) + case packets.Pingreq: + err = pk.PingreqEncode(buf) + case packets.Pingresp: + err = pk.PingrespEncode(buf) + case packets.Disconnect: + err = pk.DisconnectEncode(buf) + case packets.Auth: + err = pk.AuthEncode(buf) + default: + err = fmt.Errorf("%w: %v", packets.ErrNoValidPacketAvailable, 
pk.FixedHeader.Type) + } + if err != nil { + return err + } + + if pk.Mods.MaxSize > 0 && uint32(buf.Len()) > pk.Mods.MaxSize { + return packets.ErrPacketTooLarge // [MQTT-3.1.2-24] [MQTT-3.1.2-25] + } + + nb := net.Buffers{buf.Bytes()} + n, err := func() (int64, error) { + cl.Lock() + defer cl.Unlock() + return nb.WriteTo(cl.Net.Conn) + }() + if err != nil { + return err + } + + atomic.AddInt64(&cl.ops.info.BytesSent, n) + atomic.AddInt64(&cl.ops.info.PacketsSent, 1) + if pk.FixedHeader.Type == packets.Publish { + atomic.AddInt64(&cl.ops.info.MessagesSent, 1) + } + + cl.ops.hooks.OnPacketSent(cl, pk, buf.Bytes()) + + return err +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/hooks.go b/vendor/github.com/mochi-mqtt/server/v2/hooks.go new file mode 100644 index 000000000..1af3c6df4 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/hooks.go @@ -0,0 +1,846 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co, thedevop + +package mqtt + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + + "github.com/mochi-mqtt/server/v2/hooks/storage" + "github.com/mochi-mqtt/server/v2/packets" + "github.com/mochi-mqtt/server/v2/system" + + "github.com/rs/zerolog" +) + +const ( + SetOptions byte = iota + OnSysInfoTick + OnStarted + OnStopped + OnConnectAuthenticate + OnACLCheck + OnConnect + OnSessionEstablish + OnSessionEstablished + OnDisconnect + OnAuthPacket + OnPacketRead + OnPacketEncode + OnPacketSent + OnPacketProcessed + OnSubscribe + OnSubscribed + OnSelectSubscribers + OnUnsubscribe + OnUnsubscribed + OnPublish + OnPublished + OnPublishDropped + OnRetainMessage + OnRetainPublished + OnQosPublish + OnQosComplete + OnQosDropped + OnPacketIDExhausted + OnWill + OnWillSent + OnClientExpired + OnRetainedExpired + StoredClients + StoredSubscriptions + StoredInflightMessages + StoredRetainedMessages + StoredSysInfo +) + +var ( + // ErrInvalidConfigType indicates a different Type of config 
value was expected to what was received. + ErrInvalidConfigType = errors.New("invalid config type provided") +) + +// Hook provides an interface of handlers for different events which occur +// during the lifecycle of the broker. +type Hook interface { + ID() string + Provides(b byte) bool + Init(config any) error + Stop() error + SetOpts(l *zerolog.Logger, o *HookOptions) + OnStarted() + OnStopped() + OnConnectAuthenticate(cl *Client, pk packets.Packet) bool + OnACLCheck(cl *Client, topic string, write bool) bool + OnSysInfoTick(*system.Info) + OnConnect(cl *Client, pk packets.Packet) error + OnSessionEstablish(cl *Client, pk packets.Packet) + OnSessionEstablished(cl *Client, pk packets.Packet) + OnDisconnect(cl *Client, err error, expire bool) + OnAuthPacket(cl *Client, pk packets.Packet) (packets.Packet, error) + OnPacketRead(cl *Client, pk packets.Packet) (packets.Packet, error) // triggers when a new packet is received by a client, but before packet validation + OnPacketEncode(cl *Client, pk packets.Packet) packets.Packet // modify a packet before it is byte-encoded and written to the client + OnPacketSent(cl *Client, pk packets.Packet, b []byte) // triggers when packet bytes have been written to the client + OnPacketProcessed(cl *Client, pk packets.Packet, err error) // triggers after a packet from the client been processed (handled) + OnSubscribe(cl *Client, pk packets.Packet) packets.Packet + OnSubscribed(cl *Client, pk packets.Packet, reasonCodes []byte) + OnSelectSubscribers(subs *Subscribers, pk packets.Packet) *Subscribers + OnUnsubscribe(cl *Client, pk packets.Packet) packets.Packet + OnUnsubscribed(cl *Client, pk packets.Packet) + OnPublish(cl *Client, pk packets.Packet) (packets.Packet, error) + OnPublished(cl *Client, pk packets.Packet) + OnPublishDropped(cl *Client, pk packets.Packet) + OnRetainMessage(cl *Client, pk packets.Packet, r int64) + OnRetainPublished(cl *Client, pk packets.Packet) + OnQosPublish(cl *Client, pk packets.Packet, sent int64, 
resends int) + OnQosComplete(cl *Client, pk packets.Packet) + OnQosDropped(cl *Client, pk packets.Packet) + OnPacketIDExhausted(cl *Client, pk packets.Packet) + OnWill(cl *Client, will Will) (Will, error) + OnWillSent(cl *Client, pk packets.Packet) + OnClientExpired(cl *Client) + OnRetainedExpired(filter string) + StoredClients() ([]storage.Client, error) + StoredSubscriptions() ([]storage.Subscription, error) + StoredInflightMessages() ([]storage.Message, error) + StoredRetainedMessages() ([]storage.Message, error) + StoredSysInfo() (storage.SystemInfo, error) +} + +// HookOptions contains values which are inherited from the server on initialisation. +type HookOptions struct { + Capabilities *Capabilities +} + +// Hooks is a slice of Hook interfaces to be called in sequence. +type Hooks struct { + Log *zerolog.Logger // a logger for the hook (from the server) + internal atomic.Value // a slice of []Hook + wg sync.WaitGroup // a waitgroup for syncing hook shutdown + qty int64 // the number of hooks in use + sync.Mutex // a mutex for locking when adding hooks +} + +// Len returns the number of hooks added. +func (h *Hooks) Len() int64 { + return atomic.LoadInt64(&h.qty) +} + +// Provides returns true if any one hook provides any of the requested hook methods. +func (h *Hooks) Provides(b ...byte) bool { + for _, hook := range h.GetAll() { + for _, hb := range b { + if hook.Provides(hb) { + return true + } + } + } + + return false +} + +// Add adds and initializes a new hook. +func (h *Hooks) Add(hook Hook, config any) error { + h.Lock() + defer h.Unlock() + + err := hook.Init(config) + if err != nil { + return fmt.Errorf("failed initialising %s hook: %w", hook.ID(), err) + } + + i, ok := h.internal.Load().([]Hook) + if !ok { + i = []Hook{} + } + + i = append(i, hook) + h.internal.Store(i) + atomic.AddInt64(&h.qty, 1) + h.wg.Add(1) + + return nil +} + +// GetAll returns a slice of all the hooks. 
+func (h *Hooks) GetAll() []Hook { + i, ok := h.internal.Load().([]Hook) + if !ok { + return []Hook{} + } + + return i +} + +// Stop indicates all attached hooks to gracefully end. +func (h *Hooks) Stop() { + go func() { + for _, hook := range h.GetAll() { + h.Log.Info().Str("hook", hook.ID()).Msg("stopping hook") + if err := hook.Stop(); err != nil { + h.Log.Debug().Err(err).Str("hook", hook.ID()).Msg("problem stopping hook") + } + + h.wg.Done() + } + }() + + h.wg.Wait() +} + +// OnSysInfoTick is called when the $SYS topic values are published out. +func (h *Hooks) OnSysInfoTick(sys *system.Info) { + for _, hook := range h.GetAll() { + if hook.Provides(OnSysInfoTick) { + hook.OnSysInfoTick(sys) + } + } +} + +// OnStarted is called when the server has successfully started. +func (h *Hooks) OnStarted() { + for _, hook := range h.GetAll() { + if hook.Provides(OnStarted) { + hook.OnStarted() + } + } +} + +// OnStopped is called when the server has successfully stopped. +func (h *Hooks) OnStopped() { + for _, hook := range h.GetAll() { + if hook.Provides(OnStopped) { + hook.OnStopped() + } + } +} + +// OnConnect is called when a new client connects, and may return a packets.Code as an error to halt the connection. +func (h *Hooks) OnConnect(cl *Client, pk packets.Packet) error { + for _, hook := range h.GetAll() { + if hook.Provides(OnConnect) { + err := hook.OnConnect(cl, pk) + if err != nil { + return err + } + } + } + return nil +} + +// OnSessionEstablish is called right after a new client connects and authenticates and right before +// the session is established and CONNACK is sent. +func (h *Hooks) OnSessionEstablish(cl *Client, pk packets.Packet) { + for _, hook := range h.GetAll() { + if hook.Provides(OnSessionEstablish) { + hook.OnSessionEstablish(cl, pk) + } + } +} + +// OnSessionEstablished is called when a new client establishes a session (after OnConnect). 
+func (h *Hooks) OnSessionEstablished(cl *Client, pk packets.Packet) { + for _, hook := range h.GetAll() { + if hook.Provides(OnSessionEstablished) { + hook.OnSessionEstablished(cl, pk) + } + } +} + +// OnDisconnect is called when a client is disconnected for any reason. +func (h *Hooks) OnDisconnect(cl *Client, err error, expire bool) { + for _, hook := range h.GetAll() { + if hook.Provides(OnDisconnect) { + hook.OnDisconnect(cl, err, expire) + } + } +} + +// OnPacketRead is called when a packet is received from a client. +func (h *Hooks) OnPacketRead(cl *Client, pk packets.Packet) (pkx packets.Packet, err error) { + pkx = pk + for _, hook := range h.GetAll() { + if hook.Provides(OnPacketRead) { + npk, err := hook.OnPacketRead(cl, pkx) + if err != nil && errors.Is(err, packets.ErrRejectPacket) { + h.Log.Debug().Err(err).Str("hook", hook.ID()).Interface("packet", pkx).Msg("packet rejected") + return pk, err + } else if err != nil { + continue + } + + pkx = npk + } + } + + return +} + +// OnAuthPacket is called when an auth packet is received. It is intended to allow developers +// to create their own auth packet handling mechanisms. +func (h *Hooks) OnAuthPacket(cl *Client, pk packets.Packet) (pkx packets.Packet, err error) { + pkx = pk + for _, hook := range h.GetAll() { + if hook.Provides(OnAuthPacket) { + npk, err := hook.OnAuthPacket(cl, pkx) + if err != nil { + return pk, err + } + + pkx = npk + } + } + + return +} + +// OnPacketEncode is called immediately before a packet is encoded to be sent to a client. +func (h *Hooks) OnPacketEncode(cl *Client, pk packets.Packet) packets.Packet { + for _, hook := range h.GetAll() { + if hook.Provides(OnPacketEncode) { + pk = hook.OnPacketEncode(cl, pk) + } + } + + return pk +} + +// OnPacketProcessed is called when a packet has been received and successfully handled by the broker. 
+func (h *Hooks) OnPacketProcessed(cl *Client, pk packets.Packet, err error) { + for _, hook := range h.GetAll() { + if hook.Provides(OnPacketProcessed) { + hook.OnPacketProcessed(cl, pk, err) + } + } +} + +// OnPacketSent is called when a packet has been sent to a client. It takes a bytes parameter +// containing the bytes sent. +func (h *Hooks) OnPacketSent(cl *Client, pk packets.Packet, b []byte) { + for _, hook := range h.GetAll() { + if hook.Provides(OnPacketSent) { + hook.OnPacketSent(cl, pk, b) + } + } +} + +// OnSubscribe is called when a client subscribes to one or more filters. This method +// differs from OnSubscribed in that it allows you to modify the subscription values +// before the packet is processed. The return values of the hook methods are passed-through +// in the order the hooks were attached. +func (h *Hooks) OnSubscribe(cl *Client, pk packets.Packet) packets.Packet { + for _, hook := range h.GetAll() { + if hook.Provides(OnSubscribe) { + pk = hook.OnSubscribe(cl, pk) + } + } + return pk +} + +// OnSubscribed is called when a client subscribes to one or more filters. +func (h *Hooks) OnSubscribed(cl *Client, pk packets.Packet, reasonCodes []byte) { + for _, hook := range h.GetAll() { + if hook.Provides(OnSubscribed) { + hook.OnSubscribed(cl, pk, reasonCodes) + } + } +} + +// OnSelectSubscribers is called when subscribers have been collected for a topic, but before +// shared subscription subscribers have been selected. This hook can be used to programmatically +// remove or add clients to a publish to subscribers process, or to select the subscriber for a shared +// group in a custom manner (such as based on client id, ip, etc). 
+func (h *Hooks) OnSelectSubscribers(subs *Subscribers, pk packets.Packet) *Subscribers { + for _, hook := range h.GetAll() { + if hook.Provides(OnSelectSubscribers) { + subs = hook.OnSelectSubscribers(subs, pk) + } + } + return subs +} + +// OnUnsubscribe is called when a client unsubscribes from one or more filters. This method +// differs from OnUnsubscribed in that it allows you to modify the unsubscription values +// before the packet is processed. The return values of the hook methods are passed-through +// in the order the hooks were attached. +func (h *Hooks) OnUnsubscribe(cl *Client, pk packets.Packet) packets.Packet { + for _, hook := range h.GetAll() { + if hook.Provides(OnUnsubscribe) { + pk = hook.OnUnsubscribe(cl, pk) + } + } + return pk +} + +// OnUnsubscribed is called when a client unsubscribes from one or more filters. +func (h *Hooks) OnUnsubscribed(cl *Client, pk packets.Packet) { + for _, hook := range h.GetAll() { + if hook.Provides(OnUnsubscribed) { + hook.OnUnsubscribed(cl, pk) + } + } +} + +// OnPublish is called when a client publishes a message. This method differs from OnPublished +// in that it allows you to modify you to modify the incoming packet before it is processed. +// The return values of the hook methods are passed-through in the order the hooks were attached. +func (h *Hooks) OnPublish(cl *Client, pk packets.Packet) (pkx packets.Packet, err error) { + pkx = pk + for _, hook := range h.GetAll() { + if hook.Provides(OnPublish) { + npk, err := hook.OnPublish(cl, pkx) + if err != nil { + if errors.Is(err, packets.ErrRejectPacket) { + h.Log.Debug().Err(err).Str("hook", hook.ID()).Interface("packet", pkx).Msg("publish packet rejected") + return pk, err + } + h.Log.Error().Err(err).Str("hook", hook.ID()).Interface("packet", pkx).Msg("publish packet error") + return pk, err + } + pkx = npk + } + } + + return +} + +// OnPublished is called when a client has published a message to subscribers. 
+func (h *Hooks) OnPublished(cl *Client, pk packets.Packet) { + for _, hook := range h.GetAll() { + if hook.Provides(OnPublished) { + hook.OnPublished(cl, pk) + } + } +} + +// OnPublishDropped is called when a message to a client was dropped instead of delivered +// such as when a client is too slow to respond. +func (h *Hooks) OnPublishDropped(cl *Client, pk packets.Packet) { + for _, hook := range h.GetAll() { + if hook.Provides(OnPublishDropped) { + hook.OnPublishDropped(cl, pk) + } + } +} + +// OnRetainMessage is called then a published message is retained. +func (h *Hooks) OnRetainMessage(cl *Client, pk packets.Packet, r int64) { + for _, hook := range h.GetAll() { + if hook.Provides(OnRetainMessage) { + hook.OnRetainMessage(cl, pk, r) + } + } +} + +// OnRetainPublished is called when a retained message is published. +func (h *Hooks) OnRetainPublished(cl *Client, pk packets.Packet) { + for _, hook := range h.GetAll() { + if hook.Provides(OnRetainPublished) { + hook.OnRetainPublished(cl, pk) + } + } +} + +// OnQosPublish is called when a publish packet with Qos >= 1 is issued to a subscriber. +// In other words, this method is called when a new inflight message is created or resent. +// It is typically used to store a new inflight message. +func (h *Hooks) OnQosPublish(cl *Client, pk packets.Packet, sent int64, resends int) { + for _, hook := range h.GetAll() { + if hook.Provides(OnQosPublish) { + hook.OnQosPublish(cl, pk, sent, resends) + } + } +} + +// OnQosComplete is called when the Qos flow for a message has been completed. +// In other words, when an inflight message is resolved. +// It is typically used to delete an inflight message from a store. +func (h *Hooks) OnQosComplete(cl *Client, pk packets.Packet) { + for _, hook := range h.GetAll() { + if hook.Provides(OnQosComplete) { + hook.OnQosComplete(cl, pk) + } + } +} + +// OnQosDropped is called the Qos flow for a message expires. In other words, when +// an inflight message expires or is abandoned. 
It is typically used to delete an +// inflight message from a store. +func (h *Hooks) OnQosDropped(cl *Client, pk packets.Packet) { + for _, hook := range h.GetAll() { + if hook.Provides(OnQosDropped) { + hook.OnQosDropped(cl, pk) + } + } +} + +// OnPacketIDExhausted is called when the client runs out of unused packet ids to +// assign to a packet. +func (h *Hooks) OnPacketIDExhausted(cl *Client, pk packets.Packet) { + for _, hook := range h.GetAll() { + if hook.Provides(OnPacketIDExhausted) { + hook.OnPacketIDExhausted(cl, pk) + } + } +} + +// OnWill is called when a client disconnects and publishes an LWT message. This method +// differs from OnWillSent in that it allows you to modify the LWT message before it is +// published. The return values of the hook methods are passed-through in the order +// the hooks were attached. +func (h *Hooks) OnWill(cl *Client, will Will) Will { + for _, hook := range h.GetAll() { + if hook.Provides(OnWill) { + mlwt, err := hook.OnWill(cl, will) + if err != nil { + h.Log.Error().Err(err).Str("hook", hook.ID()).Interface("will", will).Msg("parse will error") + continue + } + will = mlwt + } + } + + return will +} + +// OnWillSent is called when an LWT message has been issued from a disconnecting client. +func (h *Hooks) OnWillSent(cl *Client, pk packets.Packet) { + for _, hook := range h.GetAll() { + if hook.Provides(OnWillSent) { + hook.OnWillSent(cl, pk) + } + } +} + +// OnClientExpired is called when a client session has expired and should be deleted. +func (h *Hooks) OnClientExpired(cl *Client) { + for _, hook := range h.GetAll() { + if hook.Provides(OnClientExpired) { + hook.OnClientExpired(cl) + } + } +} + +// OnRetainedExpired is called when a retained message has expired and should be deleted. +func (h *Hooks) OnRetainedExpired(filter string) { + for _, hook := range h.GetAll() { + if hook.Provides(OnRetainedExpired) { + hook.OnRetainedExpired(filter) + } + } +} + +// StoredClients returns all clients, e.g. 
from a persistent store, is used to +// populate the server clients list before start. +func (h *Hooks) StoredClients() (v []storage.Client, err error) { + for _, hook := range h.GetAll() { + if hook.Provides(StoredClients) { + v, err := hook.StoredClients() + if err != nil { + h.Log.Error().Err(err).Str("hook", hook.ID()).Msg("failed to load clients") + return v, err + } + + if len(v) > 0 { + return v, nil + } + } + } + + return +} + +// StoredSubscriptions returns all subcriptions, e.g. from a persistent store, and is +// used to populate the server subscriptions list before start. +func (h *Hooks) StoredSubscriptions() (v []storage.Subscription, err error) { + for _, hook := range h.GetAll() { + if hook.Provides(StoredSubscriptions) { + v, err := hook.StoredSubscriptions() + if err != nil { + h.Log.Error().Err(err).Str("hook", hook.ID()).Msg("failed to load subscriptions") + return v, err + } + + if len(v) > 0 { + return v, nil + } + } + } + + return +} + +// StoredInflightMessages returns all inflight messages, e.g. from a persistent store, +// and is used to populate the restored clients with inflight messages before start. +func (h *Hooks) StoredInflightMessages() (v []storage.Message, err error) { + for _, hook := range h.GetAll() { + if hook.Provides(StoredInflightMessages) { + v, err := hook.StoredInflightMessages() + if err != nil { + h.Log.Error().Err(err).Str("hook", hook.ID()).Msg("failed to load inflight messages") + return v, err + } + + if len(v) > 0 { + return v, nil + } + } + } + + return +} + +// StoredRetainedMessages returns all retained messages, e.g. from a persistent store, +// and is used to populate the server topics with retained messages before start. 
+func (h *Hooks) StoredRetainedMessages() (v []storage.Message, err error) { + for _, hook := range h.GetAll() { + if hook.Provides(StoredRetainedMessages) { + v, err := hook.StoredRetainedMessages() + if err != nil { + h.Log.Error().Err(err).Str("hook", hook.ID()).Msg("failed to load retained messages") + return v, err + } + + if len(v) > 0 { + return v, nil + } + } + } + + return +} + +// StoredSysInfo returns a set of system info values. +func (h *Hooks) StoredSysInfo() (v storage.SystemInfo, err error) { + for _, hook := range h.GetAll() { + if hook.Provides(StoredSysInfo) { + v, err := hook.StoredSysInfo() + if err != nil { + h.Log.Error().Err(err).Str("hook", hook.ID()).Msg("failed to load $SYS info") + return v, err + } + + if v.Version != "" { + return v, nil + } + } + } + + return +} + +// OnConnectAuthenticate is called when a user attempts to authenticate with the server. +// An implementation of this method MUST be used to allow or deny access to the +// server (see hooks/auth/allow_all or basic). It can be used in custom hooks to +// check connecting users against an existing user database. +func (h *Hooks) OnConnectAuthenticate(cl *Client, pk packets.Packet) bool { + for _, hook := range h.GetAll() { + if hook.Provides(OnConnectAuthenticate) { + if ok := hook.OnConnectAuthenticate(cl, pk); ok { + return true + } + } + } + + return false +} + +// OnACLCheck is called when a user attempts to publish or subscribe to a topic filter. +// An implementation of this method MUST be used to allow or deny access to the +// (see hooks/auth/allow_all or basic). It can be used in custom hooks to +// check publishing and subscribing users against an existing permissions or roles database. 
+func (h *Hooks) OnACLCheck(cl *Client, topic string, write bool) bool { + for _, hook := range h.GetAll() { + if hook.Provides(OnACLCheck) { + if ok := hook.OnACLCheck(cl, topic, write); ok { + return true + } + } + } + + return false +} + +// HookBase provides a set of default methods for each hook. It should be embedded in +// all hooks. +type HookBase struct { + Hook + Log *zerolog.Logger + Opts *HookOptions +} + +// ID returns the ID of the hook. +func (h *HookBase) ID() string { + return "base" +} + +// Provides indicates which methods a hook provides. The default is none - this method +// should be overridden by the embedding hook. +func (h *HookBase) Provides(b byte) bool { + return false +} + +// Init performs any pre-start initializations for the hook, such as connecting to databases +// or opening files. +func (h *HookBase) Init(config any) error { + return nil +} + +// SetOpts is called by the server to propagate internal values and generally should +// not be called manually. +func (h *HookBase) SetOpts(l *zerolog.Logger, opts *HookOptions) { + h.Log = l + h.Opts = opts +} + +// Stop is called to gracefully shut down the hook. +func (h *HookBase) Stop() error { + return nil +} + +// OnStarted is called when the server starts. +func (h *HookBase) OnStarted() {} + +// OnStopped is called when the server stops. +func (h *HookBase) OnStopped() {} + +// OnSysInfoTick is called when the server publishes system info. +func (h *HookBase) OnSysInfoTick(*system.Info) {} + +// OnConnectAuthenticate is called when a user attempts to authenticate with the server. +func (h *HookBase) OnConnectAuthenticate(cl *Client, pk packets.Packet) bool { + return false +} + +// OnACLCheck is called when a user attempts to subscribe or publish to a topic. +func (h *HookBase) OnACLCheck(cl *Client, topic string, write bool) bool { + return false +} + +// OnConnect is called when a new client connects. 
+func (h *HookBase) OnConnect(cl *Client, pk packets.Packet) error { + return nil +} + +// OnSessionEstablish is called right after a new client connects and authenticates and right before +// the session is established and CONNACK is sent. +func (h *HookBase) OnSessionEstablish(cl *Client, pk packets.Packet) {} + +// OnSessionEstablished is called when a new client establishes a session (after OnConnect). +func (h *HookBase) OnSessionEstablished(cl *Client, pk packets.Packet) {} + +// OnDisconnect is called when a client is disconnected for any reason. +func (h *HookBase) OnDisconnect(cl *Client, err error, expire bool) {} + +// OnAuthPacket is called when an auth packet is received from the client. +func (h *HookBase) OnAuthPacket(cl *Client, pk packets.Packet) (packets.Packet, error) { + return pk, nil +} + +// OnPacketRead is called when a packet is received. +func (h *HookBase) OnPacketRead(cl *Client, pk packets.Packet) (packets.Packet, error) { + return pk, nil +} + +// OnPacketEncode is called before a packet is byte-encoded and written to the client. +func (h *HookBase) OnPacketEncode(cl *Client, pk packets.Packet) packets.Packet { + return pk +} + +// OnPacketSent is called immediately after a packet is written to a client. +func (h *HookBase) OnPacketSent(cl *Client, pk packets.Packet, b []byte) {} + +// OnPacketProcessed is called immediately after a packet from a client is processed. +func (h *HookBase) OnPacketProcessed(cl *Client, pk packets.Packet, err error) {} + +// OnSubscribe is called when a client subscribes to one or more filters. +func (h *HookBase) OnSubscribe(cl *Client, pk packets.Packet) packets.Packet { + return pk +} + +// OnSubscribed is called when a client subscribes to one or more filters. +func (h *HookBase) OnSubscribed(cl *Client, pk packets.Packet, reasonCodes []byte) {} + +// OnSelectSubscribers is called when selecting subscribers to receive a message. 
+func (h *HookBase) OnSelectSubscribers(subs *Subscribers, pk packets.Packet) *Subscribers { + return subs +} + +// OnUnsubscribe is called when a client unsubscribes from one or more filters. +func (h *HookBase) OnUnsubscribe(cl *Client, pk packets.Packet) packets.Packet { + return pk +} + +// OnUnsubscribed is called when a client unsubscribes from one or more filters. +func (h *HookBase) OnUnsubscribed(cl *Client, pk packets.Packet) {} + +// OnPublish is called when a client publishes a message. +func (h *HookBase) OnPublish(cl *Client, pk packets.Packet) (packets.Packet, error) { + return pk, nil +} + +// OnPublished is called when a client has published a message to subscribers. +func (h *HookBase) OnPublished(cl *Client, pk packets.Packet) {} + +// OnPublishDropped is called when a message to a client is dropped instead of being delivered. +func (h *HookBase) OnPublishDropped(cl *Client, pk packets.Packet) {} + +// OnRetainMessage is called then a published message is retained. +func (h *HookBase) OnRetainMessage(cl *Client, pk packets.Packet, r int64) {} + +// OnRetainPublished is called when a retained message is published. +func (h *HookBase) OnRetainPublished(cl *Client, pk packets.Packet) {} + +// OnQosPublish is called when a publish packet with Qos > 1 is issued to a subscriber. +func (h *HookBase) OnQosPublish(cl *Client, pk packets.Packet, sent int64, resends int) {} + +// OnQosComplete is called when the Qos flow for a message has been completed. +func (h *HookBase) OnQosComplete(cl *Client, pk packets.Packet) {} + +// OnQosDropped is called the Qos flow for a message expires. +func (h *HookBase) OnQosDropped(cl *Client, pk packets.Packet) {} + +// OnPacketIDExhausted is called when the client runs out of unused packet ids to assign to a packet. +func (h *HookBase) OnPacketIDExhausted(cl *Client, pk packets.Packet) {} + +// OnWill is called when a client disconnects and publishes an LWT message. 
+func (h *HookBase) OnWill(cl *Client, will Will) (Will, error) { + return will, nil +} + +// OnWillSent is called when an LWT message has been issued from a disconnecting client. +func (h *HookBase) OnWillSent(cl *Client, pk packets.Packet) {} + +// OnClientExpired is called when a client session has expired. +func (h *HookBase) OnClientExpired(cl *Client) {} + +// OnRetainedExpired is called when a retained message for a topic has expired. +func (h *HookBase) OnRetainedExpired(topic string) {} + +// StoredClients returns all clients from a store. +func (h *HookBase) StoredClients() (v []storage.Client, err error) { + return +} + +// StoredSubscriptions returns all subcriptions from a store. +func (h *HookBase) StoredSubscriptions() (v []storage.Subscription, err error) { + return +} + +// StoredInflightMessages returns all inflight messages from a store. +func (h *HookBase) StoredInflightMessages() (v []storage.Message, err error) { + return +} + +// StoredRetainedMessages returns all retained messages from a store. +func (h *HookBase) StoredRetainedMessages() (v []storage.Message, err error) { + return +} + +// StoredSysInfo returns a set of system info values. +func (h *HookBase) StoredSysInfo() (v storage.SystemInfo, err error) { + return +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/allow_all.go b/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/allow_all.go new file mode 100644 index 000000000..e05a0de3b --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/allow_all.go @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package auth + +import ( + "bytes" + + "github.com/mochi-mqtt/server/v2" + "github.com/mochi-mqtt/server/v2/packets" +) + +// AllowHook is an authentication hook which allows connection access +// for all users and read and write access to all topics. 
+type AllowHook struct { + mqtt.HookBase +} + +// ID returns the ID of the hook. +func (h *AllowHook) ID() string { + return "allow-all-auth" +} + +// Provides indicates which hook methods this hook provides. +func (h *AllowHook) Provides(b byte) bool { + return bytes.Contains([]byte{ + mqtt.OnConnectAuthenticate, + mqtt.OnACLCheck, + }, []byte{b}) +} + +// OnConnectAuthenticate returns true/allowed for all requests. +func (h *AllowHook) OnConnectAuthenticate(cl *mqtt.Client, pk packets.Packet) bool { + return true +} + +// OnACLCheck returns true/allowed for all checks. +func (h *AllowHook) OnACLCheck(cl *mqtt.Client, topic string, write bool) bool { + return true +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/auth.go b/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/auth.go new file mode 100644 index 000000000..ed9146134 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/auth.go @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package auth + +import ( + "bytes" + + "github.com/mochi-mqtt/server/v2" + "github.com/mochi-mqtt/server/v2/packets" +) + +// Options contains the configuration/rules data for the auth ledger. +type Options struct { + Data []byte + Ledger *Ledger +} + +// Hook is an authentication hook which implements an auth ledger. +type Hook struct { + mqtt.HookBase + config *Options + ledger *Ledger +} + +// ID returns the ID of the hook. +func (h *Hook) ID() string { + return "auth-ledger" +} + +// Provides indicates which hook methods this hook provides. +func (h *Hook) Provides(b byte) bool { + return bytes.Contains([]byte{ + mqtt.OnConnectAuthenticate, + mqtt.OnACLCheck, + }, []byte{b}) +} + +// Init configures the hook with the auth ledger to be used for checking. 
+func (h *Hook) Init(config any) error { + if _, ok := config.(*Options); !ok && config != nil { + return mqtt.ErrInvalidConfigType + } + + if config == nil { + config = new(Options) + } + + h.config = config.(*Options) + + var err error + if h.config.Ledger != nil { + h.ledger = h.config.Ledger + } else if len(h.config.Data) > 0 { + h.ledger = new(Ledger) + err = h.ledger.Unmarshal(h.config.Data) + } + if err != nil { + return err + } + + if h.ledger == nil { + h.ledger = &Ledger{ + Auth: AuthRules{}, + ACL: ACLRules{}, + } + } + + h.Log.Info(). + Int("authentication", len(h.ledger.Auth)). + Int("acl", len(h.ledger.ACL)). + Msg("loaded auth rules") + + return nil +} + +// OnConnectAuthenticate returns true if the connecting client has rules which provide access +// in the auth ledger. +func (h *Hook) OnConnectAuthenticate(cl *mqtt.Client, pk packets.Packet) bool { + if _, ok := h.ledger.AuthOk(cl, pk); ok { + return true + } + + h.Log.Info(). + Str("username", string(pk.Connect.Username)). + Str("remote", cl.Net.Remote). + Msg("client failed authentication check") + + return false +} + +// OnACLCheck returns true if the connecting client has matching read or write access to subscribe +// or publish to a given topic. +func (h *Hook) OnACLCheck(cl *mqtt.Client, topic string, write bool) bool { + if _, ok := h.ledger.ACLOk(cl, topic, write); ok { + return true + } + + h.Log.Debug(). + Str("client", cl.ID). + Str("username", string(cl.Properties.Username)). + Str("topic", topic). 
+ Msg("client failed allowed ACL check") + + return false +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/ledger.go b/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/ledger.go new file mode 100644 index 000000000..9e5e2e67c --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/ledger.go @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package auth + +import ( + "encoding/json" + "strings" + "sync" + + "gopkg.in/yaml.v3" + + "github.com/mochi-mqtt/server/v2" + "github.com/mochi-mqtt/server/v2/packets" +) + +const ( + Deny Access = iota // user cannot access the topic + ReadOnly // user can only subscribe to the topic + WriteOnly // user can only publish to the topic + ReadWrite // user can both publish and subscribe to the topic +) + +// Access determines the read/write privileges for an ACL rule. +type Access byte + +// Users contains a map of access rules for specific users, keyed on username. +type Users map[string]UserRule + +// UserRule defines a set of access rules for a specific user. +type UserRule struct { + Username RString `json:"username,omitempty" yaml:"username,omitempty"` // the username of a user + Password RString `json:"password,omitempty" yaml:"password,omitempty"` // the password of a user + ACL Filters `json:"acl,omitempty" yaml:"acl,omitempty"` // filters to match, if desired + Disallow bool `json:"disallow,omitempty" yaml:"disallow,omitempty"` // allow or disallow the user +} + +// AuthRules defines generic access rules applicable to all users. 
+type AuthRules []AuthRule + +type AuthRule struct { + Client RString `json:"client,omitempty" yaml:"client,omitempty"` // the id of a connecting client + Username RString `json:"username,omitempty" yaml:"username,omitempty"` // the username of a user + Remote RString `json:"remote,omitempty" yaml:"remote,omitempty"` // remote address or + Password RString `json:"password,omitempty" yaml:"password,omitempty"` // the password of a user + Allow bool `json:"allow,omitempty" yaml:"allow,omitempty"` // allow or disallow the users +} + +// ACLRules defines generic topic or filter access rules applicable to all users. +type ACLRules []ACLRule + +// ACLRule defines access rules for a specific topic or filter. +type ACLRule struct { + Client RString `json:"client,omitempty" yaml:"client,omitempty"` // the id of a connecting client + Username RString `json:"username,omitempty" yaml:"username,omitempty"` // the username of a user + Remote RString `json:"remote,omitempty" yaml:"remote,omitempty"` // remote address or + Filters Filters `json:"filters,omitempty" yaml:"filters,omitempty"` // filters to match +} + +// Filters is a map of Access rules keyed on filter. +type Filters map[RString]Access + +// RString is a rule value string. +type RString string + +// Matches returns true if the rule matches a given string. +func (r RString) Matches(a string) bool { + rr := string(r) + if r == "" || r == "*" || a == rr { + return true + } + + i := strings.Index(rr, "*") + if i > 0 && len(a) > i && strings.Compare(rr[:i], a[:i]) == 0 { + return true + } + + return false +} + +// FilterMatches returns true if a filter matches a topic rule. +func (f RString) FilterMatches(a string) bool { + _, ok := MatchTopic(string(f), a) + return ok +} + +// MatchTopic checks if a given topic matches a filter, accounting for filter +// wildcards. Eg. filter /a/b/+/c == topic a/b/d/c. 
+func MatchTopic(filter string, topic string) (elements []string, matched bool) { + filterParts := strings.Split(filter, "/") + topicParts := strings.Split(topic, "/") + + elements = make([]string, 0) + for i := 0; i < len(filterParts); i++ { + if i >= len(topicParts) { + matched = false + return + } + + if filterParts[i] == "+" { + elements = append(elements, topicParts[i]) + continue + } + + if filterParts[i] == "#" { + matched = true + elements = append(elements, strings.Join(topicParts[i:], "/")) + return + } + + if filterParts[i] != topicParts[i] { + matched = false + return + } + } + + return elements, true +} + +// Ledger is an auth ledger containing access rules for users and topics. +type Ledger struct { + sync.Mutex `json:"-" yaml:"-"` + Users Users `json:"users" yaml:"users"` + Auth AuthRules `json:"auth" yaml:"auth"` + ACL ACLRules `json:"acl" yaml:"acl"` +} + +// Update updates the internal values of the ledger. +func (l *Ledger) Update(ln *Ledger) { + l.Lock() + defer l.Unlock() + l.Auth = ln.Auth + l.ACL = ln.ACL +} + +// AuthOk returns true if the rules indicate the user is allowed to authenticate. +func (l *Ledger) AuthOk(cl *mqtt.Client, pk packets.Packet) (n int, ok bool) { + // If the users map is set, always check for a predefined user first instead + // of iterating through global rules. + if l.Users != nil { + if u, ok := l.Users[string(cl.Properties.Username)]; ok && + u.Password != "" && + u.Password == RString(pk.Connect.Password) { + return 0, !u.Disallow + } + } + + // If there's no users map, or no user was found, attempt to find a matching + // rule (which may also contain a user). 
+ for n, rule := range l.Auth { + if rule.Client.Matches(cl.ID) && + rule.Username.Matches(string(cl.Properties.Username)) && + rule.Password.Matches(string(pk.Connect.Password)) && + rule.Remote.Matches(cl.Net.Remote) { + return n, rule.Allow + } + } + + return 0, false +} + +// ACLOk returns true if the rules indicate the user is allowed to read or write to +// a specific filter or topic respectively, based on the write bool. +func (l *Ledger) ACLOk(cl *mqtt.Client, topic string, write bool) (n int, ok bool) { + // If the users map is set, always check for a predefined user first instead + // of iterating through global rules. + if l.Users != nil { + if u, ok := l.Users[string(cl.Properties.Username)]; ok && len(u.ACL) > 0 { + for filter, access := range u.ACL { + if filter.FilterMatches(topic) { + if !write && (access == ReadOnly || access == ReadWrite) { + return n, true + } else if write && (access == WriteOnly || access == ReadWrite) { + return n, true + } else { + return n, false + } + } + } + } + } + + for n, rule := range l.ACL { + if rule.Client.Matches(cl.ID) && + rule.Username.Matches(string(cl.Properties.Username)) && + rule.Remote.Matches(cl.Net.Remote) { + if len(rule.Filters) == 0 { + return n, true + } + + if write { + for filter, access := range rule.Filters { + if access == WriteOnly || access == ReadWrite { + if filter.FilterMatches(topic) { + return n, true + } + } + } + } + + if !write { + for filter, access := range rule.Filters { + if access == ReadOnly || access == ReadWrite { + if filter.FilterMatches(topic) { + return n, true + } + } + } + } + + for filter, _ := range rule.Filters { + if filter.FilterMatches(topic) { + return n, false + } + } + } + } + + return 0, true +} + +// ToJSON encodes the values into a JSON string. +func (l *Ledger) ToJSON() (data []byte, err error) { + return json.Marshal(l) +} + +// ToYAML encodes the values into a YAML string. 
+func (l *Ledger) ToYAML() (data []byte, err error) { + return yaml.Marshal(l) +} + +// Unmarshal decodes a JSON or YAML string (such as a rule config from a file) into a struct. +func (l *Ledger) Unmarshal(data []byte) error { + l.Lock() + defer l.Unlock() + if len(data) == 0 { + return nil + } + + if data[0] == '{' { + return json.Unmarshal(data, l) + } + + return yaml.Unmarshal(data, &l) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/hooks/storage/storage.go b/vendor/github.com/mochi-mqtt/server/v2/hooks/storage/storage.go new file mode 100644 index 000000000..12ade7b2b --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/hooks/storage/storage.go @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package storage + +import ( + "encoding/json" + "errors" + + "github.com/mochi-mqtt/server/v2/packets" + "github.com/mochi-mqtt/server/v2/system" +) + +const ( + SubscriptionKey = "SUB" // unique key to denote Subscriptions in a store + SysInfoKey = "SYS" // unique key to denote server system information in a store + RetainedKey = "RET" // unique key to denote retained messages in a store + InflightKey = "IFM" // unique key to denote inflight messages in a store + ClientKey = "CL" // unique key to denote clients in a store +) + +var ( + // ErrDBFileNotOpen indicates that the file database (e.g. bolt/badger) wasn't open for reading. + ErrDBFileNotOpen = errors.New("db file not open") +) + +// Client is a storable representation of an mqtt client. 
+type Client struct { + Will ClientWill `json:"will"` // will topic and payload data if applicable + Properties ClientProperties `json:"properties"` // the connect properties for the client + Username []byte `json:"username"` // the username of the client + ID string `json:"id" storm:"id"` // the client id / storage key + T string `json:"t"` // the data type (client) + Remote string `json:"remote"` // the remote address of the client + Listener string `json:"listener"` // the listener the client connected on + ProtocolVersion byte `json:"protocolVersion"` // mqtt protocol version of the client + Clean bool `json:"clean"` // if the client requested a clean start/session +} + +// ClientProperties contains a limited set of the mqtt v5 properties specific to a client connection. +type ClientProperties struct { + AuthenticationData []byte `json:"authenticationData"` + User []packets.UserProperty `json:"user"` + AuthenticationMethod string `json:"authenticationMethod"` + SessionExpiryInterval uint32 `json:"sessionExpiryInterval"` + MaximumPacketSize uint32 `json:"maximumPacketSize"` + ReceiveMaximum uint16 `json:"receiveMaximum"` + TopicAliasMaximum uint16 `json:"topicAliasMaximum"` + SessionExpiryIntervalFlag bool `json:"sessionExpiryIntervalFlag"` + RequestProblemInfo byte `json:"requestProblemInfo"` + RequestProblemInfoFlag bool `json:"requestProblemInfoFlag"` + RequestResponseInfo byte `json:"requestResponseInfo"` +} + +// ClientWill contains a will message for a client, and limited mqtt v5 properties. +type ClientWill struct { + Payload []byte `json:"payload"` + User []packets.UserProperty `json:"user"` + TopicName string `json:"topicName"` + Flag uint32 `json:"flag"` + WillDelayInterval uint32 `json:"willDelayInterval"` + Qos byte `json:"qos"` + Retain bool `json:"retain"` +} + +// MarshalBinary encodes the values into a json string. 
+func (d Client) MarshalBinary() (data []byte, err error) { + return json.Marshal(d) +} + +// UnmarshalBinary decodes a json string into a struct. +func (d *Client) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return nil + } + return json.Unmarshal(data, d) +} + +// Message is a storable representation of an MQTT message (specifically publish). +type Message struct { + Properties MessageProperties `json:"properties"` // - + Payload []byte `json:"payload"` // the message payload (if retained) + T string `json:"t"` // the data type + ID string `json:"id" storm:"id"` // the storage key + Origin string `json:"origin"` // the id of the client who sent the message + TopicName string `json:"topic_name"` // the topic the message was sent to (if retained) + FixedHeader packets.FixedHeader `json:"fixedheader"` // the header properties of the message + Created int64 `json:"created"` // the time the message was created in unixtime + Sent int64 `json:"sent"` // the last time the message was sent (for retries) in unixtime (if inflight) + PacketID uint16 `json:"packet_id"` // the unique id of the packet (if inflight) +} + +// MessageProperties contains a limited subset of mqtt v5 properties specific to publish messages. +type MessageProperties struct { + CorrelationData []byte `json:"correlationData"` + SubscriptionIdentifier []int `json:"subscriptionIdentifier"` + User []packets.UserProperty `json:"user"` + ContentType string `json:"contentType"` + ResponseTopic string `json:"responseTopic"` + MessageExpiryInterval uint32 `json:"messageExpiry"` + TopicAlias uint16 `json:"topicAlias"` + PayloadFormat byte `json:"payloadFormat"` + PayloadFormatFlag bool `json:"payloadFormatFlag"` +} + +// MarshalBinary encodes the values into a json string. +func (d Message) MarshalBinary() (data []byte, err error) { + return json.Marshal(d) +} + +// UnmarshalBinary decodes a json string into a struct. 
+func (d *Message) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return nil + } + return json.Unmarshal(data, d) +} + +// ToPacket converts a storage.Message to a standard packet. +func (d *Message) ToPacket() packets.Packet { + pk := packets.Packet{ + FixedHeader: d.FixedHeader, + PacketID: d.PacketID, + TopicName: d.TopicName, + Payload: d.Payload, + Origin: d.Origin, + Created: d.Created, + Properties: packets.Properties{ + PayloadFormat: d.Properties.PayloadFormat, + PayloadFormatFlag: d.Properties.PayloadFormatFlag, + MessageExpiryInterval: d.Properties.MessageExpiryInterval, + ContentType: d.Properties.ContentType, + ResponseTopic: d.Properties.ResponseTopic, + CorrelationData: d.Properties.CorrelationData, + SubscriptionIdentifier: d.Properties.SubscriptionIdentifier, + TopicAlias: d.Properties.TopicAlias, + User: d.Properties.User, + }, + } + + // Return a deep copy of the packet data otherwise the slices will + // continue pointing at the values from the storage packet. + pk = pk.Copy(true) + pk.FixedHeader.Dup = d.FixedHeader.Dup + + return pk +} + +// Subscription is a storable representation of an mqtt subscription. +type Subscription struct { + T string `json:"t"` + ID string `json:"id" storm:"id"` + Client string `json:"client"` + Filter string `json:"filter"` + Identifier int `json:"identifier"` + RetainHandling byte `json:"retain_handling"` + Qos byte `json:"qos"` + RetainAsPublished bool `json:"retain_as_pub"` + NoLocal bool `json:"no_local"` +} + +// MarshalBinary encodes the values into a json string. +func (d Subscription) MarshalBinary() (data []byte, err error) { + return json.Marshal(d) +} + +// UnmarshalBinary decodes a json string into a struct. +func (d *Subscription) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return nil + } + return json.Unmarshal(data, d) +} + +// SystemInfo is a storable representation of the system information values. 
+type SystemInfo struct { + system.Info // embed the system info struct + T string `json:"t"` // the data type + ID string `json:"id" storm:"id"` // the storage key +} + +// MarshalBinary encodes the values into a json string. +func (d SystemInfo) MarshalBinary() (data []byte, err error) { + return json.Marshal(d) +} + +// UnmarshalBinary decodes a json string into a struct. +func (d *SystemInfo) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return nil + } + return json.Unmarshal(data, d) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/inflight.go b/vendor/github.com/mochi-mqtt/server/v2/inflight.go new file mode 100644 index 000000000..9d949584d --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/inflight.go @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2023 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package mqtt + +import ( + "sort" + "sync" + "sync/atomic" + + "github.com/mochi-mqtt/server/v2/packets" +) + +// Inflight is a map of InflightMessage keyed on packet id. +type Inflight struct { + sync.RWMutex + internal map[uint16]packets.Packet // internal contains the inflight packets + receiveQuota int32 // remaining inbound qos quota for flow control + sendQuota int32 // remaining outbound qos quota for flow control + maximumReceiveQuota int32 // maximum allowed receive quota + maximumSendQuota int32 // maximum allowed send quota +} + +// NewInflights returns a new instance of an Inflight packets map. +func NewInflights() *Inflight { + return &Inflight{ + internal: map[uint16]packets.Packet{}, + } +} + +// Set adds or updates an inflight packet by packet id. +func (i *Inflight) Set(m packets.Packet) bool { + i.Lock() + defer i.Unlock() + + _, ok := i.internal[m.PacketID] + i.internal[m.PacketID] = m + return !ok +} + +// Get returns an inflight packet by packet id. 
+func (i *Inflight) Get(id uint16) (packets.Packet, bool) { + i.RLock() + defer i.RUnlock() + + if m, ok := i.internal[id]; ok { + return m, true + } + + return packets.Packet{}, false +} + +// Len returns the size of the inflight messages map. +func (i *Inflight) Len() int { + i.RLock() + defer i.RUnlock() + return len(i.internal) +} + +// Clone returns a new instance of Inflight with the same message data. +// This is used when transferring inflights from a taken-over session. +func (i *Inflight) Clone() *Inflight { + c := NewInflights() + i.RLock() + defer i.RUnlock() + for k, v := range i.internal { + c.internal[k] = v + } + return c +} + +// GetAll returns all the inflight messages. +func (i *Inflight) GetAll(immediate bool) []packets.Packet { + i.RLock() + defer i.RUnlock() + + m := []packets.Packet{} + for _, v := range i.internal { + if !immediate || (immediate && v.Expiry < 0) { + m = append(m, v) + } + } + + sort.Slice(m, func(i, j int) bool { + return uint16(m[i].Created) < uint16(m[j].Created) + }) + + return m +} + +// NextImmediate returns the next inflight packet which is indicated to be sent immediately. +// This typically occurs when the quota has been exhausted, and we need to wait until new quota +// is free to continue sending. +func (i *Inflight) NextImmediate() (packets.Packet, bool) { + i.RLock() + defer i.RUnlock() + + m := i.GetAll(true) + if len(m) > 0 { + return m[0], true + } + + return packets.Packet{}, false +} + +// Delete removes an in-flight message from the map. Returns true if the message existed. +func (i *Inflight) Delete(id uint16) bool { + i.Lock() + defer i.Unlock() + + _, ok := i.internal[id] + delete(i.internal, id) + + return ok +} + +// TakeRecieveQuota reduces the receive quota by 1. +func (i *Inflight) DecreaseReceiveQuota() { + if atomic.LoadInt32(&i.receiveQuota) > 0 { + atomic.AddInt32(&i.receiveQuota, -1) + } +} + +// TakeRecieveQuota increases the receive quota by 1. 
+func (i *Inflight) IncreaseReceiveQuota() { + if atomic.LoadInt32(&i.receiveQuota) < atomic.LoadInt32(&i.maximumReceiveQuota) { + atomic.AddInt32(&i.receiveQuota, 1) + } +} + +// ResetReceiveQuota resets the receive quota to the maximum allowed value. +func (i *Inflight) ResetReceiveQuota(n int32) { + atomic.StoreInt32(&i.receiveQuota, n) + atomic.StoreInt32(&i.maximumReceiveQuota, n) +} + +// DecreaseSendQuota reduces the send quota by 1. +func (i *Inflight) DecreaseSendQuota() { + if atomic.LoadInt32(&i.sendQuota) > 0 { + atomic.AddInt32(&i.sendQuota, -1) + } +} + +// IncreaseSendQuota increases the send quota by 1. +func (i *Inflight) IncreaseSendQuota() { + if atomic.LoadInt32(&i.sendQuota) < atomic.LoadInt32(&i.maximumSendQuota) { + atomic.AddInt32(&i.sendQuota, 1) + } +} + +// ResetSendQuota resets the send quota to the maximum allowed value. +func (i *Inflight) ResetSendQuota(n int32) { + atomic.StoreInt32(&i.sendQuota, n) + atomic.StoreInt32(&i.maximumSendQuota, n) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/http_healthcheck.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/http_healthcheck.go new file mode 100644 index 000000000..e8fc0274f --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/http_healthcheck.go @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2023 mochi-mqtt, mochi-co +// SPDX-FileContributor: Derek Duncan + +package listeners + +import ( + "context" + "net/http" + "sync" + "sync/atomic" + "time" + + "github.com/rs/zerolog" +) + +// HTTPHealthCheck is a listener for providing an HTTP healthcheck endpoint. 
+type HTTPHealthCheck struct { + sync.RWMutex + id string // the internal id of the listener + address string // the network address to bind to + config *Config // configuration values for the listener + listen *http.Server // the http server + log *zerolog.Logger // server logger + end uint32 // ensure the close methods are only called once +} + +// NewHTTPHealthCheck initialises and returns a new HTTP listener, listening on an address. +func NewHTTPHealthCheck(id, address string, config *Config) *HTTPHealthCheck { + if config == nil { + config = new(Config) + } + return &HTTPHealthCheck{ + id: id, + address: address, + config: config, + } +} + +// ID returns the id of the listener. +func (l *HTTPHealthCheck) ID() string { + return l.id +} + +// Address returns the address of the listener. +func (l *HTTPHealthCheck) Address() string { + return l.address +} + +// Protocol returns the address of the listener. +func (l *HTTPHealthCheck) Protocol() string { + if l.listen != nil && l.listen.TLSConfig != nil { + return "https" + } + + return "http" +} + +// Init initializes the listener. +func (l *HTTPHealthCheck) Init(log *zerolog.Logger) error { + l.log = log + + mux := http.NewServeMux() + mux.HandleFunc("/healthcheck", func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + w.WriteHeader(http.StatusMethodNotAllowed) + } + }) + l.listen = &http.Server{ + ReadTimeout: 5 * time.Second, + WriteTimeout: 5 * time.Second, + Addr: l.address, + Handler: mux, + } + + if l.config.TLSConfig != nil { + l.listen.TLSConfig = l.config.TLSConfig + } + + return nil +} + +// Serve starts listening for new connections and serving responses. +func (l *HTTPHealthCheck) Serve(establish EstablishFn) { + if l.listen.TLSConfig != nil { + l.listen.ListenAndServeTLS("", "") + } else { + l.listen.ListenAndServe() + } +} + +// Close closes the listener and any client connections. 
+func (l *HTTPHealthCheck) Close(closeClients CloseFn) { + l.Lock() + defer l.Unlock() + + if atomic.CompareAndSwapUint32(&l.end, 0, 1) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + l.listen.Shutdown(ctx) + } + + closeClients(l.id) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/http_sysinfo.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/http_sysinfo.go new file mode 100644 index 000000000..aa32c91cb --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/http_sysinfo.go @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package listeners + +import ( + "context" + "encoding/json" + "io" + "net/http" + "sync" + "sync/atomic" + "time" + + "github.com/mochi-mqtt/server/v2/system" + + "github.com/rs/zerolog" +) + +// HTTPStats is a listener for presenting the server $SYS stats on a JSON http endpoint. +type HTTPStats struct { + sync.RWMutex + id string // the internal id of the listener + address string // the network address to bind to + config *Config // configuration values for the listener + listen *http.Server // the http server + log *zerolog.Logger // server logger + sysInfo *system.Info // pointers to the server data + end uint32 // ensure the close methods are only called once +} + +// NewHTTPStats initialises and returns a new HTTP listener, listening on an address. +func NewHTTPStats(id, address string, config *Config, sysInfo *system.Info) *HTTPStats { + if config == nil { + config = new(Config) + } + return &HTTPStats{ + id: id, + address: address, + sysInfo: sysInfo, + config: config, + } +} + +// ID returns the id of the listener. +func (l *HTTPStats) ID() string { + return l.id +} + +// Address returns the address of the listener. +func (l *HTTPStats) Address() string { + return l.address +} + +// Protocol returns the address of the listener. 
+func (l *HTTPStats) Protocol() string { + if l.listen != nil && l.listen.TLSConfig != nil { + return "https" + } + + return "http" +} + +// Init initializes the listener. +func (l *HTTPStats) Init(log *zerolog.Logger) error { + l.log = log + + mux := http.NewServeMux() + mux.HandleFunc("/", l.jsonHandler) + l.listen = &http.Server{ + ReadTimeout: 5 * time.Second, + WriteTimeout: 5 * time.Second, + Addr: l.address, + Handler: mux, + } + + if l.config.TLSConfig != nil { + l.listen.TLSConfig = l.config.TLSConfig + } + + return nil +} + +// Serve starts listening for new connections and serving responses. +func (l *HTTPStats) Serve(establish EstablishFn) { + if l.listen.TLSConfig != nil { + l.listen.ListenAndServeTLS("", "") + } else { + l.listen.ListenAndServe() + } +} + +// Close closes the listener and any client connections. +func (l *HTTPStats) Close(closeClients CloseFn) { + l.Lock() + defer l.Unlock() + + if atomic.CompareAndSwapUint32(&l.end, 0, 1) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + l.listen.Shutdown(ctx) + } + + closeClients(l.id) +} + +// jsonHandler is an HTTP handler which outputs the $SYS stats as JSON. +func (l *HTTPStats) jsonHandler(w http.ResponseWriter, req *http.Request) { + info := *l.sysInfo.Clone() + + out, err := json.MarshalIndent(info, "", "\t") + if err != nil { + io.WriteString(w, err.Error()) + } + + w.Write(out) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/listeners.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/listeners.go new file mode 100644 index 000000000..24031e003 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/listeners.go @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package listeners + +import ( + "crypto/tls" + "net" + "sync" + + "github.com/rs/zerolog" +) + +// Config contains configuration values for a listener. 
+type Config struct { + // TLSConfig is a tls.Config configuration to be used with the listener. + // See examples folder for basic and mutual-tls use. + TLSConfig *tls.Config +} + +// EstablishFn is a callback function for establishing new clients. +type EstablishFn func(id string, c net.Conn) error + +// CloseFunc is a callback function for closing all listener clients. +type CloseFn func(id string) + +// Listener is an interface for network listeners. A network listener listens +// for incoming client connections and adds them to the server. +type Listener interface { + Init(*zerolog.Logger) error // open the network address + Serve(EstablishFn) // starting actively listening for new connections + ID() string // return the id of the listener + Address() string // the address of the listener + Protocol() string // the protocol in use by the listener + Close(CloseFn) // stop and close the listener +} + +// Listeners contains the network listeners for the broker. +type Listeners struct { + wg sync.WaitGroup // a waitgroup that waits for all listeners to finish. + internal map[string]Listener // a map of active listeners. + sync.RWMutex +} + +// New returns a new instance of Listeners. +func New() *Listeners { + return &Listeners{ + internal: map[string]Listener{}, + } +} + +// Add adds a new listener to the listeners map, keyed on id. +func (l *Listeners) Add(val Listener) { + l.Lock() + defer l.Unlock() + l.internal[val.ID()] = val +} + +// Get returns the value of a listener if it exists. +func (l *Listeners) Get(id string) (Listener, bool) { + l.RLock() + defer l.RUnlock() + val, ok := l.internal[id] + return val, ok +} + +// Len returns the length of the listeners map. +func (l *Listeners) Len() int { + l.RLock() + defer l.RUnlock() + return len(l.internal) +} + +// Delete removes a listener from the internal map. 
+func (l *Listeners) Delete(id string) { + l.Lock() + defer l.Unlock() + delete(l.internal, id) +} + +// Serve starts a listener serving from the internal map. +func (l *Listeners) Serve(id string, establisher EstablishFn) { + l.RLock() + defer l.RUnlock() + listener := l.internal[id] + + go func(e EstablishFn) { + defer l.wg.Done() + l.wg.Add(1) + listener.Serve(e) + }(establisher) +} + +// ServeAll starts all listeners serving from the internal map. +func (l *Listeners) ServeAll(establisher EstablishFn) { + l.RLock() + i := 0 + ids := make([]string, len(l.internal)) + for id := range l.internal { + ids[i] = id + i++ + } + l.RUnlock() + + for _, id := range ids { + l.Serve(id, establisher) + } +} + +// Close stops a listener from the internal map. +func (l *Listeners) Close(id string, closer CloseFn) { + l.RLock() + defer l.RUnlock() + if listener, ok := l.internal[id]; ok { + listener.Close(closer) + } +} + +// CloseAll iterates and closes all registered listeners. +func (l *Listeners) CloseAll(closer CloseFn) { + l.RLock() + i := 0 + ids := make([]string, len(l.internal)) + for id := range l.internal { + ids[i] = id + i++ + } + l.RUnlock() + + for _, id := range ids { + l.Close(id, closer) + } + l.wg.Wait() +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/mock.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/mock.go new file mode 100644 index 000000000..8847af641 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/mock.go @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package listeners + +import ( + "fmt" + "net" + "sync" + + "github.com/rs/zerolog" +) + +// MockEstablisher is a function signature which can be used in testing. +func MockEstablisher(id string, c net.Conn) error { + return nil +} + +// MockCloser is a function signature which can be used in testing. 
+func MockCloser(id string) {} + +// MockListener is a mock listener for establishing client connections. +type MockListener struct { + sync.RWMutex + id string // the id of the listener + address string // the network address the listener binds to + Config *Config // configuration for the listener + done chan bool // indicate the listener is done + Serving bool // indicate the listener is serving + Listening bool // indiciate the listener is listening + ErrListen bool // throw an error on listen +} + +// NewMockListener returns a new instance of MockListener. +func NewMockListener(id, address string) *MockListener { + return &MockListener{ + id: id, + address: address, + done: make(chan bool), + } +} + +// Serve serves the mock listener. +func (l *MockListener) Serve(establisher EstablishFn) { + l.Lock() + l.Serving = true + l.Unlock() + + for range l.done { + return + } +} + +// Init initializes the listener. +func (l *MockListener) Init(log *zerolog.Logger) error { + if l.ErrListen { + return fmt.Errorf("listen failure") + } + + l.Lock() + defer l.Unlock() + l.Listening = true + return nil +} + +// ID returns the id of the mock listener. +func (l *MockListener) ID() string { + return l.id +} + +// Address returns the address of the listener. +func (l *MockListener) Address() string { + return l.address +} + +// Protocol returns the address of the listener. +func (l *MockListener) Protocol() string { + return "mock" +} + +// Close closes the mock listener. +func (l *MockListener) Close(closer CloseFn) { + l.Lock() + defer l.Unlock() + l.Serving = false + closer(l.id) + close(l.done) +} + +// IsServing indicates whether the mock listener is serving. +func (l *MockListener) IsServing() bool { + l.Lock() + defer l.Unlock() + return l.Serving +} + +// IsListening indicates whether the mock listener is listening. 
+func (l *MockListener) IsListening() bool { + l.Lock() + defer l.Unlock() + return l.Listening +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/net.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/net.go new file mode 100644 index 000000000..662b9ec0d --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/net.go @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2023 mochi-mqtt, mochi-co +// SPDX-FileContributor: Jeroen Rinzema + +package listeners + +import ( + "net" + "sync" + "sync/atomic" + + "github.com/rs/zerolog" +) + +// Net is a listener for establishing client connections on basic TCP protocol. +type Net struct { // [MQTT-4.2.0-1] + mu sync.Mutex + listener net.Listener // a net.Listener which will listen for new clients + id string // the internal id of the listener + log *zerolog.Logger // server logger + end uint32 // ensure the close methods are only called once +} + +// NewNet initialises and returns a listener serving incoming connections on the given net.Listener +func NewNet(id string, listener net.Listener) *Net { + return &Net{ + id: id, + listener: listener, + } +} + +// ID returns the id of the listener. +func (l *Net) ID() string { + return l.id +} + +// Address returns the address of the listener. +func (l *Net) Address() string { + return l.listener.Addr().String() +} + +// Protocol returns the network of the listener. +func (l *Net) Protocol() string { + return l.listener.Addr().Network() +} + +// Init initializes the listener. +func (l *Net) Init(log *zerolog.Logger) error { + l.log = log + return nil +} + +// Serve starts waiting for new TCP connections, and calls the establish +// connection callback for any received. 
+func (l *Net) Serve(establish EstablishFn) { + for { + if atomic.LoadUint32(&l.end) == 1 { + return + } + + conn, err := l.listener.Accept() + if err != nil { + return + } + + if atomic.LoadUint32(&l.end) == 0 { + go func() { + err = establish(l.id, conn) + if err != nil { + l.log.Warn().Err(err).Send() + } + }() + } + } +} + +// Close closes the listener and any client connections. +func (l *Net) Close(closeClients CloseFn) { + l.mu.Lock() + defer l.mu.Unlock() + + if atomic.CompareAndSwapUint32(&l.end, 0, 1) { + closeClients(l.id) + } + + if l.listener != nil { + err := l.listener.Close() + if err != nil { + return + } + } +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/tcp.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/tcp.go new file mode 100644 index 000000000..1fc34da89 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/tcp.go @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package listeners + +import ( + "crypto/tls" + "net" + "sync" + "sync/atomic" + + "github.com/rs/zerolog" +) + +// TCP is a listener for establishing client connections on basic TCP protocol. +type TCP struct { // [MQTT-4.2.0-1] + sync.RWMutex + id string // the internal id of the listener + address string // the network address to bind to + listen net.Listener // a net.Listener which will listen for new clients + config *Config // configuration values for the listener + log *zerolog.Logger // server logger + end uint32 // ensure the close methods are only called once +} + +// NewTCP initialises and returns a new TCP listener, listening on an address. +func NewTCP(id, address string, config *Config) *TCP { + if config == nil { + config = new(Config) + } + + return &TCP{ + id: id, + address: address, + config: config, + } +} + +// ID returns the id of the listener. 
+func (l *TCP) ID() string { + return l.id +} + +// Address returns the address of the listener. +func (l *TCP) Address() string { + return l.address +} + +// Protocol returns the address of the listener. +func (l *TCP) Protocol() string { + return "tcp" +} + +// Init initializes the listener. +func (l *TCP) Init(log *zerolog.Logger) error { + l.log = log + + var err error + if l.config.TLSConfig != nil { + l.listen, err = tls.Listen("tcp", l.address, l.config.TLSConfig) + } else { + l.listen, err = net.Listen("tcp", l.address) + } + + return err +} + +// Serve starts waiting for new TCP connections, and calls the establish +// connection callback for any received. +func (l *TCP) Serve(establish EstablishFn) { + for { + if atomic.LoadUint32(&l.end) == 1 { + return + } + + conn, err := l.listen.Accept() + if err != nil { + return + } + + if atomic.LoadUint32(&l.end) == 0 { + go func() { + err = establish(l.id, conn) + if err != nil { + l.log.Warn().Err(err).Send() + } + }() + } + } +} + +// Close closes the listener and any client connections. +func (l *TCP) Close(closeClients CloseFn) { + l.Lock() + defer l.Unlock() + + if atomic.CompareAndSwapUint32(&l.end, 0, 1) { + closeClients(l.id) + } + + if l.listen != nil { + err := l.listen.Close() + if err != nil { + return + } + } +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/unixsock.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/unixsock.go new file mode 100644 index 000000000..ebe54c3f5 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/unixsock.go @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: jason@zgwit.com + +package listeners + +import ( + "net" + "os" + "sync" + "sync/atomic" + + "github.com/rs/zerolog" +) + +// UnixSock is a listener for establishing client connections on basic UnixSock protocol. +type UnixSock struct { + sync.RWMutex + id string // the internal id of the listener. 
+ address string // the network address to bind to. + listen net.Listener // a net.Listener which will listen for new clients. + log *zerolog.Logger // server logger + end uint32 // ensure the close methods are only called once. +} + +// NewUnixSock initialises and returns a new UnixSock listener, listening on an address. +func NewUnixSock(id, address string) *UnixSock { + return &UnixSock{ + id: id, + address: address, + } +} + +// ID returns the id of the listener. +func (l *UnixSock) ID() string { + return l.id +} + +// Address returns the address of the listener. +func (l *UnixSock) Address() string { + return l.address +} + +// Protocol returns the address of the listener. +func (l *UnixSock) Protocol() string { + return "unix" +} + +// Init initializes the listener. +func (l *UnixSock) Init(log *zerolog.Logger) error { + l.log = log + + var err error + _ = os.Remove(l.address) + l.listen, err = net.Listen("unix", l.address) + return err +} + +// Serve starts waiting for new UnixSock connections, and calls the establish +// connection callback for any received. +func (l *UnixSock) Serve(establish EstablishFn) { + for { + if atomic.LoadUint32(&l.end) == 1 { + return + } + + conn, err := l.listen.Accept() + if err != nil { + return + } + + if atomic.LoadUint32(&l.end) == 0 { + go func() { + err = establish(l.id, conn) + if err != nil { + l.log.Warn().Err(err).Send() + } + }() + } + } +} + +// Close closes the listener and any client connections. 
+func (l *UnixSock) Close(closeClients CloseFn) { + l.Lock() + defer l.Unlock() + + if atomic.CompareAndSwapUint32(&l.end, 0, 1) { + closeClients(l.id) + } + + if l.listen != nil { + err := l.listen.Close() + if err != nil { + return + } + } +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/websocket.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/websocket.go new file mode 100644 index 000000000..0b06c86f8 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/websocket.go @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package listeners + +import ( + "context" + "errors" + "io" + "net" + "net/http" + "sync" + "sync/atomic" + "time" + + "github.com/gorilla/websocket" + "github.com/rs/zerolog" +) + +var ( + // ErrInvalidMessage indicates that a message payload was not valid. + ErrInvalidMessage = errors.New("message type not binary") +) + +// Websocket is a listener for establishing websocket connections. +type Websocket struct { // [MQTT-4.2.0-1] + sync.RWMutex + id string // the internal id of the listener + address string // the network address to bind to + config *Config // configuration values for the listener + listen *http.Server // an http server for serving websocket connections + log *zerolog.Logger // server logger + establish EstablishFn // the server's establish connection handler + upgrader *websocket.Upgrader // upgrade the incoming http/tcp connection to a websocket compliant connection. + end uint32 // ensure the close methods are only called once +} + +// NewWebsocket initialises and returns a new Websocket listener, listening on an address. 
+func NewWebsocket(id, address string, config *Config) *Websocket { + if config == nil { + config = new(Config) + } + + return &Websocket{ + id: id, + address: address, + config: config, + upgrader: &websocket.Upgrader{ + Subprotocols: []string{"mqtt"}, + CheckOrigin: func(r *http.Request) bool { + return true + }, + }, + } +} + +// ID returns the id of the listener. +func (l *Websocket) ID() string { + return l.id +} + +// Address returns the address of the listener. +func (l *Websocket) Address() string { + return l.address +} + +// Protocol returns the address of the listener. +func (l *Websocket) Protocol() string { + if l.config.TLSConfig != nil { + return "wss" + } + + return "ws" +} + +// Init initializes the listener. +func (l *Websocket) Init(log *zerolog.Logger) error { + l.log = log + + mux := http.NewServeMux() + mux.HandleFunc("/", l.handler) + l.listen = &http.Server{ + Addr: l.address, + Handler: mux, + TLSConfig: l.config.TLSConfig, + ReadTimeout: 60 * time.Second, + WriteTimeout: 60 * time.Second, + } + + return nil +} + +// handler upgrades and handles an incoming websocket connection. +func (l *Websocket) handler(w http.ResponseWriter, r *http.Request) { + c, err := l.upgrader.Upgrade(w, r, nil) + if err != nil { + return + } + defer c.Close() + + err = l.establish(l.id, &wsConn{Conn: c.UnderlyingConn(), c: c}) + if err != nil { + l.log.Warn().Err(err).Send() + } +} + +// Serve starts waiting for new Websocket connections, and calls the connection +// establishment callback for any received. +func (l *Websocket) Serve(establish EstablishFn) { + l.establish = establish + + if l.listen.TLSConfig != nil { + l.listen.ListenAndServeTLS("", "") + } else { + l.listen.ListenAndServe() + } +} + +// Close closes the listener and any client connections. 
+func (l *Websocket) Close(closeClients CloseFn) { + l.Lock() + defer l.Unlock() + + if atomic.CompareAndSwapUint32(&l.end, 0, 1) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + l.listen.Shutdown(ctx) + } + + closeClients(l.id) +} + +// wsConn is a websocket connection which satisfies the net.Conn interface. +type wsConn struct { + net.Conn + c *websocket.Conn + + // reader for the current message (may be nil) + r io.Reader +} + +// Read reads the next span of bytes from the websocket connection and returns the number of bytes read. +func (ws *wsConn) Read(p []byte) (int, error) { + if ws.r == nil { + op, r, err := ws.c.NextReader() + if err != nil { + return 0, err + } + + if op != websocket.BinaryMessage { + err = ErrInvalidMessage + return 0, err + } + + ws.r = r + } + + var n int + for { + // buffer is full, return what we've read so far + if n == len(p) { + return n, nil + } + + br, err := ws.r.Read(p[n:]) + n += br + if err != nil { + // when ANY error occurs, we consider this the end of the current message (either because it really is, via + // io.EOF, or because something bad happened, in which case we want to drop the remainder) + ws.r = nil + + if errors.Is(err, io.EOF) { + err = nil + } + return n, err + } + } +} + +// Write writes bytes to the websocket connection. +func (ws *wsConn) Write(p []byte) (int, error) { + err := ws.c.WriteMessage(websocket.BinaryMessage, p) + if err != nil { + return 0, err + } + + return len(p), nil +} + +// Close signals the underlying websocket conn to close. 
+func (ws *wsConn) Close() error { + return ws.Conn.Close() +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/packets/codec.go b/vendor/github.com/mochi-mqtt/server/v2/packets/codec.go new file mode 100644 index 000000000..152d777ed --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/packets/codec.go @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package packets + +import ( + "bytes" + "encoding/binary" + "io" + "unicode/utf8" + "unsafe" +) + +// bytesToString provides a zero-alloc no-copy byte to string conversion. +// via https://github.com/golang/go/issues/25484#issuecomment-391415660 +func bytesToString(bs []byte) string { + return *(*string)(unsafe.Pointer(&bs)) +} + +// decodeUint16 extracts the value of two bytes from a byte array. +func decodeUint16(buf []byte, offset int) (uint16, int, error) { + if len(buf) < offset+2 { + return 0, 0, ErrMalformedOffsetUintOutOfRange + } + + return binary.BigEndian.Uint16(buf[offset : offset+2]), offset + 2, nil +} + +// decodeUint32 extracts the value of four bytes from a byte array. +func decodeUint32(buf []byte, offset int) (uint32, int, error) { + if len(buf) < offset+4 { + return 0, 0, ErrMalformedOffsetUintOutOfRange + } + + return binary.BigEndian.Uint32(buf[offset : offset+4]), offset + 4, nil +} + +// decodeString extracts a string from a byte array, beginning at an offset. +func decodeString(buf []byte, offset int) (string, int, error) { + b, n, err := decodeBytes(buf, offset) + if err != nil { + return "", 0, err + } + + if !validUTF8(b) { // [MQTT-1.5.4-1] [MQTT-3.1.3-5] + return "", 0, ErrMalformedInvalidUTF8 + } + + return bytesToString(b), n, nil +} + +// validUTF8 checks if the byte array contains valid UTF-8 characters. 
+func validUTF8(b []byte) bool { + return utf8.Valid(b) && bytes.IndexByte(b, 0x00) == -1 // [MQTT-1.5.4-1] [MQTT-1.5.4-2] +} + +// decodeBytes extracts a byte array from a byte array, beginning at an offset. Used primarily for message payloads. +func decodeBytes(buf []byte, offset int) ([]byte, int, error) { + length, next, err := decodeUint16(buf, offset) + if err != nil { + return make([]byte, 0), 0, err + } + + if next+int(length) > len(buf) { + return make([]byte, 0), 0, ErrMalformedOffsetBytesOutOfRange + } + + return buf[next : next+int(length)], next + int(length), nil +} + +// decodeByte extracts the value of a byte from a byte array. +func decodeByte(buf []byte, offset int) (byte, int, error) { + if len(buf) <= offset { + return 0, 0, ErrMalformedOffsetByteOutOfRange + } + return buf[offset], offset + 1, nil +} + +// decodeByteBool extracts the value of a byte from a byte array and returns a bool. +func decodeByteBool(buf []byte, offset int) (bool, int, error) { + if len(buf) <= offset { + return false, 0, ErrMalformedOffsetBoolOutOfRange + } + return 1&buf[offset] > 0, offset + 1, nil +} + +// encodeBool returns a byte instead of a bool. +func encodeBool(b bool) byte { + if b { + return 1 + } + return 0 +} + +// encodeBytes encodes a byte array to a byte array. Used primarily for message payloads. +func encodeBytes(val []byte) []byte { + // In most circumstances the number of bytes being encoded is small. + // Setting the cap to a low amount allows us to account for those without + // triggering allocation growth on append unless we need to. + buf := make([]byte, 2, 32) + binary.BigEndian.PutUint16(buf, uint16(len(val))) + return append(buf, val...) +} + +// encodeUint16 encodes a uint16 value to a byte array. +func encodeUint16(val uint16) []byte { + buf := make([]byte, 2) + binary.BigEndian.PutUint16(buf, val) + return buf +} + +// encodeUint32 encodes a uint16 value to a byte array. 
+func encodeUint32(val uint32) []byte { + buf := make([]byte, 4) + binary.BigEndian.PutUint32(buf, val) + return buf +} + +// encodeString encodes a string to a byte array. +func encodeString(val string) []byte { + // Like encodeBytes, we set the cap to a small number to avoid + // triggering allocation growth on append unless we absolutely need to. + buf := make([]byte, 2, 32) + binary.BigEndian.PutUint16(buf, uint16(len(val))) + return append(buf, []byte(val)...) +} + +// encodeLength writes length bits for the header. +func encodeLength(b *bytes.Buffer, length int64) { + // 1.5.5 Variable Byte Integer encode non-normative + // https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901027 + for { + eb := byte(length % 128) + length /= 128 + if length > 0 { + eb |= 0x80 + } + b.WriteByte(eb) + if length == 0 { + break // [MQTT-1.5.5-1] + } + } +} + +func DecodeLength(b io.ByteReader) (n, bu int, err error) { + // see 1.5.5 Variable Byte Integer decode non-normative + // https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901027 + var multiplier uint32 + var value uint32 + bu = 1 + for { + eb, err := b.ReadByte() + if err != nil { + return 0, bu, err + } + + value |= uint32(eb&127) << multiplier + if value > 268435455 { + return 0, bu, ErrMalformedVariableByteInteger + } + + if (eb & 128) == 0 { + break + } + + multiplier += 7 + bu++ + } + + return int(value), bu, nil +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/packets/codes.go b/vendor/github.com/mochi-mqtt/server/v2/packets/codes.go new file mode 100644 index 000000000..154d7ae5d --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/packets/codes.go @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package packets + +// Code contains a reason code and reason string for a response. 
+type Code struct { + Reason string + Code byte +} + +// String returns the readable reason for a code. +func (c Code) String() string { + return c.Reason +} + +// Error returns the readable reason for a code. +func (c Code) Error() string { + return c.Reason +} + +var ( + // QosCodes indicates the reason codes for each Qos byte. + QosCodes = map[byte]Code{ + 0: CodeGrantedQos0, + 1: CodeGrantedQos1, + 2: CodeGrantedQos2, + } + + CodeSuccessIgnore = Code{Code: 0x00, Reason: "ignore packet"} + CodeSuccess = Code{Code: 0x00, Reason: "success"} + CodeDisconnect = Code{Code: 0x00, Reason: "disconnected"} + CodeGrantedQos0 = Code{Code: 0x00, Reason: "granted qos 0"} + CodeGrantedQos1 = Code{Code: 0x01, Reason: "granted qos 1"} + CodeGrantedQos2 = Code{Code: 0x02, Reason: "granted qos 2"} + CodeDisconnectWillMessage = Code{Code: 0x04, Reason: "disconnect with will message"} + CodeNoMatchingSubscribers = Code{Code: 0x10, Reason: "no matching subscribers"} + CodeNoSubscriptionExisted = Code{Code: 0x11, Reason: "no subscription existed"} + CodeContinueAuthentication = Code{Code: 0x18, Reason: "continue authentication"} + CodeReAuthenticate = Code{Code: 0x19, Reason: "re-authenticate"} + ErrUnspecifiedError = Code{Code: 0x80, Reason: "unspecified error"} + ErrMalformedPacket = Code{Code: 0x81, Reason: "malformed packet"} + ErrMalformedProtocolName = Code{Code: 0x81, Reason: "malformed packet: protocol name"} + ErrMalformedProtocolVersion = Code{Code: 0x81, Reason: "malformed packet: protocol version"} + ErrMalformedFlags = Code{Code: 0x81, Reason: "malformed packet: flags"} + ErrMalformedKeepalive = Code{Code: 0x81, Reason: "malformed packet: keepalive"} + ErrMalformedPacketID = Code{Code: 0x81, Reason: "malformed packet: packet identifier"} + ErrMalformedTopic = Code{Code: 0x81, Reason: "malformed packet: topic"} + ErrMalformedWillTopic = Code{Code: 0x81, Reason: "malformed packet: will topic"} + ErrMalformedWillPayload = Code{Code: 0x81, Reason: "malformed packet: will 
message"} + ErrMalformedUsername = Code{Code: 0x81, Reason: "malformed packet: username"} + ErrMalformedPassword = Code{Code: 0x81, Reason: "malformed packet: password"} + ErrMalformedQos = Code{Code: 0x81, Reason: "malformed packet: qos"} + ErrMalformedOffsetUintOutOfRange = Code{Code: 0x81, Reason: "malformed packet: offset uint out of range"} + ErrMalformedOffsetBytesOutOfRange = Code{Code: 0x81, Reason: "malformed packet: offset bytes out of range"} + ErrMalformedOffsetByteOutOfRange = Code{Code: 0x81, Reason: "malformed packet: offset byte out of range"} + ErrMalformedOffsetBoolOutOfRange = Code{Code: 0x81, Reason: "malformed packet: offset boolean out of range"} + ErrMalformedInvalidUTF8 = Code{Code: 0x81, Reason: "malformed packet: invalid utf-8 string"} + ErrMalformedVariableByteInteger = Code{Code: 0x81, Reason: "malformed packet: variable byte integer out of range"} + ErrMalformedBadProperty = Code{Code: 0x81, Reason: "malformed packet: unknown property"} + ErrMalformedProperties = Code{Code: 0x81, Reason: "malformed packet: properties"} + ErrMalformedWillProperties = Code{Code: 0x81, Reason: "malformed packet: will properties"} + ErrMalformedSessionPresent = Code{Code: 0x81, Reason: "malformed packet: session present"} + ErrMalformedReasonCode = Code{Code: 0x81, Reason: "malformed packet: reason code"} + ErrProtocolViolation = Code{Code: 0x82, Reason: "protocol violation"} + ErrProtocolViolationProtocolName = Code{Code: 0x82, Reason: "protocol violation: protocol name"} + ErrProtocolViolationProtocolVersion = Code{Code: 0x82, Reason: "protocol violation: protocol version"} + ErrProtocolViolationReservedBit = Code{Code: 0x82, Reason: "protocol violation: reserved bit not 0"} + ErrProtocolViolationFlagNoUsername = Code{Code: 0x82, Reason: "protocol violation: username flag set but no value"} + ErrProtocolViolationFlagNoPassword = Code{Code: 0x82, Reason: "protocol violation: password flag set but no value"} + ErrProtocolViolationUsernameNoFlag = Code{Code: 
0x82, Reason: "protocol violation: username set but no flag"} + ErrProtocolViolationPasswordNoFlag = Code{Code: 0x82, Reason: "protocol violation: username set but no flag"} + ErrProtocolViolationPasswordTooLong = Code{Code: 0x82, Reason: "protocol violation: password too long"} + ErrProtocolViolationUsernameTooLong = Code{Code: 0x82, Reason: "protocol violation: username too long"} + ErrProtocolViolationNoPacketID = Code{Code: 0x82, Reason: "protocol violation: missing packet id"} + ErrProtocolViolationSurplusPacketID = Code{Code: 0x82, Reason: "protocol violation: surplus packet id"} + ErrProtocolViolationQosOutOfRange = Code{Code: 0x82, Reason: "protocol violation: qos out of range"} + ErrProtocolViolationSecondConnect = Code{Code: 0x82, Reason: "protocol violation: second connect packet"} + ErrProtocolViolationZeroNonZeroExpiry = Code{Code: 0x82, Reason: "protocol violation: non-zero expiry"} + ErrProtocolViolationRequireFirstConnect = Code{Code: 0x82, Reason: "protocol violation: first packet must be connect"} + ErrProtocolViolationWillFlagNoPayload = Code{Code: 0x82, Reason: "protocol violation: will flag no payload"} + ErrProtocolViolationWillFlagSurplusRetain = Code{Code: 0x82, Reason: "protocol violation: will flag surplus retain"} + ErrProtocolViolationSurplusWildcard = Code{Code: 0x82, Reason: "protocol violation: topic contains wildcards"} + ErrProtocolViolationSurplusSubID = Code{Code: 0x82, Reason: "protocol violation: contained subscription identifier"} + ErrProtocolViolationInvalidTopic = Code{Code: 0x82, Reason: "protocol violation: invalid topic"} + ErrProtocolViolationInvalidSharedNoLocal = Code{Code: 0x82, Reason: "protocol violation: invalid shared no local"} + ErrProtocolViolationNoFilters = Code{Code: 0x82, Reason: "protocol violation: must contain at least one filter"} + ErrProtocolViolationInvalidReason = Code{Code: 0x82, Reason: "protocol violation: invalid reason"} + ErrProtocolViolationOversizeSubID = Code{Code: 0x82, Reason: "protocol 
violation: oversize subscription id"} + ErrProtocolViolationDupNoQos = Code{Code: 0x82, Reason: "protocol violation: dup true with no qos"} + ErrProtocolViolationUnsupportedProperty = Code{Code: 0x82, Reason: "protocol violation: unsupported property"} + ErrProtocolViolationNoTopic = Code{Code: 0x82, Reason: "protocol violation: no topic or alias"} + ErrImplementationSpecificError = Code{Code: 0x83, Reason: "implementation specific error"} + ErrRejectPacket = Code{Code: 0x83, Reason: "packet rejected"} + ErrUnsupportedProtocolVersion = Code{Code: 0x84, Reason: "unsupported protocol version"} + ErrClientIdentifierNotValid = Code{Code: 0x85, Reason: "client identifier not valid"} + ErrClientIdentifierTooLong = Code{Code: 0x85, Reason: "client identifier too long"} + ErrBadUsernameOrPassword = Code{Code: 0x86, Reason: "bad username or password"} + ErrNotAuthorized = Code{Code: 0x87, Reason: "not authorized"} + ErrServerUnavailable = Code{Code: 0x88, Reason: "server unavailable"} + ErrServerBusy = Code{Code: 0x89, Reason: "server busy"} + ErrBanned = Code{Code: 0x8A, Reason: "banned"} + ErrServerShuttingDown = Code{Code: 0x8B, Reason: "server shutting down"} + ErrBadAuthenticationMethod = Code{Code: 0x8C, Reason: "bad authentication method"} + ErrKeepAliveTimeout = Code{Code: 0x8D, Reason: "keep alive timeout"} + ErrSessionTakenOver = Code{Code: 0x8E, Reason: "session takeover"} + ErrTopicFilterInvalid = Code{Code: 0x8F, Reason: "topic filter invalid"} + ErrTopicNameInvalid = Code{Code: 0x90, Reason: "topic name invalid"} + ErrPacketIdentifierInUse = Code{Code: 0x91, Reason: "packet identifier in use"} + ErrPacketIdentifierNotFound = Code{Code: 0x92, Reason: "packet identifier not found"} + ErrReceiveMaximum = Code{Code: 0x93, Reason: "receive maximum exceeded"} + ErrTopicAliasInvalid = Code{Code: 0x94, Reason: "topic alias invalid"} + ErrPacketTooLarge = Code{Code: 0x95, Reason: "packet too large"} + ErrMessageRateTooHigh = Code{Code: 0x96, Reason: "message rate too 
high"} + ErrQuotaExceeded = Code{Code: 0x97, Reason: "quota exceeded"} + ErrPendingClientWritesExceeded = Code{Code: 0x97, Reason: "too many pending writes"} + ErrAdministrativeAction = Code{Code: 0x98, Reason: "administrative action"} + ErrPayloadFormatInvalid = Code{Code: 0x99, Reason: "payload format invalid"} + ErrRetainNotSupported = Code{Code: 0x9A, Reason: "retain not supported"} + ErrQosNotSupported = Code{Code: 0x9B, Reason: "qos not supported"} + ErrUseAnotherServer = Code{Code: 0x9C, Reason: "use another server"} + ErrServerMoved = Code{Code: 0x9D, Reason: "server moved"} + ErrSharedSubscriptionsNotSupported = Code{Code: 0x9E, Reason: "shared subscriptions not supported"} + ErrConnectionRateExceeded = Code{Code: 0x9F, Reason: "connection rate exceeded"} + ErrMaxConnectTime = Code{Code: 0xA0, Reason: "maximum connect time"} + ErrSubscriptionIdentifiersNotSupported = Code{Code: 0xA1, Reason: "subscription identifiers not supported"} + ErrWildcardSubscriptionsNotSupported = Code{Code: 0xA2, Reason: "wildcard subscriptions not supported"} + + // MQTTv3 specific bytes. + Err3UnsupportedProtocolVersion = Code{Code: 0x01} + Err3ClientIdentifierNotValid = Code{Code: 0x02} + Err3ServerUnavailable = Code{Code: 0x03} + ErrMalformedUsernameOrPassword = Code{Code: 0x04} + Err3NotAuthorized = Code{Code: 0x05} + + // V5CodesToV3 maps MQTTv5 Connack reason codes to MQTTv3 return codes. + // This is required because MQTTv3 has different return byte specification. 
+ // See http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc385349257 + V5CodesToV3 = map[Code]Code{ + ErrUnsupportedProtocolVersion: Err3UnsupportedProtocolVersion, + ErrClientIdentifierNotValid: Err3ClientIdentifierNotValid, + ErrServerUnavailable: Err3ServerUnavailable, + ErrMalformedUsername: ErrMalformedUsernameOrPassword, + ErrMalformedPassword: ErrMalformedUsernameOrPassword, + ErrBadUsernameOrPassword: Err3NotAuthorized, + } +) diff --git a/vendor/github.com/mochi-mqtt/server/v2/packets/fixedheader.go b/vendor/github.com/mochi-mqtt/server/v2/packets/fixedheader.go new file mode 100644 index 000000000..eb20451bf --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/packets/fixedheader.go @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package packets + +import ( + "bytes" +) + +// FixedHeader contains the values of the fixed header portion of the MQTT packet. +type FixedHeader struct { + Remaining int `json:"remaining"` // the number of remaining bytes in the payload. + Type byte `json:"type"` // the type of the packet (PUBLISH, SUBSCRIBE, etc) from bits 7 - 4 (byte 1). + Qos byte `json:"qos"` // indicates the quality of service expected. + Dup bool `json:"dup"` // indicates if the packet was already sent at an earlier time. + Retain bool `json:"retain"` // whether the message should be retained. +} + +// Encode encodes the FixedHeader and returns a bytes buffer. +func (fh *FixedHeader) Encode(buf *bytes.Buffer) { + buf.WriteByte(fh.Type<<4 | encodeBool(fh.Dup)<<3 | fh.Qos<<1 | encodeBool(fh.Retain)) + encodeLength(buf, int64(fh.Remaining)) +} + +// Decode extracts the specification bits from the header byte. +func (fh *FixedHeader) Decode(hb byte) error { + fh.Type = hb >> 4 // Get the message type from the first 4 bytes. 
+ + switch fh.Type { + case Publish: + if (hb>>1)&0x01 > 0 && (hb>>1)&0x02 > 0 { + return ErrProtocolViolationQosOutOfRange // [MQTT-3.3.1-4] + } + + fh.Dup = (hb>>3)&0x01 > 0 // is duplicate + fh.Qos = (hb >> 1) & 0x03 // qos flag + fh.Retain = hb&0x01 > 0 // is retain flag + case Pubrel: + fallthrough + case Subscribe: + fallthrough + case Unsubscribe: + if (hb>>0)&0x01 != 0 || (hb>>1)&0x01 != 1 || (hb>>2)&0x01 != 0 || (hb>>3)&0x01 != 0 { // [MQTT-3.8.1-1] [MQTT-3.10.1-1] + return ErrMalformedFlags + } + + fh.Qos = (hb >> 1) & 0x03 + default: + if (hb>>0)&0x01 != 0 || + (hb>>1)&0x01 != 0 || + (hb>>2)&0x01 != 0 || + (hb>>3)&0x01 != 0 { // [MQTT-3.8.3-5] [MQTT-3.14.1-1] [MQTT-3.15.1-1] + return ErrMalformedFlags + } + } + + if fh.Qos == 0 && fh.Dup { + return ErrProtocolViolationDupNoQos // [MQTT-3.3.1-2] + } + + return nil +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/packets/packets.go b/vendor/github.com/mochi-mqtt/server/v2/packets/packets.go new file mode 100644 index 000000000..2611bcb4d --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/packets/packets.go @@ -0,0 +1,1148 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package packets + +import ( + "bytes" + "errors" + "fmt" + "math" + "strconv" + "strings" + "sync" +) + +// All of the valid packet types and their packet identifier. +const ( + Reserved byte = iota // 0 - we use this in packet tests to indicate special-test or all packets. + Connect // 1 + Connack // 2 + Publish // 3 + Puback // 4 + Pubrec // 5 + Pubrel // 6 + Pubcomp // 7 + Subscribe // 8 + Suback // 9 + Unsubscribe // 10 + Unsuback // 11 + Pingreq // 12 + Pingresp // 13 + Disconnect // 14 + Auth // 15 + WillProperties byte = 99 // Special byte for validating Will Properties. +) + +var ( + // ErrNoValidPacketAvailable indicates the packet type byte provided does not exist in the mqtt specification. 
+ ErrNoValidPacketAvailable error = errors.New("no valid packet available") + + // PacketNames is a map of packet bytes to human readable names, for easier debugging. + PacketNames = map[byte]string{ + 0: "Reserved", + 1: "Connect", + 2: "Connack", + 3: "Publish", + 4: "Puback", + 5: "Pubrec", + 6: "Pubrel", + 7: "Pubcomp", + 8: "Subscribe", + 9: "Suback", + 10: "Unsubscribe", + 11: "Unsuback", + 12: "Pingreq", + 13: "Pingresp", + 14: "Disconnect", + 15: "Auth", + } +) + +// Packets is a concurrency safe map of packets. +type Packets struct { + internal map[string]Packet + sync.RWMutex +} + +// NewPackets returns a new instance of Packets. +func NewPackets() *Packets { + return &Packets{ + internal: map[string]Packet{}, + } +} + +// Add adds a new packet to the map. +func (p *Packets) Add(id string, val Packet) { + p.Lock() + defer p.Unlock() + p.internal[id] = val +} + +// GetAll returns all packets in the map. +func (p *Packets) GetAll() map[string]Packet { + p.RLock() + defer p.RUnlock() + m := map[string]Packet{} + for k, v := range p.internal { + m[k] = v + } + return m +} + +// Get returns a specific packet in the map by packet id. +func (p *Packets) Get(id string) (val Packet, ok bool) { + p.RLock() + defer p.RUnlock() + val, ok = p.internal[id] + return val, ok +} + +// Len returns the number of packets in the map. +func (p *Packets) Len() int { + p.RLock() + defer p.RUnlock() + val := len(p.internal) + return val +} + +// Delete removes a packet from the map by packet id. +func (p *Packets) Delete(id string) { + p.Lock() + defer p.Unlock() + delete(p.internal, id) +} + +// Packet represents an MQTT packet. Instead of providing a packet interface +// variant packet structs, this is a single concrete packet type to cover all packet +// types, which allows us to take advantage of various compiler optimizations. It +// contains a combination of mqtt spec values and internal broker control codes. 
+type Packet struct { + Connect ConnectParams // parameters for connect packets (just for organisation) + Properties Properties // all mqtt v5 packet properties + Payload []byte // a message/payload for publish packets + ReasonCodes []byte // one or more reason codes for multi-reason responses (suback, etc) + Filters Subscriptions // a list of subscription filters and their properties (subscribe, unsubscribe) + TopicName string // the topic a payload is being published to + Origin string // client id of the client who is issuing the packet (mostly internal use) + FixedHeader FixedHeader // - + Created int64 // unix timestamp indicating time packet was created/received on the server + Expiry int64 // unix timestamp indicating when the packet will expire and should be deleted + Mods Mods // internal broker control values for controlling certain mqtt v5 compliance + PacketID uint16 // packet id for the packet (publish, qos, etc) + ProtocolVersion byte // protocol version of the client the packet belongs to + SessionPresent bool // session existed for connack + ReasonCode byte // reason code for a packet response (acks, etc) + ReservedBit byte // reserved, do not use (except in testing) + Ignore bool // if true, do not perform any message forwarding operations +} + +// Mods specifies certain values required for certain mqtt v5 compliance within packet encoding/decoding. +type Mods struct { + MaxSize uint32 // the maximum packet size specified by the client / server + DisallowProblemInfo bool // if problem info is disallowed + AllowResponseInfo bool // if response info is disallowed +} + +// ConnectParams contains packet values which are specifically related to connect packets. 
+type ConnectParams struct { + WillProperties Properties `json:"willProperties"` // - + Password []byte `json:"password"` // - + Username []byte `json:"username"` // - + ProtocolName []byte `json:"protocolName"` // - + WillPayload []byte `json:"willPayload"` // - + ClientIdentifier string `json:"clientId"` // - + WillTopic string `json:"willTopic"` // - + Keepalive uint16 `json:"keepalive"` // - + PasswordFlag bool `json:"passwordFlag"` // - + UsernameFlag bool `json:"usernameFlag"` // - + WillQos byte `json:"willQos"` // - + WillFlag bool `json:"willFlag"` // - + WillRetain bool `json:"willRetain"` // - + Clean bool `json:"clean"` // CleanSession in v3.1.1, CleanStart in v5 +} + +// Subscriptions is a slice of Subscription. +type Subscriptions []Subscription // must be a slice to retain order. + +// Subscription contains details about a client subscription to a topic filter. +type Subscription struct { + ShareName []string + Filter string + Identifier int + Identifiers map[string]int + RetainHandling byte + Qos byte + RetainAsPublished bool + NoLocal bool + FwdRetainedFlag bool // true if the subscription forms part of a publish response to a client subscription and packet is retained. +} + +// Copy creates a new instance of a packet, but with an empty header for inheriting new QoS flags, etc. 
+func (pk *Packet) Copy(allowTransfer bool) Packet { + p := Packet{ + FixedHeader: FixedHeader{ + Remaining: pk.FixedHeader.Remaining, + Type: pk.FixedHeader.Type, + Retain: pk.FixedHeader.Retain, + Dup: false, // [MQTT-4.3.1-1] [MQTT-4.3.2-2] + Qos: pk.FixedHeader.Qos, + }, + Mods: Mods{ + MaxSize: pk.Mods.MaxSize, + }, + ReservedBit: pk.ReservedBit, + ProtocolVersion: pk.ProtocolVersion, + Connect: ConnectParams{ + ClientIdentifier: pk.Connect.ClientIdentifier, + Keepalive: pk.Connect.Keepalive, + WillQos: pk.Connect.WillQos, + WillTopic: pk.Connect.WillTopic, + WillFlag: pk.Connect.WillFlag, + WillRetain: pk.Connect.WillRetain, + WillProperties: pk.Connect.WillProperties.Copy(allowTransfer), + Clean: pk.Connect.Clean, + }, + TopicName: pk.TopicName, + Properties: pk.Properties.Copy(allowTransfer), + SessionPresent: pk.SessionPresent, + ReasonCode: pk.ReasonCode, + Filters: pk.Filters, + Created: pk.Created, + Expiry: pk.Expiry, + Origin: pk.Origin, + } + + if allowTransfer { + p.PacketID = pk.PacketID + } + + if len(pk.Connect.ProtocolName) > 0 { + p.Connect.ProtocolName = append([]byte{}, pk.Connect.ProtocolName...) + } + + if len(pk.Connect.Password) > 0 { + p.Connect.PasswordFlag = true + p.Connect.Password = append([]byte{}, pk.Connect.Password...) + } + + if len(pk.Connect.Username) > 0 { + p.Connect.UsernameFlag = true + p.Connect.Username = append([]byte{}, pk.Connect.Username...) + } + + if len(pk.Connect.WillPayload) > 0 { + p.Connect.WillPayload = append([]byte{}, pk.Connect.WillPayload...) + } + + if len(pk.Payload) > 0 { + p.Payload = append([]byte{}, pk.Payload...) + } + + if len(pk.ReasonCodes) > 0 { + p.ReasonCodes = append([]byte{}, pk.ReasonCodes...) + } + + return p +} + +// Merge merges a new subscription with a base subscription, preserving the highest +// qos value, matched identifiers and any special properties. 
+func (s Subscription) Merge(n Subscription) Subscription { + if s.Identifiers == nil { + s.Identifiers = map[string]int{ + s.Filter: s.Identifier, + } + } + + if n.Identifier > 0 { + s.Identifiers[n.Filter] = n.Identifier + } + + if n.Qos > s.Qos { + s.Qos = n.Qos // [MQTT-3.3.4-2] + } + + if n.NoLocal { + s.NoLocal = true // [MQTT-3.8.3-3] + } + + return s +} + +// encode encodes a subscription and properties into bytes. +func (p Subscription) encode() byte { + var flag byte + flag |= p.Qos + + if p.NoLocal { + flag |= 1 << 2 + } + + if p.RetainAsPublished { + flag |= 1 << 3 + } + + flag |= p.RetainHandling << 4 + return flag +} + +// decode decodes subscription bytes into a subscription struct. +func (p *Subscription) decode(b byte) { + p.Qos = b & 3 // byte + p.NoLocal = 1&(b>>2) > 0 // bool + p.RetainAsPublished = 1&(b>>3) > 0 // bool + p.RetainHandling = 3 & (b >> 4) // byte +} + +// ConnectEncode encodes a connect packet. +func (pk *Packet) ConnectEncode(buf *bytes.Buffer) error { + nb := bytes.NewBuffer([]byte{}) + nb.Write(encodeBytes(pk.Connect.ProtocolName)) + nb.WriteByte(pk.ProtocolVersion) + + nb.WriteByte( + encodeBool(pk.Connect.Clean)<<1 | + encodeBool(pk.Connect.WillFlag)<<2 | + pk.Connect.WillQos<<3 | + encodeBool(pk.Connect.WillRetain)<<5 | + encodeBool(pk.Connect.PasswordFlag)<<6 | + encodeBool(pk.Connect.UsernameFlag)<<7 | + 0, // [MQTT-2.1.3-1] + ) + + nb.Write(encodeUint16(pk.Connect.Keepalive)) + + if pk.ProtocolVersion == 5 { + pb := bytes.NewBuffer([]byte{}) + (&pk.Properties).Encode(pk.FixedHeader.Type, pk.Mods, pb, 0) + nb.Write(pb.Bytes()) + } + + nb.Write(encodeString(pk.Connect.ClientIdentifier)) + + if pk.Connect.WillFlag { + if pk.ProtocolVersion == 5 { + pb := bytes.NewBuffer([]byte{}) + (&pk.Connect).WillProperties.Encode(WillProperties, pk.Mods, pb, 0) + nb.Write(pb.Bytes()) + } + + nb.Write(encodeString(pk.Connect.WillTopic)) + nb.Write(encodeBytes(pk.Connect.WillPayload)) + } + + if pk.Connect.UsernameFlag { + 
nb.Write(encodeBytes(pk.Connect.Username)) + } + + if pk.Connect.PasswordFlag { + nb.Write(encodeBytes(pk.Connect.Password)) + } + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + nb.WriteTo(buf) + + return nil +} + +// ConnectDecode decodes a connect packet. +func (pk *Packet) ConnectDecode(buf []byte) error { + var offset int + var err error + + pk.Connect.ProtocolName, offset, err = decodeBytes(buf, 0) + if err != nil { + return ErrMalformedProtocolName + } + + pk.ProtocolVersion, offset, err = decodeByte(buf, offset) + if err != nil { + return ErrMalformedProtocolVersion + } + + flags, offset, err := decodeByte(buf, offset) + if err != nil { + return ErrMalformedFlags + } + + pk.ReservedBit = 1 & flags + pk.Connect.Clean = 1&(flags>>1) > 0 + pk.Connect.WillFlag = 1&(flags>>2) > 0 + pk.Connect.WillQos = 3 & (flags >> 3) // this one is not a bool + pk.Connect.WillRetain = 1&(flags>>5) > 0 + pk.Connect.PasswordFlag = 1&(flags>>6) > 0 + pk.Connect.UsernameFlag = 1&(flags>>7) > 0 + + pk.Connect.Keepalive, offset, err = decodeUint16(buf, offset) + if err != nil { + return ErrMalformedKeepalive + } + + if pk.ProtocolVersion == 5 { + n, err := pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + offset += n + } + + pk.Connect.ClientIdentifier, offset, err = decodeString(buf, offset) // [MQTT-3.1.3-1] [MQTT-3.1.3-2] [MQTT-3.1.3-3] [MQTT-3.1.3-4] + if err != nil { + return ErrClientIdentifierNotValid // [MQTT-3.1.3-8] + } + + if pk.Connect.WillFlag { // [MQTT-3.1.2-7] + if pk.ProtocolVersion == 5 { + n, err := pk.Connect.WillProperties.Decode(WillProperties, bytes.NewBuffer(buf[offset:])) + if err != nil { + return ErrMalformedWillProperties + } + offset += n + } + + pk.Connect.WillTopic, offset, err = decodeString(buf, offset) + if err != nil { + return ErrMalformedWillTopic + } + + pk.Connect.WillPayload, offset, err = decodeBytes(buf, offset) + if 
err != nil { + return ErrMalformedWillPayload + } + } + + if pk.Connect.UsernameFlag { // [MQTT-3.1.3-12] + if offset >= len(buf) { // we are at the end of the packet + return ErrProtocolViolationFlagNoUsername // [MQTT-3.1.2-17] + } + + pk.Connect.Username, offset, err = decodeBytes(buf, offset) + if err != nil { + return ErrMalformedUsername + } + } + + if pk.Connect.PasswordFlag { + pk.Connect.Password, _, err = decodeBytes(buf, offset) + if err != nil { + return ErrMalformedPassword + } + } + + return nil +} + +// ConnectValidate ensures the connect packet is compliant. +func (pk *Packet) ConnectValidate() Code { + if !bytes.Equal(pk.Connect.ProtocolName, []byte{'M', 'Q', 'I', 's', 'd', 'p'}) && !bytes.Equal(pk.Connect.ProtocolName, []byte{'M', 'Q', 'T', 'T'}) { + return ErrProtocolViolationProtocolName // [MQTT-3.1.2-1] + } + + if (bytes.Equal(pk.Connect.ProtocolName, []byte{'M', 'Q', 'I', 's', 'd', 'p'}) && pk.ProtocolVersion != 3) || + (bytes.Equal(pk.Connect.ProtocolName, []byte{'M', 'Q', 'T', 'T'}) && pk.ProtocolVersion != 4 && pk.ProtocolVersion != 5) { + return ErrProtocolViolationProtocolVersion // [MQTT-3.1.2-2] + } + + if pk.ReservedBit != 0 { + return ErrProtocolViolationReservedBit // [MQTT-3.1.2-3] + } + + if len(pk.Connect.Password) > math.MaxUint16 { + return ErrProtocolViolationPasswordTooLong + } + + if len(pk.Connect.Username) > math.MaxUint16 { + return ErrProtocolViolationUsernameTooLong + } + + if !pk.Connect.UsernameFlag && len(pk.Connect.Username) > 0 { + return ErrProtocolViolationUsernameNoFlag // [MQTT-3.1.2-16] + } + + if pk.Connect.PasswordFlag && len(pk.Connect.Password) == 0 { + return ErrProtocolViolationFlagNoPassword // [MQTT-3.1.2-19] + } + + if !pk.Connect.PasswordFlag && len(pk.Connect.Password) > 0 { + return ErrProtocolViolationPasswordNoFlag // [MQTT-3.1.2-18] + } + + if len(pk.Connect.ClientIdentifier) > math.MaxUint16 { + return ErrClientIdentifierNotValid + } + + if pk.Connect.WillFlag { + if len(pk.Connect.WillPayload) 
== 0 || pk.Connect.WillTopic == "" { + return ErrProtocolViolationWillFlagNoPayload // [MQTT-3.1.2-9] + } + + if pk.Connect.WillQos > 2 { + return ErrProtocolViolationQosOutOfRange // [MQTT-3.1.2-12] + } + } + + if !pk.Connect.WillFlag && pk.Connect.WillRetain { + return ErrProtocolViolationWillFlagSurplusRetain // [MQTT-3.1.2-13] + } + + return CodeSuccess +} + +// ConnackEncode encodes a Connack packet. +func (pk *Packet) ConnackEncode(buf *bytes.Buffer) error { + nb := bytes.NewBuffer([]byte{}) + nb.WriteByte(encodeBool(pk.SessionPresent)) + nb.WriteByte(pk.ReasonCode) + + if pk.ProtocolVersion == 5 { + pb := bytes.NewBuffer([]byte{}) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()+2) // +SessionPresent +ReasonCode + nb.Write(pb.Bytes()) + } + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + nb.WriteTo(buf) + return nil +} + +// ConnackDecode decodes a Connack packet. +func (pk *Packet) ConnackDecode(buf []byte) error { + var offset int + var err error + + pk.SessionPresent, offset, err = decodeByteBool(buf, 0) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedSessionPresent) + } + + pk.ReasonCode, offset, err = decodeByte(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedReasonCode) + } + + if pk.ProtocolVersion == 5 { + _, err := pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + } + + return nil +} + +// DisconnectEncode encodes a Disconnect packet. 
+func (pk *Packet) DisconnectEncode(buf *bytes.Buffer) error { + nb := bytes.NewBuffer([]byte{}) + + if pk.ProtocolVersion == 5 { + nb.WriteByte(pk.ReasonCode) + + pb := bytes.NewBuffer([]byte{}) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()) + nb.Write(pb.Bytes()) + } + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + nb.WriteTo(buf) + + return nil +} + +// DisconnectDecode decodes a Disconnect packet. +func (pk *Packet) DisconnectDecode(buf []byte) error { + if pk.ProtocolVersion == 5 && pk.FixedHeader.Remaining > 1 { + var err error + var offset int + pk.ReasonCode, offset, err = decodeByte(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedReasonCode) + } + + if pk.FixedHeader.Remaining > 2 { + _, err = pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + } + } + + return nil +} + +// PingreqEncode encodes a Pingreq packet. +func (pk *Packet) PingreqEncode(buf *bytes.Buffer) error { + pk.FixedHeader.Encode(buf) + return nil +} + +// PingreqDecode decodes a Pingreq packet. +func (pk *Packet) PingreqDecode(buf []byte) error { + return nil +} + +// PingrespEncode encodes a Pingresp packet. +func (pk *Packet) PingrespEncode(buf *bytes.Buffer) error { + pk.FixedHeader.Encode(buf) + return nil +} + +// PingrespDecode decodes a Pingres packet. +func (pk *Packet) PingrespDecode(buf []byte) error { + return nil +} + +// PublishEncode encodes a Publish packet. 
+func (pk *Packet) PublishEncode(buf *bytes.Buffer) error { + nb := bytes.NewBuffer([]byte{}) + + nb.Write(encodeString(pk.TopicName)) // [MQTT-3.3.2-1] + + if pk.FixedHeader.Qos > 0 { + if pk.PacketID == 0 { + return ErrProtocolViolationNoPacketID // [MQTT-2.2.1-2] + } + nb.Write(encodeUint16(pk.PacketID)) + } + + if pk.ProtocolVersion == 5 { + pb := bytes.NewBuffer([]byte{}) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()+len(pk.Payload)) + nb.Write(pb.Bytes()) + } + + nb.Write(pk.Payload) + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + nb.WriteTo(buf) + + return nil +} + +// PublishDecode extracts the data values from the packet. +func (pk *Packet) PublishDecode(buf []byte) error { + var offset int + var err error + + pk.TopicName, offset, err = decodeString(buf, 0) // [MQTT-3.3.2-1] + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedTopic) + } + + if pk.FixedHeader.Qos > 0 { + pk.PacketID, offset, err = decodeUint16(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedPacketID) + } + } + + if pk.ProtocolVersion == 5 { + n, err := pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + + offset += n + } + + pk.Payload = buf[offset:] + + return nil +} + +// PublishValidate validates a publish packet. 
+func (pk *Packet) PublishValidate(topicAliasMaximum uint16) Code { + if pk.FixedHeader.Qos > 0 && pk.PacketID == 0 { + return ErrProtocolViolationNoPacketID // [MQTT-2.2.1-3] [MQTT-2.2.1-4] + } + + if pk.FixedHeader.Qos == 0 && pk.PacketID > 0 { + return ErrProtocolViolationSurplusPacketID // [MQTT-2.2.1-2] + } + + if strings.ContainsAny(pk.TopicName, "+#") { + return ErrProtocolViolationSurplusWildcard // [MQTT-3.3.2-2] + } + + if pk.Properties.TopicAlias > topicAliasMaximum { + return ErrTopicAliasInvalid // [MQTT-3.2.2-17] [MQTT-3.3.2-9] ~[MQTT-3.3.2-10] [MQTT-3.3.2-12] + } + + if pk.TopicName == "" && pk.Properties.TopicAlias == 0 { + return ErrProtocolViolationNoTopic // ~[MQTT-3.3.2-8] + } + + if pk.Properties.TopicAliasFlag && pk.Properties.TopicAlias == 0 { + return ErrTopicAliasInvalid // [MQTT-3.3.2-8] + } + + if len(pk.Properties.SubscriptionIdentifier) > 0 { + return ErrProtocolViolationSurplusSubID // [MQTT-3.3.4-6] + } + + return CodeSuccess +} + +// encodePubAckRelRecComp encodes a Puback, Pubrel, Pubrec, or Pubcomp packet. +func (pk *Packet) encodePubAckRelRecComp(buf *bytes.Buffer) error { + nb := bytes.NewBuffer([]byte{}) + nb.Write(encodeUint16(pk.PacketID)) + + if pk.ProtocolVersion == 5 { + pb := bytes.NewBuffer([]byte{}) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()) + if pk.ReasonCode >= ErrUnspecifiedError.Code || pb.Len() > 1 { + nb.WriteByte(pk.ReasonCode) + } + + if pb.Len() > 1 { + nb.Write(pb.Bytes()) + } + } + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + nb.WriteTo(buf) + return nil +} + +// decode extracts the data values from a Puback, Pubrel, Pubrec, or Pubcomp packet. 
+func (pk *Packet) decodePubAckRelRecComp(buf []byte) error { + var offset int + var err error + pk.PacketID, offset, err = decodeUint16(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedPacketID) + } + + if pk.ProtocolVersion == 5 && pk.FixedHeader.Remaining > 2 { + pk.ReasonCode, offset, err = decodeByte(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedReasonCode) + } + + if pk.FixedHeader.Remaining > 3 { + _, err = pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + } + } + + return nil +} + +// PubackEncode encodes a Puback packet. +func (pk *Packet) PubackEncode(buf *bytes.Buffer) error { + return pk.encodePubAckRelRecComp(buf) +} + +// PubackDecode decodes a Puback packet. +func (pk *Packet) PubackDecode(buf []byte) error { + return pk.decodePubAckRelRecComp(buf) +} + +// PubcompEncode encodes a Pubcomp packet. +func (pk *Packet) PubcompEncode(buf *bytes.Buffer) error { + return pk.encodePubAckRelRecComp(buf) +} + +// PubcompDecode decodes a Pubcomp packet. +func (pk *Packet) PubcompDecode(buf []byte) error { + return pk.decodePubAckRelRecComp(buf) +} + +// PubrecEncode encodes a Pubrec packet. +func (pk *Packet) PubrecEncode(buf *bytes.Buffer) error { + return pk.encodePubAckRelRecComp(buf) +} + +// PubrecDecode decodes a Pubrec packet. +func (pk *Packet) PubrecDecode(buf []byte) error { + return pk.decodePubAckRelRecComp(buf) +} + +// PubrelEncode encodes a Pubrel packet. +func (pk *Packet) PubrelEncode(buf *bytes.Buffer) error { + return pk.encodePubAckRelRecComp(buf) +} + +// PubrelDecode decodes a Pubrel packet. +func (pk *Packet) PubrelDecode(buf []byte) error { + return pk.decodePubAckRelRecComp(buf) +} + +// ReasonCodeValid returns true if the provided reason code is valid for the packet type. 
+func (pk *Packet) ReasonCodeValid() bool { + switch pk.FixedHeader.Type { + case Pubrec: + return bytes.Contains([]byte{ + CodeSuccess.Code, + CodeNoMatchingSubscribers.Code, + ErrUnspecifiedError.Code, + ErrImplementationSpecificError.Code, + ErrNotAuthorized.Code, + ErrTopicNameInvalid.Code, + ErrPacketIdentifierInUse.Code, + ErrQuotaExceeded.Code, + ErrPayloadFormatInvalid.Code, + }, []byte{pk.ReasonCode}) + case Pubrel: + fallthrough + case Pubcomp: + return bytes.Contains([]byte{ + CodeSuccess.Code, + ErrPacketIdentifierNotFound.Code, + }, []byte{pk.ReasonCode}) + case Suback: + return bytes.Contains([]byte{ + CodeGrantedQos0.Code, + CodeGrantedQos1.Code, + CodeGrantedQos2.Code, + ErrUnspecifiedError.Code, + ErrImplementationSpecificError.Code, + ErrNotAuthorized.Code, + ErrTopicFilterInvalid.Code, + ErrPacketIdentifierInUse.Code, + ErrQuotaExceeded.Code, + ErrSharedSubscriptionsNotSupported.Code, + ErrSubscriptionIdentifiersNotSupported.Code, + ErrWildcardSubscriptionsNotSupported.Code, + }, []byte{pk.ReasonCode}) + case Unsuback: + return bytes.Contains([]byte{ + CodeSuccess.Code, + CodeNoSubscriptionExisted.Code, + ErrUnspecifiedError.Code, + ErrImplementationSpecificError.Code, + ErrNotAuthorized.Code, + ErrTopicFilterInvalid.Code, + ErrPacketIdentifierInUse.Code, + }, []byte{pk.ReasonCode}) + } + + return true +} + +// SubackEncode encodes a Suback packet. +func (pk *Packet) SubackEncode(buf *bytes.Buffer) error { + nb := bytes.NewBuffer([]byte{}) + nb.Write(encodeUint16(pk.PacketID)) + + if pk.ProtocolVersion == 5 { + pb := bytes.NewBuffer([]byte{}) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()+len(pk.ReasonCodes)) + nb.Write(pb.Bytes()) + } + + nb.Write(pk.ReasonCodes) + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + nb.WriteTo(buf) + + return nil +} + +// SubackDecode decodes a Suback packet. 
+func (pk *Packet) SubackDecode(buf []byte) error { + var offset int + var err error + + pk.PacketID, offset, err = decodeUint16(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedPacketID) + } + + if pk.ProtocolVersion == 5 { + n, err := pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + offset += n + } + + pk.ReasonCodes = buf[offset:] + + return nil +} + +// SubscribeEncode encodes a Subscribe packet. +func (pk *Packet) SubscribeEncode(buf *bytes.Buffer) error { + if pk.PacketID == 0 { + return ErrProtocolViolationNoPacketID + } + + nb := bytes.NewBuffer([]byte{}) + nb.Write(encodeUint16(pk.PacketID)) + + xb := bytes.NewBuffer([]byte{}) // capture and write filters after length checks + for _, opts := range pk.Filters { + xb.Write(encodeString(opts.Filter)) // [MQTT-3.8.3-1] + if pk.ProtocolVersion == 5 { + xb.WriteByte(opts.encode()) + } else { + xb.WriteByte(opts.Qos) + } + } + + if pk.ProtocolVersion == 5 { + pb := bytes.NewBuffer([]byte{}) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()+xb.Len()) + nb.Write(pb.Bytes()) + } + + nb.Write(xb.Bytes()) + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + nb.WriteTo(buf) + + return nil +} + +// SubscribeDecode decodes a Subscribe packet. 
+func (pk *Packet) SubscribeDecode(buf []byte) error { + var offset int + var err error + + pk.PacketID, offset, err = decodeUint16(buf, offset) + if err != nil { + return ErrMalformedPacketID + } + + if pk.ProtocolVersion == 5 { + n, err := pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + offset += n + } + + var filter string + pk.Filters = Subscriptions{} + for offset < len(buf) { + filter, offset, err = decodeString(buf, offset) // [MQTT-3.8.3-1] + if err != nil { + return ErrMalformedTopic + } + + var option byte + sub := &Subscription{ + Filter: filter, + } + + if pk.ProtocolVersion == 5 { + sub.decode(buf[offset]) + offset += 1 + } else { + option, offset, err = decodeByte(buf, offset) + if err != nil { + return ErrMalformedQos + } + sub.Qos = option + } + + if len(pk.Properties.SubscriptionIdentifier) > 0 { + sub.Identifier = pk.Properties.SubscriptionIdentifier[0] + } + + if sub.Qos > 2 { + return ErrProtocolViolationQosOutOfRange + } + + pk.Filters = append(pk.Filters, *sub) + } + + return nil +} + +// SubscribeValidate ensures the packet is compliant. +func (pk *Packet) SubscribeValidate() Code { + if pk.FixedHeader.Qos > 0 && pk.PacketID == 0 { + return ErrProtocolViolationNoPacketID // [MQTT-2.2.1-3] [MQTT-2.2.1-4] + } + + if len(pk.Filters) == 0 { + return ErrProtocolViolationNoFilters // [MQTT-3.10.3-2] + } + + for _, v := range pk.Filters { + if v.Identifier > 268435455 { // 3.3.2.3.8 The Subscription Identifier can have the value of 1 to 268,435,455. + return ErrProtocolViolationOversizeSubID // + } + } + + return CodeSuccess +} + +// UnsubackEncode encodes an Unsuback packet. 
+func (pk *Packet) UnsubackEncode(buf *bytes.Buffer) error { + nb := bytes.NewBuffer([]byte{}) + nb.Write(encodeUint16(pk.PacketID)) + + if pk.ProtocolVersion == 5 { + pb := bytes.NewBuffer([]byte{}) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()) + nb.Write(pb.Bytes()) + } + + nb.Write(pk.ReasonCodes) + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + nb.WriteTo(buf) + + return nil +} + +// UnsubackDecode decodes an Unsuback packet. +func (pk *Packet) UnsubackDecode(buf []byte) error { + var offset int + var err error + + pk.PacketID, offset, err = decodeUint16(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedPacketID) + } + + if pk.ProtocolVersion == 5 { + n, err := pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + + offset += n + + pk.ReasonCodes = buf[offset:] + } + + return nil +} + +// UnsubscribeEncode encodes an Unsubscribe packet. +func (pk *Packet) UnsubscribeEncode(buf *bytes.Buffer) error { + if pk.PacketID == 0 { + return ErrProtocolViolationNoPacketID + } + + nb := bytes.NewBuffer([]byte{}) + nb.Write(encodeUint16(pk.PacketID)) + + xb := bytes.NewBuffer([]byte{}) // capture filters and write after length checks + for _, sub := range pk.Filters { + xb.Write(encodeString(sub.Filter)) // [MQTT-3.10.3-1] + } + + if pk.ProtocolVersion == 5 { + pb := bytes.NewBuffer([]byte{}) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()+xb.Len()) + nb.Write(pb.Bytes()) + } + + nb.Write(xb.Bytes()) + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + nb.WriteTo(buf) + + return nil +} + +// UnsubscribeDecode decodes an Unsubscribe packet. 
+func (pk *Packet) UnsubscribeDecode(buf []byte) error { + var offset int + var err error + + pk.PacketID, offset, err = decodeUint16(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedPacketID) + } + + if pk.ProtocolVersion == 5 { + n, err := pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + offset += n + } + + var filter string + pk.Filters = Subscriptions{} + for offset < len(buf) { + filter, offset, err = decodeString(buf, offset) // [MQTT-3.10.3-1] + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedTopic) + } + pk.Filters = append(pk.Filters, Subscription{Filter: filter}) + } + + return nil +} + +// UnsubscribeValidate validates an Unsubscribe packet. +func (pk *Packet) UnsubscribeValidate() Code { + if pk.FixedHeader.Qos > 0 && pk.PacketID == 0 { + return ErrProtocolViolationNoPacketID // [MQTT-2.2.1-3] [MQTT-2.2.1-4] + } + + if len(pk.Filters) == 0 { + return ErrProtocolViolationNoFilters // [MQTT-3.10.3-2] + } + + return CodeSuccess +} + +// AuthEncode encodes an Auth packet. +func (pk *Packet) AuthEncode(buf *bytes.Buffer) error { + nb := bytes.NewBuffer([]byte{}) + nb.WriteByte(pk.ReasonCode) + + pb := bytes.NewBuffer([]byte{}) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()) + nb.Write(pb.Bytes()) + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + nb.WriteTo(buf) + return nil +} + +// AuthDecode decodes an Auth packet. 
+func (pk *Packet) AuthDecode(buf []byte) error { + var offset int + var err error + + pk.ReasonCode, offset, err = decodeByte(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedReasonCode) + } + + _, err = pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + + return nil +} + +// AuthValidate returns success if the auth packet is valid. +func (pk *Packet) AuthValidate() Code { + if pk.ReasonCode != CodeSuccess.Code && + pk.ReasonCode != CodeContinueAuthentication.Code && + pk.ReasonCode != CodeReAuthenticate.Code { + return ErrProtocolViolationInvalidReason // [MQTT-3.15.2-1] + } + + return CodeSuccess +} + +// FormatID returns the PacketID field as a decimal integer. +func (pk *Packet) FormatID() string { + return strconv.FormatUint(uint64(pk.PacketID), 10) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/packets/properties.go b/vendor/github.com/mochi-mqtt/server/v2/packets/properties.go new file mode 100644 index 000000000..c5eefc1a2 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/packets/properties.go @@ -0,0 +1,477 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package packets + +import ( + "bytes" + "fmt" + "strings" +) + +const ( + PropPayloadFormat byte = 1 + PropMessageExpiryInterval byte = 2 + PropContentType byte = 3 + PropResponseTopic byte = 8 + PropCorrelationData byte = 9 + PropSubscriptionIdentifier byte = 11 + PropSessionExpiryInterval byte = 17 + PropAssignedClientID byte = 18 + PropServerKeepAlive byte = 19 + PropAuthenticationMethod byte = 21 + PropAuthenticationData byte = 22 + PropRequestProblemInfo byte = 23 + PropWillDelayInterval byte = 24 + PropRequestResponseInfo byte = 25 + PropResponseInfo byte = 26 + PropServerReference byte = 28 + PropReasonString byte = 31 + PropReceiveMaximum byte = 33 + 
PropTopicAliasMaximum byte = 34 + PropTopicAlias byte = 35 + PropMaximumQos byte = 36 + PropRetainAvailable byte = 37 + PropUser byte = 38 + PropMaximumPacketSize byte = 39 + PropWildcardSubAvailable byte = 40 + PropSubIDAvailable byte = 41 + PropSharedSubAvailable byte = 42 +) + +// validPacketProperties indicates which properties are valid for which packet types. +var validPacketProperties = map[byte]map[byte]byte{ + PropPayloadFormat: {Publish: 1, WillProperties: 1}, + PropMessageExpiryInterval: {Publish: 1, WillProperties: 1}, + PropContentType: {Publish: 1, WillProperties: 1}, + PropResponseTopic: {Publish: 1, WillProperties: 1}, + PropCorrelationData: {Publish: 1, WillProperties: 1}, + PropSubscriptionIdentifier: {Publish: 1, Subscribe: 1}, + PropSessionExpiryInterval: {Connect: 1, Connack: 1, Disconnect: 1}, + PropAssignedClientID: {Connack: 1}, + PropServerKeepAlive: {Connack: 1}, + PropAuthenticationMethod: {Connect: 1, Connack: 1, Auth: 1}, + PropAuthenticationData: {Connect: 1, Connack: 1, Auth: 1}, + PropRequestProblemInfo: {Connect: 1}, + PropWillDelayInterval: {WillProperties: 1}, + PropRequestResponseInfo: {Connect: 1}, + PropResponseInfo: {Connack: 1}, + PropServerReference: {Connack: 1, Disconnect: 1}, + PropReasonString: {Connack: 1, Puback: 1, Pubrec: 1, Pubrel: 1, Pubcomp: 1, Suback: 1, Unsuback: 1, Disconnect: 1, Auth: 1}, + PropReceiveMaximum: {Connect: 1, Connack: 1}, + PropTopicAliasMaximum: {Connect: 1, Connack: 1}, + PropTopicAlias: {Publish: 1}, + PropMaximumQos: {Connack: 1}, + PropRetainAvailable: {Connack: 1}, + PropUser: {Connect: 1, Connack: 1, Publish: 1, Puback: 1, Pubrec: 1, Pubrel: 1, Pubcomp: 1, Subscribe: 1, Suback: 1, Unsubscribe: 1, Unsuback: 1, Disconnect: 1, Auth: 1, WillProperties: 1}, + PropMaximumPacketSize: {Connect: 1, Connack: 1}, + PropWildcardSubAvailable: {Connack: 1}, + PropSubIDAvailable: {Connack: 1}, + PropSharedSubAvailable: {Connack: 1}, +} + +// UserProperty is an arbitrary key-value pair for a packet user 
properties array. +type UserProperty struct { // [MQTT-1.5.7-1] + Key string `json:"k"` + Val string `json:"v"` +} + +// Properties contains all of the mqtt v5 properties available for a packet. +// Some properties have valid values of 0 or not-present. In this case, we opt for +// property flags to indicate the usage of property. +// Refer to mqtt v5 2.2.2.2 Property spec for more information. +type Properties struct { + CorrelationData []byte `json:"cd"` + SubscriptionIdentifier []int `json:"si"` + AuthenticationData []byte `json:"ad"` + User []UserProperty `json:"user"` + ContentType string `json:"ct"` + ResponseTopic string `json:"rt"` + AssignedClientID string `json:"aci"` + AuthenticationMethod string `json:"am"` + ResponseInfo string `json:"ri"` + ServerReference string `json:"sr"` + ReasonString string `json:"rs"` + MessageExpiryInterval uint32 `json:"me"` + SessionExpiryInterval uint32 `json:"sei"` + WillDelayInterval uint32 `json:"wdi"` + MaximumPacketSize uint32 `json:"mps"` + ServerKeepAlive uint16 `json:"ska"` + ReceiveMaximum uint16 `json:"rm"` + TopicAliasMaximum uint16 `json:"tam"` + TopicAlias uint16 `json:"ta"` + PayloadFormat byte `json:"pf"` + PayloadFormatFlag bool `json:"fpf"` + SessionExpiryIntervalFlag bool `json:"fsei"` + ServerKeepAliveFlag bool `json:"fska"` + RequestProblemInfo byte `json:"rpi"` + RequestProblemInfoFlag bool `json:"frpi"` + RequestResponseInfo byte `json:"rri"` + TopicAliasFlag bool `json:"fta"` + MaximumQos byte `json:"mqos"` + MaximumQosFlag bool `json:"fmqos"` + RetainAvailable byte `json:"ra"` + RetainAvailableFlag bool `json:"fra"` + WildcardSubAvailable byte `json:"wsa"` + WildcardSubAvailableFlag bool `json:"fwsa"` + SubIDAvailable byte `json:"sida"` + SubIDAvailableFlag bool `json:"fsida"` + SharedSubAvailable byte `json:"ssa"` + SharedSubAvailableFlag bool `json:"fssa"` +} + +// Copy creates a new Properties struct with copies of the values. 
+func (p *Properties) Copy(allowTransfer bool) Properties { + pr := Properties{ + PayloadFormat: p.PayloadFormat, // [MQTT-3.3.2-4] + PayloadFormatFlag: p.PayloadFormatFlag, + MessageExpiryInterval: p.MessageExpiryInterval, + ContentType: p.ContentType, // [MQTT-3.3.2-20] + ResponseTopic: p.ResponseTopic, // [MQTT-3.3.2-15] + SessionExpiryInterval: p.SessionExpiryInterval, + SessionExpiryIntervalFlag: p.SessionExpiryIntervalFlag, + AssignedClientID: p.AssignedClientID, + ServerKeepAlive: p.ServerKeepAlive, + ServerKeepAliveFlag: p.ServerKeepAliveFlag, + AuthenticationMethod: p.AuthenticationMethod, + RequestProblemInfo: p.RequestProblemInfo, + RequestProblemInfoFlag: p.RequestProblemInfoFlag, + WillDelayInterval: p.WillDelayInterval, + RequestResponseInfo: p.RequestResponseInfo, + ResponseInfo: p.ResponseInfo, + ServerReference: p.ServerReference, + ReasonString: p.ReasonString, + ReceiveMaximum: p.ReceiveMaximum, + TopicAliasMaximum: p.TopicAliasMaximum, + TopicAlias: 0, // NB; do not copy topic alias [MQTT-3.3.2-7] + we do not send to clients (currently) [MQTT-3.1.2-26] [MQTT-3.1.2-27] + MaximumQos: p.MaximumQos, + MaximumQosFlag: p.MaximumQosFlag, + RetainAvailable: p.RetainAvailable, + RetainAvailableFlag: p.RetainAvailableFlag, + MaximumPacketSize: p.MaximumPacketSize, + WildcardSubAvailable: p.WildcardSubAvailable, + WildcardSubAvailableFlag: p.WildcardSubAvailableFlag, + SubIDAvailable: p.SubIDAvailable, + SubIDAvailableFlag: p.SubIDAvailableFlag, + SharedSubAvailable: p.SharedSubAvailable, + SharedSubAvailableFlag: p.SharedSubAvailableFlag, + } + + if allowTransfer { + pr.TopicAlias = p.TopicAlias + pr.TopicAliasFlag = p.TopicAliasFlag + } + + if len(p.CorrelationData) > 0 { + pr.CorrelationData = append([]byte{}, p.CorrelationData...) // [MQTT-3.3.2-16] + } + + if len(p.SubscriptionIdentifier) > 0 { + pr.SubscriptionIdentifier = append([]int{}, p.SubscriptionIdentifier...) 
+ } + + if len(p.AuthenticationData) > 0 { + pr.AuthenticationData = append([]byte{}, p.AuthenticationData...) + } + + if len(p.User) > 0 { + pr.User = []UserProperty{} + for _, v := range p.User { + pr.User = append(pr.User, UserProperty{ // [MQTT-3.3.2-17] + Key: v.Key, + Val: v.Val, + }) + } + } + + return pr +} + +// canEncode returns true if the property type is valid for the packet type. +func (p *Properties) canEncode(pkt byte, k byte) bool { + return validPacketProperties[k][pkt] == 1 +} + +// Encode encodes properties into a bytes buffer. +func (p *Properties) Encode(pkt byte, mods Mods, b *bytes.Buffer, n int) { + if p == nil { + return + } + + var buf bytes.Buffer + if p.canEncode(pkt, PropPayloadFormat) && p.PayloadFormatFlag { + buf.WriteByte(PropPayloadFormat) + buf.WriteByte(p.PayloadFormat) + } + + if p.canEncode(pkt, PropMessageExpiryInterval) && p.MessageExpiryInterval > 0 { + buf.WriteByte(PropMessageExpiryInterval) + buf.Write(encodeUint32(p.MessageExpiryInterval)) + } + + if p.canEncode(pkt, PropContentType) && p.ContentType != "" { + buf.WriteByte(PropContentType) + buf.Write(encodeString(p.ContentType)) // [MQTT-3.3.2-19] + } + + if mods.AllowResponseInfo && p.canEncode(pkt, PropResponseTopic) && // [MQTT-3.3.2-14] + p.ResponseTopic != "" && !strings.ContainsAny(p.ResponseTopic, "+#") { // [MQTT-3.1.2-28] + buf.WriteByte(PropResponseTopic) + buf.Write(encodeString(p.ResponseTopic)) // [MQTT-3.3.2-13] + } + + if mods.AllowResponseInfo && p.canEncode(pkt, PropCorrelationData) && len(p.CorrelationData) > 0 { // [MQTT-3.1.2-28] + buf.WriteByte(PropCorrelationData) + buf.Write(encodeBytes(p.CorrelationData)) + } + + if p.canEncode(pkt, PropSubscriptionIdentifier) && len(p.SubscriptionIdentifier) > 0 { + for _, v := range p.SubscriptionIdentifier { + if v > 0 { + buf.WriteByte(PropSubscriptionIdentifier) + encodeLength(&buf, int64(v)) + } + } + } + + if p.canEncode(pkt, PropSessionExpiryInterval) && p.SessionExpiryIntervalFlag { // [MQTT-3.14.2-2] 
+ buf.WriteByte(PropSessionExpiryInterval) + buf.Write(encodeUint32(p.SessionExpiryInterval)) + } + + if p.canEncode(pkt, PropAssignedClientID) && p.AssignedClientID != "" { + buf.WriteByte(PropAssignedClientID) + buf.Write(encodeString(p.AssignedClientID)) + } + + if p.canEncode(pkt, PropServerKeepAlive) && p.ServerKeepAliveFlag { + buf.WriteByte(PropServerKeepAlive) + buf.Write(encodeUint16(p.ServerKeepAlive)) + } + + if p.canEncode(pkt, PropAuthenticationMethod) && p.AuthenticationMethod != "" { + buf.WriteByte(PropAuthenticationMethod) + buf.Write(encodeString(p.AuthenticationMethod)) + } + + if p.canEncode(pkt, PropAuthenticationData) && len(p.AuthenticationData) > 0 { + buf.WriteByte(PropAuthenticationData) + buf.Write(encodeBytes(p.AuthenticationData)) + } + + if p.canEncode(pkt, PropRequestProblemInfo) && p.RequestProblemInfoFlag { + buf.WriteByte(PropRequestProblemInfo) + buf.WriteByte(p.RequestProblemInfo) + } + + if p.canEncode(pkt, PropWillDelayInterval) && p.WillDelayInterval > 0 { + buf.WriteByte(PropWillDelayInterval) + buf.Write(encodeUint32(p.WillDelayInterval)) + } + + if p.canEncode(pkt, PropRequestResponseInfo) && p.RequestResponseInfo > 0 { + buf.WriteByte(PropRequestResponseInfo) + buf.WriteByte(p.RequestResponseInfo) + } + + if mods.AllowResponseInfo && p.canEncode(pkt, PropResponseInfo) && len(p.ResponseInfo) > 0 { // [MQTT-3.1.2-28] + buf.WriteByte(PropResponseInfo) + buf.Write(encodeString(p.ResponseInfo)) + } + + if p.canEncode(pkt, PropServerReference) && len(p.ServerReference) > 0 { + buf.WriteByte(PropServerReference) + buf.Write(encodeString(p.ServerReference)) + } + + // [MQTT-3.2.2-19] [MQTT-3.14.2-3] [MQTT-3.4.2-2] [MQTT-3.5.2-2] + // [MQTT-3.6.2-2] [MQTT-3.9.2-1] [MQTT-3.11.2-1] [MQTT-3.15.2-2] + if !mods.DisallowProblemInfo && p.canEncode(pkt, PropReasonString) && p.ReasonString != "" { + b := encodeString(p.ReasonString) + if mods.MaxSize == 0 || uint32(n+len(b)+1) < mods.MaxSize { + buf.WriteByte(PropReasonString) + 
buf.Write(b) + } + } + + if p.canEncode(pkt, PropReceiveMaximum) && p.ReceiveMaximum > 0 { + buf.WriteByte(PropReceiveMaximum) + buf.Write(encodeUint16(p.ReceiveMaximum)) + } + + if p.canEncode(pkt, PropTopicAliasMaximum) && p.TopicAliasMaximum > 0 { + buf.WriteByte(PropTopicAliasMaximum) + buf.Write(encodeUint16(p.TopicAliasMaximum)) + } + + if p.canEncode(pkt, PropTopicAlias) && p.TopicAliasFlag && p.TopicAlias > 0 { // [MQTT-3.3.2-8] + buf.WriteByte(PropTopicAlias) + buf.Write(encodeUint16(p.TopicAlias)) + } + + if p.canEncode(pkt, PropMaximumQos) && p.MaximumQosFlag && p.MaximumQos < 2 { + buf.WriteByte(PropMaximumQos) + buf.WriteByte(p.MaximumQos) + } + + if p.canEncode(pkt, PropRetainAvailable) && p.RetainAvailableFlag { + buf.WriteByte(PropRetainAvailable) + buf.WriteByte(p.RetainAvailable) + } + + if !mods.DisallowProblemInfo && p.canEncode(pkt, PropUser) { + pb := bytes.NewBuffer([]byte{}) + for _, v := range p.User { + pb.WriteByte(PropUser) + pb.Write(encodeString(v.Key)) + pb.Write(encodeString(v.Val)) + } + // [MQTT-3.2.2-20] [MQTT-3.14.2-4] [MQTT-3.4.2-3] [MQTT-3.5.2-3] + // [MQTT-3.6.2-3] [MQTT-3.9.2-2] [MQTT-3.11.2-2] [MQTT-3.15.2-3] + if mods.MaxSize == 0 || uint32(n+pb.Len()+1) < mods.MaxSize { + buf.Write(pb.Bytes()) + } + } + + if p.canEncode(pkt, PropMaximumPacketSize) && p.MaximumPacketSize > 0 { + buf.WriteByte(PropMaximumPacketSize) + buf.Write(encodeUint32(p.MaximumPacketSize)) + } + + if p.canEncode(pkt, PropWildcardSubAvailable) && p.WildcardSubAvailableFlag { + buf.WriteByte(PropWildcardSubAvailable) + buf.WriteByte(p.WildcardSubAvailable) + } + + if p.canEncode(pkt, PropSubIDAvailable) && p.SubIDAvailableFlag { + buf.WriteByte(PropSubIDAvailable) + buf.WriteByte(p.SubIDAvailable) + } + + if p.canEncode(pkt, PropSharedSubAvailable) && p.SharedSubAvailableFlag { + buf.WriteByte(PropSharedSubAvailable) + buf.WriteByte(p.SharedSubAvailable) + } + + encodeLength(b, int64(buf.Len())) + buf.WriteTo(b) // [MQTT-3.1.3-10] +} + +// Decode decodes 
property bytes into a properties struct. +func (p *Properties) Decode(pkt byte, b *bytes.Buffer) (n int, err error) { + if p == nil { + return 0, nil + } + + var bu int + n, bu, err = DecodeLength(b) + if err != nil { + return n + bu, err + } + + if n == 0 { + return n + bu, nil + } + + bt := b.Bytes() + var k byte + for offset := 0; offset < n; { + k, offset, err = decodeByte(bt, offset) + if err != nil { + return n + bu, err + } + + if _, ok := validPacketProperties[k][pkt]; !ok { + return n + bu, fmt.Errorf("property type %v not valid for packet type %v: %w", k, pkt, ErrProtocolViolationUnsupportedProperty) + } + + switch k { + case PropPayloadFormat: + p.PayloadFormat, offset, err = decodeByte(bt, offset) + p.PayloadFormatFlag = true + case PropMessageExpiryInterval: + p.MessageExpiryInterval, offset, err = decodeUint32(bt, offset) + case PropContentType: + p.ContentType, offset, err = decodeString(bt, offset) + case PropResponseTopic: + p.ResponseTopic, offset, err = decodeString(bt, offset) + case PropCorrelationData: + p.CorrelationData, offset, err = decodeBytes(bt, offset) + case PropSubscriptionIdentifier: + if p.SubscriptionIdentifier == nil { + p.SubscriptionIdentifier = []int{} + } + + n, bu, err := DecodeLength(bytes.NewBuffer(bt[offset:])) + if err != nil { + return n + bu, err + } + p.SubscriptionIdentifier = append(p.SubscriptionIdentifier, n) + offset += bu + case PropSessionExpiryInterval: + p.SessionExpiryInterval, offset, err = decodeUint32(bt, offset) + p.SessionExpiryIntervalFlag = true + case PropAssignedClientID: + p.AssignedClientID, offset, err = decodeString(bt, offset) + case PropServerKeepAlive: + p.ServerKeepAlive, offset, err = decodeUint16(bt, offset) + p.ServerKeepAliveFlag = true + case PropAuthenticationMethod: + p.AuthenticationMethod, offset, err = decodeString(bt, offset) + case PropAuthenticationData: + p.AuthenticationData, offset, err = decodeBytes(bt, offset) + case PropRequestProblemInfo: + p.RequestProblemInfo, offset, 
err = decodeByte(bt, offset) + p.RequestProblemInfoFlag = true + case PropWillDelayInterval: + p.WillDelayInterval, offset, err = decodeUint32(bt, offset) + case PropRequestResponseInfo: + p.RequestResponseInfo, offset, err = decodeByte(bt, offset) + case PropResponseInfo: + p.ResponseInfo, offset, err = decodeString(bt, offset) + case PropServerReference: + p.ServerReference, offset, err = decodeString(bt, offset) + case PropReasonString: + p.ReasonString, offset, err = decodeString(bt, offset) + case PropReceiveMaximum: + p.ReceiveMaximum, offset, err = decodeUint16(bt, offset) + case PropTopicAliasMaximum: + p.TopicAliasMaximum, offset, err = decodeUint16(bt, offset) + case PropTopicAlias: + p.TopicAlias, offset, err = decodeUint16(bt, offset) + p.TopicAliasFlag = true + case PropMaximumQos: + p.MaximumQos, offset, err = decodeByte(bt, offset) + p.MaximumQosFlag = true + case PropRetainAvailable: + p.RetainAvailable, offset, err = decodeByte(bt, offset) + p.RetainAvailableFlag = true + case PropUser: + var k, v string + k, offset, err = decodeString(bt, offset) + if err != nil { + return n + bu, err + } + v, offset, err = decodeString(bt, offset) + p.User = append(p.User, UserProperty{Key: k, Val: v}) + case PropMaximumPacketSize: + p.MaximumPacketSize, offset, err = decodeUint32(bt, offset) + case PropWildcardSubAvailable: + p.WildcardSubAvailable, offset, err = decodeByte(bt, offset) + p.WildcardSubAvailableFlag = true + case PropSubIDAvailable: + p.SubIDAvailable, offset, err = decodeByte(bt, offset) + p.SubIDAvailableFlag = true + case PropSharedSubAvailable: + p.SharedSubAvailable, offset, err = decodeByte(bt, offset) + p.SharedSubAvailableFlag = true + } + + if err != nil { + return n + bu, err + } + } + + return n + bu, nil +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/packets/tpackets.go b/vendor/github.com/mochi-mqtt/server/v2/packets/tpackets.go new file mode 100644 index 000000000..9e44c12ed --- /dev/null +++ 
b/vendor/github.com/mochi-mqtt/server/v2/packets/tpackets.go @@ -0,0 +1,3939 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package packets + +// TPacketCase contains data for cross-checking the encoding and decoding +// of packets and expected scenarios. +type TPacketCase struct { + RawBytes []byte // the bytes that make the packet + ActualBytes []byte // the actual byte array that is created in the event of a byte mutation + Group string // a group that should run the test, blank for all + Desc string // a description of the test + FailFirst error // expected fail result to be run immediately after the method is called + Packet *Packet // the packet that is Expected + ActualPacket *Packet // the actual packet after mutations + Expect error // generic Expected fail result to be checked + Isolate bool // isolate can be used to isolate a test + Primary bool // primary is a test that should be run using readPackets + Case byte // the identifying byte of the case +} + +// TPacketCases is a slice of TPacketCase. +type TPacketCases []TPacketCase + +// Get returns a case matching a given T byte. 
+func (f TPacketCases) Get(b byte) TPacketCase { + for _, v := range f { + if v.Case == b { + return v + } + } + + return TPacketCase{} +} + +const ( + TConnectMqtt31 byte = iota + TConnectMqtt311 + TConnectMqtt5 + TConnectMqtt5LWT + TConnectClean + TConnectCleanLWT + TConnectUserPass + TConnectUserPassLWT + TConnectMalProtocolName + TConnectMalProtocolVersion + TConnectMalFlags + TConnectMalKeepalive + TConnectMalClientID + TConnectMalWillTopic + TConnectMalWillFlag + TConnectMalUsername + TConnectMalPassword + TConnectMalFixedHeader + TConnectMalReservedBit + TConnectMalProperties + TConnectMalWillProperties + TConnectInvalidProtocolName + TConnectInvalidProtocolVersion + TConnectInvalidProtocolVersion2 + TConnectInvalidReservedBit + TConnectInvalidClientIDTooLong + TConnectInvalidPasswordNoUsername + TConnectInvalidFlagNoUsername + TConnectInvalidFlagNoPassword + TConnectInvalidUsernameNoFlag + TConnectInvalidPasswordNoFlag + TConnectInvalidUsernameTooLong + TConnectInvalidPasswordTooLong + TConnectInvalidWillFlagNoPayload + TConnectInvalidWillFlagQosOutOfRange + TConnectInvalidWillSurplusRetain + TConnectZeroByteUsername + TConnectSpecInvalidUTF8D800 + TConnectSpecInvalidUTF8DFFF + TConnectSpecInvalidUTF80000 + TConnectSpecInvalidUTF8NoSkip + TConnackAcceptedNoSession + TConnackAcceptedSessionExists + TConnackAcceptedMqtt5 + TConnackAcceptedAdjustedExpiryInterval + TConnackMinMqtt5 + TConnackMinCleanMqtt5 + TConnackServerKeepalive + TConnackInvalidMinMqtt5 + TConnackBadProtocolVersion + TConnackProtocolViolationNoSession + TConnackBadClientID + TConnackServerUnavailable + TConnackBadUsernamePassword + TConnackBadUsernamePasswordNoSession + TConnackMqtt5BadUsernamePasswordNoSession + TConnackNotAuthorised + TConnackMalSessionPresent + TConnackMalReturnCode + TConnackMalProperties + TConnackDropProperties + TConnackDropPropertiesPartial + TPublishNoPayload + TPublishBasic + TPublishBasicTopicAliasOnly + TPublishBasicMqtt5 + TPublishMqtt5 + TPublishQos1 + 
TPublishQos1Mqtt5 + TPublishQos1NoPayload + TPublishQos1Dup + TPublishQos2 + TPublishQos2Mqtt5 + TPublishQos2Upgraded + TPublishSubscriberIdentifier + TPublishRetain + TPublishRetainMqtt5 + TPublishDup + TPublishMalTopicName + TPublishMalPacketID + TPublishMalProperties + TPublishCopyBasic + TPublishSpecQos0NoPacketID + TPublishSpecQosMustPacketID + TPublishDropOversize + TPublishInvalidQos0NoPacketID + TPublishInvalidQosMustPacketID + TPublishInvalidSurplusSubID + TPublishInvalidSurplusWildcard + TPublishInvalidSurplusWildcard2 + TPublishInvalidNoTopic + TPublishInvalidTopicAlias + TPublishInvalidExcessTopicAlias + TPublishSpecDenySysTopic + TPuback + TPubackMqtt5 + TPubackMalPacketID + TPubackMalProperties + TPubackUnexpectedError + TPubrec + TPubrecMqtt5 + TPubrecMqtt5IDInUse + TPubrecMalPacketID + TPubrecMalProperties + TPubrecMalReasonCode + TPubrecInvalidReason + TPubrel + TPubrelMqtt5 + TPubrelMqtt5AckNoPacket + TPubrelMalPacketID + TPubrelMalProperties + TPubrelInvalidReason + TPubcomp + TPubcompMqtt5 + TPubcompMqtt5AckNoPacket + TPubcompMalPacketID + TPubcompMalProperties + TPubcompInvalidReason + TSubscribe + TSubscribeMany + TSubscribeMqtt5 + TSubscribeRetainHandling1 + TSubscribeRetainHandling2 + TSubscribeRetainAsPublished + TSubscribeMalPacketID + TSubscribeMalTopic + TSubscribeMalQos + TSubscribeMalQosRange + TSubscribeMalProperties + TSubscribeInvalidQosMustPacketID + TSubscribeSpecQosMustPacketID + TSubscribeInvalidNoFilters + TSubscribeInvalidSharedNoLocal + TSubscribeInvalidFilter + TSubscribeInvalidIdentifierOversize + TSuback + TSubackMany + TSubackDeny + TSubackUnspecifiedError + TSubackUnspecifiedErrorMqtt5 + TSubackMqtt5 + TSubackPacketIDInUse + TSubackInvalidFilter + TSubackInvalidSharedNoLocal + TSubackMalPacketID + TSubackMalProperties + TUnsubscribe + TUnsubscribeMany + TUnsubscribeMqtt5 + TUnsubscribeDropProperties + TUnsubscribeMalPacketID + TUnsubscribeMalTopicName + TUnsubscribeMalProperties + TUnsubscribeInvalidQosMustPacketID + 
TUnsubscribeSpecQosMustPacketID + TUnsubscribeInvalidNoFilters + TUnsuback + TUnsubackMany + TUnsubackMqtt5 + TUnsubackPacketIDInUse + TUnsubackMalPacketID + TUnsubackMalProperties + TPingreq + TPingresp + TDisconnect + TDisconnectTakeover + TDisconnectMqtt5 + TDisconnectNormalMqtt5 + TDisconnectSecondConnect + TDisconnectReceiveMaximum + TDisconnectDropProperties + TDisconnectShuttingDown + TDisconnectMalProperties + TDisconnectMalReasonCode + TDisconnectZeroNonZeroExpiry + TAuth + TAuthMalReasonCode + TAuthMalProperties + TAuthInvalidReason + TAuthInvalidReason2 +) + +// TPacketData contains individual encoding and decoding scenarios for each packet type. +var TPacketData = map[byte]TPacketCases{ + Connect: { + { + Case: TConnectMqtt31, + Desc: "mqtt v3.1", + Primary: true, + RawBytes: []byte{ + Connect << 4, 17, // Fixed header + 0, 6, // Protocol Name - MSB+LSB + 'M', 'Q', 'I', 's', 'd', 'p', // Protocol Name + 3, // Protocol Version + 0, // Packet Flags + 0, 30, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 17, + }, + ProtocolVersion: 3, + Connect: ConnectParams{ + ProtocolName: []byte("MQIsdp"), + Clean: false, + Keepalive: 30, + ClientIdentifier: "zen", + }, + }, + }, + { + Case: TConnectMqtt311, + Desc: "mqtt v3.1.1", + Primary: true, + RawBytes: []byte{ + Connect << 4, 15, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0, // Packet Flags + 0, 60, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 15, + }, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Clean: false, + Keepalive: 60, + ClientIdentifier: "zen", + }, + }, + }, + { + Case: TConnectMqtt5, + Desc: "mqtt v5", + Primary: true, + RawBytes: []byte{ + Connect << 4, 87, // 
Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 5, // Protocol Version + 0, // Packet Flags + 0, 30, // Keepalive + + // Properties + 71, // length + 17, 0, 0, 0, 120, // Session Expiry Interval (17) + 21, 0, 5, 'S', 'H', 'A', '-', '1', // Authentication Method (21) + 22, 0, 9, 'a', 'u', 't', 'h', '-', 'd', 'a', 't', 'a', // Authentication Data (22) + 23, 1, // Request Problem Info (23) + 25, 1, // Request Response Info (25) + 33, 1, 244, // Receive Maximum (33) + 34, 3, 231, // Topic Alias Maximum (34) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + 38, // User Properties (38) + 0, 4, 'k', 'e', 'y', '2', + 0, 6, 'v', 'a', 'l', 'u', 'e', '2', + 39, 0, 0, 125, 0, // Maximum Packet Size (39) + + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 87, + }, + ProtocolVersion: 5, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Clean: false, + Keepalive: 30, + ClientIdentifier: "zen", + }, + Properties: Properties{ + SessionExpiryInterval: uint32(120), + SessionExpiryIntervalFlag: true, + AuthenticationMethod: "SHA-1", + AuthenticationData: []byte("auth-data"), + RequestProblemInfo: byte(1), + RequestProblemInfoFlag: true, + RequestResponseInfo: byte(1), + ReceiveMaximum: uint16(500), + TopicAliasMaximum: uint16(999), + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + { + Key: "key2", + Val: "value2", + }, + }, + MaximumPacketSize: uint32(32000), + }, + }, + }, + { + Case: TConnectClean, + Desc: "mqtt 3.1.1, clean session", + RawBytes: []byte{ + Connect << 4, 15, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 2, // Packet Flags + 0, 45, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + 
Remaining: 15, + }, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Clean: true, + Keepalive: 45, + ClientIdentifier: "zen", + }, + }, + }, + { + Case: TConnectMqtt5LWT, + Desc: "mqtt 5 clean session, lwt", + RawBytes: []byte{ + Connect << 4, 47, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 5, // Protocol Version + 14, // Packet Flags + 0, 30, // Keepalive + + // Properties + 10, // length + 17, 0, 0, 0, 120, // Session Expiry Interval (17) + 39, 0, 0, 125, 0, // Maximum Packet Size (39) + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 5, // will properties length + 24, 0, 0, 2, 88, // will delay interval (24) + + 0, 3, // Will Topic - MSB+LSB + 'l', 'w', 't', + 0, 8, // Will Message MSB+LSB + 'n', 'o', 't', 'a', 'g', 'a', 'i', 'n', + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 42, + }, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Clean: true, + Keepalive: 30, + ClientIdentifier: "zen", + WillFlag: true, + WillTopic: "lwt", + WillPayload: []byte("notagain"), + WillQos: 1, + WillProperties: Properties{ + WillDelayInterval: uint32(600), + }, + }, + Properties: Properties{ + SessionExpiryInterval: uint32(120), + SessionExpiryIntervalFlag: true, + MaximumPacketSize: uint32(32000), + }, + }, + }, + { + Case: TConnectUserPass, + Desc: "mqtt 3.1.1, username, password", + RawBytes: []byte{ + Connect << 4, 28, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0 | 1<<6 | 1<<7, // Packet Flags + 0, 20, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 0, 5, // Username MSB+LSB + 'm', 'o', 'c', 'h', 'i', + 0, 4, // Password MSB+LSB + ',', '.', '/', ';', + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 28, + }, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: 
[]byte("MQTT"), + Clean: false, + Keepalive: 20, + ClientIdentifier: "zen", + UsernameFlag: true, + PasswordFlag: true, + Username: []byte("mochi"), + Password: []byte(",./;"), + }, + }, + }, + { + Case: TConnectUserPassLWT, + Desc: "mqtt 3.1.1, username, password, lwt", + Primary: true, + RawBytes: []byte{ + Connect << 4, 44, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 206, // Packet Flags + 0, 120, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 0, 3, // Will Topic - MSB+LSB + 'l', 'w', 't', + 0, 9, // Will Message MSB+LSB + 'n', 'o', 't', ' ', 'a', 'g', 'a', 'i', 'n', + 0, 5, // Username MSB+LSB + 'm', 'o', 'c', 'h', 'i', + 0, 4, // Password MSB+LSB + ',', '.', '/', ';', + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 44, + }, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Clean: true, + Keepalive: 120, + ClientIdentifier: "zen", + UsernameFlag: true, + PasswordFlag: true, + Username: []byte("mochi"), + Password: []byte(",./;"), + WillFlag: true, + WillTopic: "lwt", + WillPayload: []byte("not again"), + WillQos: 1, + }, + }, + }, + { + Case: TConnectZeroByteUsername, + Desc: "username flag but 0 byte username", + Group: "decode", + RawBytes: []byte{ + Connect << 4, 23, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 5, // Protocol Version + 130, // Packet Flags + 0, 30, // Keepalive + 5, // length + 17, 0, 0, 0, 120, // Session Expiry Interval (17) + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 0, 0, // Username MSB+LSB + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 23, + }, + ProtocolVersion: 5, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Clean: true, + Keepalive: 30, + ClientIdentifier: "zen", + Username: []byte{}, + UsernameFlag: true, + }, + Properties: Properties{ + 
SessionExpiryInterval: uint32(120), + SessionExpiryIntervalFlag: true, + }, + }, + }, + + // Fail States + { + Case: TConnectMalProtocolName, + Desc: "malformed protocol name", + Group: "decode", + FailFirst: ErrMalformedProtocolName, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 7, // Protocol Name - MSB+LSB + 'M', 'Q', 'I', 's', 'd', // Protocol Name + }, + }, + { + Case: TConnectMalProtocolVersion, + Desc: "malformed protocol version", + Group: "decode", + FailFirst: ErrMalformedProtocolVersion, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + }, + }, + { + Case: TConnectMalFlags, + Desc: "malformed flags", + Group: "decode", + FailFirst: ErrMalformedFlags, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + + }, + }, + { + Case: TConnectMalKeepalive, + Desc: "malformed keepalive", + Group: "decode", + FailFirst: ErrMalformedKeepalive, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0, // Flags + }, + }, + { + Case: TConnectMalClientID, + Desc: "malformed client id", + Group: "decode", + FailFirst: ErrClientIdentifierNotValid, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0, // Flags + 0, 20, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', // Client ID "zen" + }, + }, + { + Case: TConnectMalWillTopic, + Desc: "malformed will topic", + Group: "decode", + FailFirst: ErrMalformedWillTopic, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 14, // Flags + 0, 20, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 0, 
6, // Will Topic - MSB+LSB + 'l', + }, + }, + { + Case: TConnectMalWillFlag, + Desc: "malformed will flag", + Group: "decode", + FailFirst: ErrMalformedWillPayload, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 14, // Flags + 0, 20, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 0, 3, // Will Topic - MSB+LSB + 'l', 'w', 't', + 0, 9, // Will Message MSB+LSB + 'n', 'o', 't', ' ', 'a', + }, + }, + { + Case: TConnectMalUsername, + Desc: "malformed username", + Group: "decode", + FailFirst: ErrMalformedUsername, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 206, // Flags + 0, 20, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 0, 3, // Will Topic - MSB+LSB + 'l', 'w', 't', + 0, 9, // Will Message MSB+LSB + 'n', 'o', 't', ' ', 'a', 'g', 'a', 'i', 'n', + 0, 5, // Username MSB+LSB + 'm', 'o', 'c', + }, + }, + + { + Case: TConnectInvalidFlagNoUsername, + Desc: "username flag with no username bytes", + Group: "decode", + FailFirst: ErrProtocolViolationFlagNoUsername, + RawBytes: []byte{ + Connect << 4, 17, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 5, // Protocol Version + 130, // Flags + 0, 20, // Keepalive + 0, + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + }, + }, + { + Case: TConnectMalPassword, + Desc: "malformed password", + Group: "decode", + FailFirst: ErrMalformedPassword, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 206, // Flags + 0, 20, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 0, 3, // Will Topic - MSB+LSB + 'l', 'w', 't', + 0, 9, // Will Message MSB+LSB + 'n', 
'o', 't', ' ', 'a', 'g', 'a', 'i', 'n', + 0, 5, // Username MSB+LSB + 'm', 'o', 'c', 'h', 'i', + 0, 4, // Password MSB+LSB + ',', '.', + }, + }, + { + Case: TConnectMalFixedHeader, + Desc: "malformed fixedheader oversize", + Group: "decode", + FailFirst: ErrMalformedProtocolName, // packet test doesn't test fixedheader oversize + RawBytes: []byte{ + Connect << 4, 255, 255, 255, 255, 255, // Fixed header + }, + }, + { + Case: TConnectMalReservedBit, + Desc: "reserved bit not 0", + Group: "nodecode", + FailFirst: ErrProtocolViolation, + RawBytes: []byte{ + Connect << 4, 15, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 1, // Packet Flags + 0, 45, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', + }, + }, + { + Case: TConnectMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Connect << 4, 47, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 5, // Protocol Version + 14, // Packet Flags + 0, 30, // Keepalive + 10, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + { + Case: TConnectMalWillProperties, + Desc: "malformed will properties", + Group: "decode", + FailFirst: ErrMalformedWillProperties, + RawBytes: []byte{ + Connect << 4, 47, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 5, // Protocol Version + 14, // Packet Flags + 0, 30, // Keepalive + 10, // length + 17, 0, 0, 0, 120, // Session Expiry Interval (17) + 39, 0, 0, 125, 0, // Maximum Packet Size (39) + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 5, // will properties length + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + + // Validation Tests + { + Case: TConnectInvalidProtocolName, + Desc: "invalid protocol name", + Group: "validate", + Expect: ErrProtocolViolationProtocolName, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: 
Connect}, + Connect: ConnectParams{ + ProtocolName: []byte("stuff"), + }, + }, + }, + { + Case: TConnectInvalidProtocolVersion, + Desc: "invalid protocol version", + Group: "validate", + Expect: ErrProtocolViolationProtocolVersion, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 2, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + }, + }, + }, + { + Case: TConnectInvalidProtocolVersion2, + Desc: "invalid protocol version", + Group: "validate", + Expect: ErrProtocolViolationProtocolVersion, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 2, + Connect: ConnectParams{ + ProtocolName: []byte("MQIsdp"), + }, + }, + }, + { + Case: TConnectInvalidReservedBit, + Desc: "reserved bit not 0", + Group: "validate", + Expect: ErrProtocolViolationReservedBit, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + }, + ReservedBit: 1, + }, + }, + { + Case: TConnectInvalidClientIDTooLong, + Desc: "client id too long", + Group: "validate", + Expect: ErrClientIdentifierNotValid, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + ClientIdentifier: func() string { + return string(make([]byte, 65536)) + }(), + }, + }, + }, + { + Case: TConnectInvalidUsernameNoFlag, + Desc: "has username but no flag", + Group: "validate", + Expect: ErrProtocolViolationUsernameNoFlag, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Username: []byte("username"), + }, + }, + }, + { + Case: TConnectInvalidFlagNoPassword, + Desc: "has password flag but no password", + Group: "validate", + Expect: ErrProtocolViolationFlagNoPassword, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: 
[]byte("MQTT"), + PasswordFlag: true, + }, + }, + }, + { + Case: TConnectInvalidPasswordNoFlag, + Desc: "has password flag but no password", + Group: "validate", + Expect: ErrProtocolViolationPasswordNoFlag, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Password: []byte("password"), + }, + }, + }, + { + Case: TConnectInvalidUsernameTooLong, + Desc: "username too long", + Group: "validate", + Expect: ErrProtocolViolationUsernameTooLong, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + UsernameFlag: true, + Username: func() []byte { + return make([]byte, 65536) + }(), + }, + }, + }, + { + Case: TConnectInvalidPasswordTooLong, + Desc: "password too long", + Group: "validate", + Expect: ErrProtocolViolationPasswordTooLong, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + UsernameFlag: true, + Username: []byte{}, + PasswordFlag: true, + Password: func() []byte { + return make([]byte, 65536) + }(), + }, + }, + }, + { + Case: TConnectInvalidWillFlagNoPayload, + Desc: "will flag no payload", + Group: "validate", + Expect: ErrProtocolViolationWillFlagNoPayload, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + WillFlag: true, + }, + }, + }, + { + Case: TConnectInvalidWillFlagQosOutOfRange, + Desc: "will flag no payload", + Group: "validate", + Expect: ErrProtocolViolationQosOutOfRange, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + WillFlag: true, + WillTopic: "a/b/c", + WillPayload: []byte{'b'}, + WillQos: 4, + }, + }, + }, + { + Case: TConnectInvalidWillSurplusRetain, + Desc: "no will flag surplus 
retain", + Group: "validate", + Expect: ErrProtocolViolationWillFlagSurplusRetain, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + WillRetain: true, + }, + }, + }, + + // Spec Tests + { + Case: TConnectSpecInvalidUTF8D800, + Desc: "invalid utf8 string (a) - code point U+D800", + Group: "decode", + FailFirst: ErrClientIdentifierNotValid, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0, // Flags + 0, 20, // Keepalive + 0, 4, // Client ID - MSB+LSB + 'e', 0xed, 0xa0, 0x80, // Client id bearing U+D800 + }, + }, + { + Case: TConnectSpecInvalidUTF8DFFF, + Desc: "invalid utf8 string (b) - code point U+DFFF", + Group: "decode", + FailFirst: ErrClientIdentifierNotValid, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0, // Flags + 0, 20, // Keepalive + 0, 4, // Client ID - MSB+LSB + 'e', 0xed, 0xa3, 0xbf, // Client id bearing U+D8FF + }, + }, + + { + Case: TConnectSpecInvalidUTF80000, + Desc: "invalid utf8 string (c) - code point U+0000", + Group: "decode", + FailFirst: ErrClientIdentifierNotValid, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0, // Flags + 0, 20, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'e', 0xc0, 0x80, // Client id bearing U+0000 + }, + }, + + { + Case: TConnectSpecInvalidUTF8NoSkip, + Desc: "utf8 string must not skip or strip code point U+FEFF", + //Group: "decode", + //FailFirst: ErrMalformedClientID, + RawBytes: []byte{ + Connect << 4, 18, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0, // Flags + 0, 20, // Keepalive + 0, 6, // Client ID - MSB+LSB + 'e', 'b', 
0xEF, 0xBB, 0xBF, 'd', // Client id bearing U+FEFF + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 16, + }, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Keepalive: 20, + ClientIdentifier: string([]byte{'e', 'b', 0xEF, 0xBB, 0xBF, 'd'}), + }, + }, + }, + }, + Connack: { + { + Case: TConnackAcceptedNoSession, + Desc: "accepted, no session", + Primary: true, + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 0, // No existing session + CodeSuccess.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + SessionPresent: false, + ReasonCode: CodeSuccess.Code, + }, + }, + { + Case: TConnackAcceptedSessionExists, + Desc: "accepted, session exists", + Primary: true, + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 1, // Session present + CodeSuccess.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + SessionPresent: true, + ReasonCode: CodeSuccess.Code, + }, + }, + { + Case: TConnackAcceptedAdjustedExpiryInterval, + Desc: "accepted, no session, adjusted expiry interval mqtt5", + Primary: true, + RawBytes: []byte{ + Connack << 4, 8, // fixed header + 0, // Session present + CodeSuccess.Code, + 5, // length + 17, 0, 0, 0, 120, // Session Expiry Interval (17) + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 8, + }, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + SessionExpiryInterval: uint32(120), + SessionExpiryIntervalFlag: true, + }, + }, + }, + { + Case: TConnackAcceptedMqtt5, + Desc: "accepted no session mqtt5", + Primary: true, + RawBytes: []byte{ + Connack << 4, 124, // fixed header + 0, // No existing session + CodeSuccess.Code, + // Properties + 121, // length + 17, 0, 0, 0, 120, // Session Expiry Interval (17) + 18, 0, 8, 'm', 'o', 'c', 'h', 'i', '-', 'v', '5', // Assigned Client ID (18) + 19, 0, 20, // Server Keep Alive (19) + 21, 
0, 5, 'S', 'H', 'A', '-', '1', // Authentication Method (21) + 22, 0, 9, 'a', 'u', 't', 'h', '-', 'd', 'a', 't', 'a', // Authentication Data (22) + 26, 0, 8, 'r', 'e', 's', 'p', 'o', 'n', 's', 'e', // Response Info (26) + 28, 0, 7, 'm', 'o', 'c', 'h', 'i', '-', '2', // Server Reference (28) + 31, 0, 6, 'r', 'e', 'a', 's', 'o', 'n', // Reason String (31) + 33, 1, 244, // Receive Maximum (33) + 34, 3, 231, // Topic Alias Maximum (34) + 36, 1, // Maximum Qos (36) + 37, 1, // Retain Available (37) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + 38, // User Properties (38) + 0, 4, 'k', 'e', 'y', '2', + 0, 6, 'v', 'a', 'l', 'u', 'e', '2', + 39, 0, 0, 125, 0, // Maximum Packet Size (39) + 40, 1, // Wildcard Subscriptions Available (40) + 41, 1, // Subscription ID Available (41) + 42, 1, // Shared Subscriptions Available (42) + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 124, + }, + SessionPresent: false, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + SessionExpiryInterval: uint32(120), + SessionExpiryIntervalFlag: true, + AssignedClientID: "mochi-v5", + ServerKeepAlive: uint16(20), + ServerKeepAliveFlag: true, + AuthenticationMethod: "SHA-1", + AuthenticationData: []byte("auth-data"), + ResponseInfo: "response", + ServerReference: "mochi-2", + ReasonString: "reason", + ReceiveMaximum: uint16(500), + TopicAliasMaximum: uint16(999), + MaximumQos: byte(1), + MaximumQosFlag: true, + RetainAvailable: byte(1), + RetainAvailableFlag: true, + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + { + Key: "key2", + Val: "value2", + }, + }, + MaximumPacketSize: uint32(32000), + WildcardSubAvailable: byte(1), + WildcardSubAvailableFlag: true, + SubIDAvailable: byte(1), + SubIDAvailableFlag: true, + SharedSubAvailable: byte(1), + SharedSubAvailableFlag: true, + }, + }, + }, + { + Case: TConnackMinMqtt5, + Desc: "accepted min properties mqtt5", + 
Primary: true, + RawBytes: []byte{ + Connack << 4, 13, // fixed header + 1, // existing session + CodeSuccess.Code, + 10, // Properties length + 18, 0, 5, 'm', 'o', 'c', 'h', 'i', // Assigned Client ID (18) + 36, 1, // Maximum Qos (36) + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 13, + }, + SessionPresent: true, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + AssignedClientID: "mochi", + MaximumQos: byte(1), + MaximumQosFlag: true, + }, + }, + }, + { + Case: TConnackMinCleanMqtt5, + Desc: "accepted min properties mqtt5b", + Primary: true, + RawBytes: []byte{ + Connack << 4, 3, // fixed header + 0, // existing session + CodeSuccess.Code, + 0, // Properties length + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 16, + }, + SessionPresent: false, + ReasonCode: CodeSuccess.Code, + }, + }, + { + Case: TConnackServerKeepalive, + Desc: "server set keepalive", + Primary: true, + RawBytes: []byte{ + Connack << 4, 6, // fixed header + 1, // existing session + CodeSuccess.Code, + 3, // Properties length + 19, 0, 10, // server keepalive + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 6, + }, + SessionPresent: true, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + ServerKeepAlive: uint16(10), + ServerKeepAliveFlag: true, + }, + }, + }, + { + Case: TConnackInvalidMinMqtt5, + Desc: "failure min properties mqtt5", + Primary: true, + RawBytes: append([]byte{ + Connack << 4, 23, // fixed header + 0, // No existing session + ErrUnspecifiedError.Code, + // Properties + 20, // length + 31, 0, 17, // Reason String (31) + }, []byte(ErrUnspecifiedError.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 23, + }, + SessionPresent: false, + ReasonCode: ErrUnspecifiedError.Code, + Properties: Properties{ + ReasonString: 
ErrUnspecifiedError.Reason, + }, + }, + }, + + { + Case: TConnackProtocolViolationNoSession, + Desc: "miscellaneous protocol violation", + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 0, // Session present + ErrProtocolViolation.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + ReasonCode: ErrProtocolViolation.Code, + }, + }, + { + Case: TConnackBadProtocolVersion, + Desc: "bad protocol version", + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 1, // Session present + ErrProtocolViolationProtocolVersion.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + SessionPresent: true, + ReasonCode: ErrProtocolViolationProtocolVersion.Code, + }, + }, + { + Case: TConnackBadClientID, + Desc: "bad client id", + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 1, // Session present + ErrClientIdentifierNotValid.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + SessionPresent: true, + ReasonCode: ErrClientIdentifierNotValid.Code, + }, + }, + { + Case: TConnackServerUnavailable, + Desc: "server unavailable", + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 1, // Session present + ErrServerUnavailable.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + SessionPresent: true, + ReasonCode: ErrServerUnavailable.Code, + }, + }, + { + Case: TConnackBadUsernamePassword, + Desc: "bad username or password", + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 1, // Session present + ErrBadUsernameOrPassword.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + SessionPresent: true, + ReasonCode: ErrBadUsernameOrPassword.Code, + }, + }, + { + Case: TConnackBadUsernamePasswordNoSession, + Desc: "bad username or password no session", + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 0, // No session present + 
Err3NotAuthorized.Code, // use v3 remapping + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + ReasonCode: Err3NotAuthorized.Code, + }, + }, + { + Case: TConnackMqtt5BadUsernamePasswordNoSession, + Desc: "mqtt5 bad username or password no session", + RawBytes: []byte{ + Connack << 4, 3, // fixed header + 0, // No session present + ErrBadUsernameOrPassword.Code, + 0, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + ReasonCode: ErrBadUsernameOrPassword.Code, + }, + }, + + { + Case: TConnackNotAuthorised, + Desc: "not authorised", + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 1, // Session present + ErrNotAuthorized.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + SessionPresent: true, + ReasonCode: ErrNotAuthorized.Code, + }, + }, + { + Case: TConnackDropProperties, + Desc: "drop oversize properties", + Group: "encode", + RawBytes: []byte{ + Connack << 4, 40, // fixed header + 0, // No existing session + CodeSuccess.Code, + 19, // length + 28, 0, 7, 'm', 'o', 'c', 'h', 'i', '-', '2', // Server Reference (28) + 31, 0, 6, 'r', 'e', 'a', 's', 'o', 'n', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + ActualBytes: []byte{ + Connack << 4, 13, // fixed header + 0, // No existing session + CodeSuccess.Code, + 10, // length + 28, 0, 7, 'm', 'o', 'c', 'h', 'i', '-', '2', // Server Reference (28) + }, + Packet: &Packet{ + Mods: Mods{ + MaxSize: 5, + }, + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 40, + }, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + ReasonString: "reason", + ServerReference: "mochi-2", + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TConnackDropPropertiesPartial, + Desc: "drop oversize properties partial", + Group: 
"encode", + RawBytes: []byte{ + Connack << 4, 40, // fixed header + 0, // No existing session + CodeSuccess.Code, + 19, // length + 28, 0, 7, 'm', 'o', 'c', 'h', 'i', '-', '2', // Server Reference (28) + 31, 0, 6, 'r', 'e', 'a', 's', 'o', 'n', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + ActualBytes: []byte{ + Connack << 4, 22, // fixed header + 0, // No existing session + CodeSuccess.Code, + 19, // length + 28, 0, 7, 'm', 'o', 'c', 'h', 'i', '-', '2', // Server Reference (28) + 31, 0, 6, 'r', 'e', 'a', 's', 'o', 'n', // Reason String (31) + }, + Packet: &Packet{ + Mods: Mods{ + MaxSize: 18, + }, + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 40, + }, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + ReasonString: "reason", + ServerReference: "mochi-2", + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + // Fail States + { + Case: TConnackMalSessionPresent, + Desc: "malformed session present", + Group: "decode", + FailFirst: ErrMalformedSessionPresent, + RawBytes: []byte{ + Connect << 4, 2, // Fixed header + }, + }, + { + Case: TConnackMalReturnCode, + Desc: "malformed bad return Code", + Group: "decode", + //Primary: true, + FailFirst: ErrMalformedReasonCode, + RawBytes: []byte{ + Connect << 4, 2, // Fixed header + 0, + }, + }, + { + Case: TConnackMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Connack << 4, 40, // fixed header + 0, // No existing session + CodeSuccess.Code, + 19, // length + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + + Publish: { + { + Case: TPublishNoPayload, + Desc: "no payload", + Primary: true, + RawBytes: []byte{ + Publish << 4, 7, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + 
Remaining: 7, + }, + TopicName: "a/b/c", + Payload: []byte{}, + }, + }, + { + Case: TPublishBasic, + Desc: "basic", + Primary: true, + RawBytes: []byte{ + Publish << 4, 18, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 18, + }, + TopicName: "a/b/c", + Payload: []byte("hello mochi"), + }, + }, + + { + Case: TPublishMqtt5, + Desc: "mqtt v5", + Primary: true, + RawBytes: []byte{ + Publish << 4, 77, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 58, // length + 1, 1, // Payload Format (1) + 2, 0, 0, 0, 2, // Message Expiry (2) + 3, 0, 10, 't', 'e', 'x', 't', '/', 'p', 'l', 'a', 'i', 'n', // Content Type (3) + 8, 0, 5, 'a', '/', 'b', '/', 'c', // Response Topic (8) + 9, 0, 4, 'd', 'a', 't', 'a', // Correlations Data (9) + 11, 202, 212, 19, // Subscription Identifier (11) + 35, 0, 3, // Topic Alias (35) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 77, + }, + TopicName: "a/b/c", + Properties: Properties{ + PayloadFormat: byte(1), // UTF-8 Format + PayloadFormatFlag: true, + MessageExpiryInterval: uint32(2), + ContentType: "text/plain", + ResponseTopic: "a/b/c", + CorrelationData: []byte("data"), + SubscriptionIdentifier: []int{322122}, + TopicAlias: uint16(3), + TopicAliasFlag: true, + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + Payload: []byte("hello mochi"), + }, + }, + { + Case: TPublishBasicTopicAliasOnly, + Desc: "mqtt v5 topic alias only", + Primary: true, + RawBytes: []byte{ + Publish << 4, 17, // Fixed header + 0, 0, // Topic Name - LSB+MSB + 3, // length + 35, 0, 1, // Topic 
Alias (35) + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 17, + }, + Properties: Properties{ + TopicAlias: 1, + TopicAliasFlag: true, + }, + Payload: []byte("hello mochi"), + }, + }, + { + Case: TPublishBasicMqtt5, + Desc: "mqtt basic v5", + Primary: true, + RawBytes: []byte{ + Publish << 4, 22, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 3, // length + 35, 0, 1, // Topic Alias (35) + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 22, + }, + TopicName: "a/b/c", + Properties: Properties{ + TopicAlias: uint16(1), + TopicAliasFlag: true, + }, + Payload: []byte("hello mochi"), + }, + }, + + { + Case: TPublishQos1, + Desc: "qos:1, packet id", + Primary: true, + RawBytes: []byte{ + Publish<<4 | 1<<1, 20, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 7, // Packet ID - LSB+MSB + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Qos: 1, + Remaining: 20, + }, + TopicName: "a/b/c", + Payload: []byte("hello mochi"), + PacketID: 7, + }, + }, + { + Case: TPublishQos1Mqtt5, + Desc: "mqtt v5", + Primary: true, + RawBytes: []byte{ + Publish<<4 | 1<<1, 37, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 7, // Packet ID - LSB+MSB + // Properties + 16, // length + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 37, + Qos: 1, + }, + PacketID: 7, + TopicName: "a/b/c", + Properties: Properties{ + User: 
[]UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + Payload: []byte("hello mochi"), + }, + }, + + { + Case: TPublishQos1Dup, + Desc: "qos:1, dup:true, packet id", + Primary: true, + RawBytes: []byte{ + Publish<<4 | 2 | 8, 20, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 7, // Packet ID - LSB+MSB + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Qos: 1, + Remaining: 20, + Dup: true, + }, + TopicName: "a/b/c", + Payload: []byte("hello mochi"), + PacketID: 7, + }, + }, + { + Case: TPublishQos1NoPayload, + Desc: "qos:1, packet id, no payload", + Primary: true, + RawBytes: []byte{ + Publish<<4 | 2, 9, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'y', '/', 'u', '/', 'i', // Topic Name + 0, 7, // Packet ID - LSB+MSB + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Qos: 1, + Remaining: 9, + }, + TopicName: "y/u/i", + PacketID: 7, + Payload: []byte{}, + }, + }, + { + Case: TPublishQos2, + Desc: "qos:2, packet id", + Primary: true, + RawBytes: []byte{ + Publish<<4 | 2<<1, 14, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 7, // Packet ID - LSB+MSB + 'h', 'e', 'l', 'l', 'o', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Qos: 2, + Remaining: 14, + }, + TopicName: "a/b/c", + Payload: []byte("hello"), + PacketID: 7, + }, + }, + { + Case: TPublishQos2Mqtt5, + Desc: "mqtt v5", + Primary: true, + RawBytes: []byte{ + Publish<<4 | 2<<1, 37, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 7, // Packet ID - LSB+MSB + // Properties + 16, // length + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: 
FixedHeader{ + Type: Publish, + Remaining: 37, + Qos: 2, + }, + PacketID: 7, + TopicName: "a/b/c", + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + Payload: []byte("hello mochi"), + }, + }, + { + Case: TPublishSubscriberIdentifier, + Desc: "subscription identifiers", + Primary: true, + RawBytes: []byte{ + Publish << 4, 23, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 4, // properties length + 11, 2, // Subscription Identifier (11) + 11, 3, // Subscription Identifier (11) + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 23, + }, + TopicName: "a/b/c", + Properties: Properties{ + SubscriptionIdentifier: []int{2, 3}, + }, + Payload: []byte("hello mochi"), + }, + }, + + { + Case: TPublishQos2Upgraded, + Desc: "qos:2, upgraded from publish to qos2 sub", + Primary: true, + RawBytes: []byte{ + Publish<<4 | 2<<1, 20, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 1, // Packet ID - LSB+MSB + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Qos: 2, + Remaining: 18, + }, + TopicName: "a/b/c", + Payload: []byte("hello mochi"), + PacketID: 1, + }, + }, + { + Case: TPublishRetain, + Desc: "retain", + RawBytes: []byte{ + Publish<<4 | 1<<0, 18, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Retain: true, + }, + TopicName: "a/b/c", + Payload: []byte("hello mochi"), + }, + }, + { + Case: TPublishRetainMqtt5, + Desc: "retain mqtt5", + RawBytes: []byte{ + Publish<<4 | 1<<0, 19, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic 
Name + 0, // properties length + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Publish, + Retain: true, + Remaining: 19, + }, + TopicName: "a/b/c", + Properties: Properties{}, + Payload: []byte("hello mochi"), + }, + }, + { + Case: TPublishDup, + Desc: "dup", + RawBytes: []byte{ + Publish<<4 | 8, 10, // Fixed header + 0, 3, // Topic Name - LSB+MSB + 'a', '/', 'b', // Topic Name + 'h', 'e', 'l', 'l', 'o', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Dup: true, + }, + TopicName: "a/b", + Payload: []byte("hello"), + }, + }, + + // Fail States + { + Case: TPublishMalTopicName, + Desc: "malformed topic name", + Group: "decode", + FailFirst: ErrMalformedTopic, + RawBytes: []byte{ + Publish << 4, 7, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', + 0, 11, // Packet ID - LSB+MSB + }, + }, + { + Case: TPublishMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Publish<<4 | 2, 7, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'x', '/', 'y', '/', 'z', // Topic Name + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TPublishMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Publish << 4, 35, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 16, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + + // Copy tests + { + Case: TPublishCopyBasic, + Desc: "basic copyable", + Group: "copy", + RawBytes: []byte{ + Publish << 4, 18, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'z', '/', 'e', '/', 'n', // Topic Name + 'm', 'o', 'c', 'h', 'i', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Dup: true, + Retain: true, + Qos: 1, + }, + TopicName: "z/e/n", + Payload: []byte("mochi 
mochi"), + }, + }, + + // Spec tests + { + Case: TPublishSpecQos0NoPacketID, + Desc: "packet id must be 0 if qos is 0 (a)", + Group: "encode", + // this version tests for correct byte array mutuation. + // this does not check if -incoming- Packets are parsed as correct, + // it is impossible for the parser to determine if the payload start is incorrect. + RawBytes: []byte{ + Publish << 4, 12, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 3, // Packet ID - LSB+MSB + 'h', 'e', 'l', 'l', 'o', // Payload + }, + ActualBytes: []byte{ + Publish << 4, 12, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + // Packet ID is removed. + 'h', 'e', 'l', 'l', 'o', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 12, + }, + TopicName: "a/b/c", + Payload: []byte("hello"), + }, + }, + { + Case: TPublishSpecQosMustPacketID, + Desc: "no packet id with qos > 0", + Group: "encode", + Expect: ErrProtocolViolationNoPacketID, + RawBytes: []byte{ + Publish<<4 | 2, 14, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 0, // Packet ID - LSB+MSB + 'h', 'e', 'l', 'l', 'o', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Qos: 1, + }, + TopicName: "a/b/c", + Payload: []byte("hello"), + PacketID: 0, + }, + }, + { + Case: TPublishDropOversize, + Desc: "drop oversized publish packet", + Group: "encode", + FailFirst: ErrPacketTooLarge, + RawBytes: []byte{ + Publish << 4, 10, // Fixed header + 0, 3, // Topic Name - LSB+MSB + 'a', '/', 'b', // Topic Name + 'h', 'e', 'l', 'l', 'o', // Payload + }, + Packet: &Packet{ + Mods: Mods{ + MaxSize: 2, + }, + FixedHeader: FixedHeader{ + Type: Publish, + }, + TopicName: "a/b", + Payload: []byte("hello"), + }, + }, + + // Validation Tests + { + Case: TPublishInvalidQos0NoPacketID, + Desc: "packet id must be 0 if qos is 0 (b)", + Group: "validate", + Expect: 
ErrProtocolViolationSurplusPacketID, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 12, + Qos: 0, + }, + TopicName: "a/b/c", + Payload: []byte("hello"), + PacketID: 3, + }, + }, + { + Case: TPublishInvalidQosMustPacketID, + Desc: "no packet id with qos > 0", + Group: "validate", + Expect: ErrProtocolViolationNoPacketID, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Qos: 1, + }, + PacketID: 0, + }, + }, + { + Case: TPublishInvalidSurplusSubID, + Desc: "surplus subscription identifier", + Group: "validate", + Expect: ErrProtocolViolationSurplusSubID, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + }, + Properties: Properties{ + SubscriptionIdentifier: []int{1}, + }, + TopicName: "a/b", + }, + }, + { + Case: TPublishInvalidSurplusWildcard, + Desc: "topic contains wildcards", + Group: "validate", + Expect: ErrProtocolViolationSurplusWildcard, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + }, + TopicName: "a/+", + }, + }, + { + Case: TPublishInvalidSurplusWildcard2, + Desc: "topic contains wildcards 2", + Group: "validate", + Expect: ErrProtocolViolationSurplusWildcard, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + }, + TopicName: "a/#", + }, + }, + { + Case: TPublishInvalidNoTopic, + Desc: "no topic or alias specified", + Group: "validate", + Expect: ErrProtocolViolationNoTopic, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + }, + }, + }, + { + Case: TPublishInvalidExcessTopicAlias, + Desc: "topic alias over maximum", + Group: "validate", + Expect: ErrTopicAliasInvalid, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + }, + Properties: Properties{ + TopicAlias: 1025, + }, + TopicName: "a/b", + }, + }, + { + Case: TPublishInvalidTopicAlias, + Desc: "topic alias flag and no alias", + Group: "validate", + Expect: ErrTopicAliasInvalid, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + }, + Properties: Properties{ + 
TopicAliasFlag: true, + TopicAlias: 0, + }, + TopicName: "a/b/", + }, + }, + { + Case: TPublishSpecDenySysTopic, + Desc: "deny publishing to $SYS topics", + Group: "validate", + Expect: CodeSuccess, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + }, + TopicName: "$SYS/any", + Payload: []byte("y"), + }, + RawBytes: []byte{ + Publish << 4, 11, // Fixed header + 0, 5, // Topic Name - LSB+MSB + '$', 'S', 'Y', 'S', '/', 'a', 'n', 'y', // Topic Name + 'y', // Payload + }, + }, + }, + + Puback: { + { + Case: TPuback, + Desc: "puback", + Primary: true, + RawBytes: []byte{ + Puback << 4, 2, // Fixed header + 0, 7, // Packet ID - LSB+MSB + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Puback, + Remaining: 2, + }, + PacketID: 7, + }, + }, + { + Case: TPubackMqtt5, + Desc: "mqtt5", + Primary: true, + RawBytes: []byte{ + Puback << 4, 20, // Fixed header + 0, 7, // Packet ID - LSB+MSB + CodeGrantedQos0.Code, // Reason Code + 16, // Properties Length + // 31, 0, 6, 'r', 'e', 'a', 's', 'o', 'n', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Puback, + Remaining: 20, + }, + PacketID: 7, + ReasonCode: CodeGrantedQos0.Code, + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TPubackUnexpectedError, + Desc: "unexpected error", + Group: "decode", + RawBytes: []byte{ + Puback << 4, 29, // Fixed header + 0, 7, // Packet ID - LSB+MSB + ErrPayloadFormatInvalid.Code, // Reason Code + 25, // Properties Length + 31, 0, 22, 'p', 'a', 'y', 'l', 'o', 'a', 'd', + ' ', 'f', 'o', 'r', 'm', 'a', 't', + ' ', 'i', 'n', 'v', 'a', 'l', 'i', 'd', // Reason String (31) + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Puback, + Remaining: 28, + }, + PacketID: 7, + ReasonCode: ErrPayloadFormatInvalid.Code, + Properties: 
Properties{ + ReasonString: ErrPayloadFormatInvalid.Reason, + }, + }, + }, + + // Fail states + { + Case: TPubackMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Puback << 4, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TPubackMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Puback << 4, 20, // Fixed header + 0, 7, // Packet ID - LSB+MSB + CodeGrantedQos0.Code, // Reason Code + 16, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + Pubrec: { + { + Case: TPubrec, + Desc: "pubrec", + Primary: true, + RawBytes: []byte{ + Pubrec << 4, 2, // Fixed header + 0, 7, // Packet ID - LSB+MSB + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pubrec, + Remaining: 2, + }, + PacketID: 7, + }, + }, + { + Case: TPubrecMqtt5, + Desc: "pubrec mqtt5", + Primary: true, + RawBytes: []byte{ + Pubrec << 4, 20, // Fixed header + 0, 7, // Packet ID - LSB+MSB + CodeSuccess.Code, // Reason Code + 16, // Properties Length + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Pubrec, + Remaining: 20, + }, + PacketID: 7, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TPubrecMqtt5IDInUse, + Desc: "packet id in use mqtt5", + Primary: true, + RawBytes: []byte{ + Pubrec << 4, 47, // Fixed header + 0, 7, // Packet ID - LSB+MSB + ErrPacketIdentifierInUse.Code, // Reason Code + 43, // Properties Length + 31, 0, 24, 'p', 'a', 'c', 'k', 'e', 't', + ' ', 'i', 'd', 'e', 'n', 't', 'i', 'f', 'i', 'e', 'r', + ' ', 'i', 'n', + ' ', 'u', 's', 'e', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + Packet: &Packet{ + 
ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Pubrec, + Remaining: 31, + }, + PacketID: 7, + ReasonCode: ErrPacketIdentifierInUse.Code, + Properties: Properties{ + ReasonString: ErrPacketIdentifierInUse.Reason, + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TPubrecMalReasonCode, + Desc: "malformed reason code", + Group: "decode", + FailFirst: ErrMalformedReasonCode, + RawBytes: []byte{ + Pubrec << 4, 31, // Fixed header + 0, 7, // Packet ID - LSB+MSB + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + // Validation + { + Case: TPubrecInvalidReason, + Desc: "invalid reason code", + Group: "validate", + FailFirst: ErrProtocolViolationInvalidReason, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pubrec, + }, + PacketID: 7, + ReasonCode: ErrConnectionRateExceeded.Code, + }, + }, + // Fail states + { + Case: TPubrecMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Pubrec << 4, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TPubrecMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Pubrec << 4, 31, // Fixed header + 0, 7, // Packet ID - LSB+MSB + ErrPacketIdentifierInUse.Code, // Reason Code + 27, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + Pubrel: { + { + Case: TPubrel, + Desc: "pubrel", + Primary: true, + RawBytes: []byte{ + Pubrel<<4 | 1<<1, 2, // Fixed header + 0, 7, // Packet ID - LSB+MSB + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pubrel, + Remaining: 2, + Qos: 1, + }, + PacketID: 7, + }, + }, + { + Case: TPubrelMqtt5, + Desc: "pubrel mqtt5", + Primary: true, + RawBytes: []byte{ + Pubrel<<4 | 1<<1, 20, // Fixed header + 0, 7, // Packet ID - LSB+MSB + CodeSuccess.Code, // Reason Code + 16, // Properties Length + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 
149, 140, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Pubrel, + Remaining: 20, + Qos: 1, + }, + PacketID: 7, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TPubrelMqtt5AckNoPacket, + Desc: "mqtt5 no packet id ack", + Primary: true, + RawBytes: append([]byte{ + Pubrel<<4 | 1<<1, 34, // Fixed header + 0, 7, // Packet ID - LSB+MSB + ErrPacketIdentifierNotFound.Code, // Reason Code + 30, // Properties Length + 31, 0, byte(len(ErrPacketIdentifierNotFound.Reason)), + }, []byte(ErrPacketIdentifierNotFound.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Pubrel, + Remaining: 34, + Qos: 1, + }, + PacketID: 7, + ReasonCode: ErrPacketIdentifierNotFound.Code, + Properties: Properties{ + ReasonString: ErrPacketIdentifierNotFound.Reason, + }, + }, + }, + // Validation + { + Case: TPubrelInvalidReason, + Desc: "invalid reason code", + Group: "validate", + FailFirst: ErrProtocolViolationInvalidReason, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pubrel, + }, + PacketID: 7, + ReasonCode: ErrConnectionRateExceeded.Code, + }, + }, + // Fail states + { + Case: TPubrelMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Pubrel << 4, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TPubrelMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Pubrel<<4 | 1<<1, 20, // Fixed header + 0, 7, // Packet ID - LSB+MSB + CodeSuccess.Code, // Reason Code + 16, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + Pubcomp: { + { + Case: TPubcomp, + Desc: "pubcomp", + Primary: true, + RawBytes: []byte{ + Pubcomp << 4, 2, // Fixed header + 0, 7, // Packet ID - LSB+MSB + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pubcomp, + 
Remaining: 2, + }, + PacketID: 7, + }, + }, + { + Case: TPubcompMqtt5, + Desc: "mqtt5", + Primary: true, + RawBytes: []byte{ + Pubcomp << 4, 20, // Fixed header + 0, 7, // Packet ID - LSB+MSB + CodeSuccess.Code, // Reason Code + 16, // Properties Length + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Pubcomp, + Remaining: 20, + }, + PacketID: 7, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TPubcompMqtt5AckNoPacket, + Desc: "mqtt5 no packet id ack", + Primary: true, + RawBytes: append([]byte{ + Pubcomp << 4, 34, // Fixed header + 0, 7, // Packet ID - LSB+MSB + ErrPacketIdentifierNotFound.Code, // Reason Code + 30, // Properties Length + 31, 0, byte(len(ErrPacketIdentifierNotFound.Reason)), + }, []byte(ErrPacketIdentifierNotFound.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Pubcomp, + Remaining: 34, + }, + PacketID: 7, + ReasonCode: ErrPacketIdentifierNotFound.Code, + Properties: Properties{ + ReasonString: ErrPacketIdentifierNotFound.Reason, + }, + }, + }, + // Validation + { + Case: TPubcompInvalidReason, + Desc: "invalid reason code", + Group: "validate", + FailFirst: ErrProtocolViolationInvalidReason, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pubcomp, + }, + ReasonCode: ErrConnectionRateExceeded.Code, + }, + }, + // Fail states + { + Case: TPubcompMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Pubcomp << 4, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TPubcompMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Pubcomp << 4, 34, // Fixed header + 0, 7, // Packet ID - LSB+MSB + 
ErrPacketIdentifierNotFound.Code, // Reason Code + 22, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + Subscribe: { + { + Case: TSubscribe, + Desc: "subscribe", + Primary: true, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 10, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, // QoS + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Remaining: 10, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + {Filter: "a/b/c"}, + }, + }, + }, + { + Case: TSubscribeMany, + Desc: "many", + Primary: true, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 30, // Fixed header + 0, 15, // Packet ID - LSB+MSB + + 0, 3, // Topic Name - LSB+MSB + 'a', '/', 'b', // Topic Name + 0, // QoS + + 0, 11, // Topic Name - LSB+MSB + 'd', '/', 'e', '/', 'f', '/', 'g', '/', 'h', '/', 'i', // Topic Name + 1, // QoS + + 0, 5, // Topic Name - LSB+MSB + 'x', '/', 'y', '/', 'z', // Topic Name + 2, // QoS + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Remaining: 30, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + {Filter: "a/b", Qos: 0}, + {Filter: "d/e/f/g/h/i", Qos: 1}, + {Filter: "x/y/z", Qos: 2}, + }, + }, + }, + { + Case: TSubscribeMqtt5, + Desc: "mqtt5", + Primary: true, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 31, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 20, + 11, 202, 212, 19, // Subscription Identifier (11) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + + 0, 5, 'a', '/', 'b', '/', 'c', // Topic Name + 46, // subscription options + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Subscribe, + Remaining: 31, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + { + Filter: "a/b/c", + Qos: 2, + NoLocal: true, + RetainAsPublished: true, + RetainHandling: 2, + Identifier: 322122, + }, + }, + Properties: Properties{ + SubscriptionIdentifier: 
[]int{322122}, + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TSubscribeRetainHandling1, + Desc: "retain handling 1", + Primary: true, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 11, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, // no properties + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0 | 1<<4, // subscription options + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Subscribe, + Remaining: 11, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + { + Filter: "a/b/c", + RetainHandling: 1, + }, + }, + }, + }, + { + Case: TSubscribeRetainHandling2, + Desc: "retain handling 2", + Primary: true, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 11, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, // no properties + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0 | 2<<4, // subscription options + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Subscribe, + Remaining: 11, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + { + Filter: "a/b/c", + RetainHandling: 2, + }, + }, + }, + }, + { + Case: TSubscribeRetainAsPublished, + Desc: "retain as published", + Primary: true, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 11, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, // no properties + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0 | 1<<3, // subscription options + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Subscribe, + Remaining: 11, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + { + Filter: "a/b/c", + RetainAsPublished: true, + }, + }, + }, + }, + { + Case: TSubscribeInvalidFilter, + Desc: "invalid filter", + Group: "reference", + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + {Filter: "$SHARE/#", Identifier: 5}, + }, + }, + }, + { 
+ Case: TSubscribeInvalidSharedNoLocal, + Desc: "shared and no local", + Group: "reference", + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + {Filter: "$SHARE/tmp/a/b/c", Identifier: 5, NoLocal: true}, + }, + }, + }, + + // Fail states + { + Case: TSubscribeMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TSubscribeMalTopic, + Desc: "malformed topic", + Group: "decode", + FailFirst: ErrMalformedTopic, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 2, // Fixed header + 0, 21, // Packet ID - LSB+MSB + + 0, 3, // Topic Name - LSB+MSB + 'a', '/', + }, + }, + { + Case: TSubscribeMalQos, + Desc: "malformed subscribe - qos", + Group: "decode", + FailFirst: ErrMalformedQos, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 2, // Fixed header + 0, 22, // Packet ID - LSB+MSB + + 0, 3, // Topic Name - LSB+MSB + 'j', '/', 'b', // Topic Name + + }, + }, + { + Case: TSubscribeMalQosRange, + Desc: "malformed qos out of range", + Group: "decode", + FailFirst: ErrProtocolViolationQosOutOfRange, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 2, // Fixed header + 0, 22, // Packet ID - LSB+MSB + + 0, 3, // Topic Name - LSB+MSB + 'c', '/', 'd', // Topic Name + 5, // QoS + + }, + }, + { + Case: TSubscribeMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Subscribe << 4, 11, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 4, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + + // Validation + { + Case: TSubscribeInvalidQosMustPacketID, + Desc: "no packet id with qos > 0", + Group: "validate", + Expect: ErrProtocolViolationNoPacketID, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Qos: 1, + }, + PacketID: 0, + Filters: Subscriptions{ + {Filter: "a/b"}, + }, + }, + }, + 
{ + Case: TSubscribeInvalidNoFilters, + Desc: "no filters", + Group: "validate", + Expect: ErrProtocolViolationNoFilters, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Qos: 1, + }, + PacketID: 2, + }, + }, + + { + Case: TSubscribeInvalidIdentifierOversize, + Desc: "oversize identifier", + Group: "validate", + Expect: ErrProtocolViolationOversizeSubID, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Qos: 1, + }, + PacketID: 2, + Filters: Subscriptions{ + {Filter: "a/b", Identifier: 5}, + {Filter: "d/f", Identifier: 268435456}, + }, + }, + }, + + // Spec tests + { + Case: TSubscribeSpecQosMustPacketID, + Desc: "no packet id with qos > 0", + Group: "encode", + Expect: ErrProtocolViolationNoPacketID, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 10, // Fixed header + 0, 0, // Packet ID - LSB+MSB + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 1, // QoS + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Qos: 1, + Remaining: 10, + }, + Filters: Subscriptions{ + {Filter: "a/b/c", Qos: 1}, + }, + PacketID: 0, + }, + }, + }, + Suback: { + { + Case: TSuback, + Desc: "suback", + Primary: true, + RawBytes: []byte{ + Suback << 4, 3, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, // Return Code QoS 0 + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Suback, + Remaining: 3, + }, + PacketID: 15, + ReasonCodes: []byte{0}, + }, + }, + { + Case: TSubackMany, + Desc: "many", + Primary: true, + RawBytes: []byte{ + Suback << 4, 6, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, // Return Code QoS 0 + 1, // Return Code QoS 1 + 2, // Return Code QoS 2 + 0x80, // Return Code fail + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Suback, + Remaining: 6, + }, + PacketID: 15, + ReasonCodes: []byte{0, 1, 2, 0x80}, + }, + }, + { + Case: TSubackDeny, + Desc: "deny mqtt5", + Primary: true, + RawBytes: []byte{ + Suback << 4, 4, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, 
// no properties + ErrNotAuthorized.Code, // Return Code QoS 0 + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Suback, + Remaining: 4, + }, + PacketID: 15, + ReasonCodes: []byte{ErrNotAuthorized.Code}, + }, + }, + { + Case: TSubackUnspecifiedError, + Desc: "unspecified error", + Primary: true, + RawBytes: []byte{ + Suback << 4, 3, // Fixed header + 0, 15, // Packet ID - LSB+MSB + ErrUnspecifiedError.Code, // Return Code QoS 0 + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Suback, + Remaining: 3, + }, + PacketID: 15, + ReasonCodes: []byte{ErrUnspecifiedError.Code}, + }, + }, + { + Case: TSubackUnspecifiedErrorMqtt5, + Desc: "unspecified error mqtt5", + Primary: true, + RawBytes: []byte{ + Suback << 4, 4, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, // no properties + ErrUnspecifiedError.Code, // Return Code QoS 0 + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Suback, + Remaining: 4, + }, + PacketID: 15, + ReasonCodes: []byte{ErrUnspecifiedError.Code}, + }, + }, + { + Case: TSubackMqtt5, + Desc: "mqtt5", + Primary: true, + RawBytes: []byte{ + Suback << 4, 20, // Fixed header + 0, 15, // Packet ID + 16, // Properties Length + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + CodeGrantedQos2.Code, // Return Code QoS 0 + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Suback, + Remaining: 20, + }, + PacketID: 15, + ReasonCodes: []byte{CodeGrantedQos2.Code}, + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TSubackPacketIDInUse, + Desc: "packet id in use", + Primary: true, + RawBytes: []byte{ + Suback << 4, 47, // Fixed header + 0, 15, // Packet ID + 43, // Properties Length + 31, 0, 24, 'p', 'a', 'c', 'k', 'e', 't', + ' ', 'i', 'd', 'e', 'n', 't', 'i', 'f', 'i', 'e', 'r', + ' ', 'i', 'n', + ' ', 'u', 's', 'e', // Reason String (31) 
+ 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + ErrPacketIdentifierInUse.Code, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Suback, + Remaining: 47, + }, + PacketID: 15, + ReasonCodes: []byte{ErrPacketIdentifierInUse.Code}, + Properties: Properties{ + ReasonString: ErrPacketIdentifierInUse.Reason, + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + + // Fail states + { + Case: TSubackMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Suback << 4, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TSubackMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Suback << 4, 47, + 0, 15, + 43, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + + { + Case: TSubackInvalidFilter, + Desc: "malformed packet id", + Group: "reference", + RawBytes: []byte{ + Suback << 4, 4, + 0, 15, + 0, // no properties + ErrTopicFilterInvalid.Code, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + { + Case: TSubackInvalidSharedNoLocal, + Desc: "invalid shared no local", + Group: "reference", + RawBytes: []byte{ + Suback << 4, 4, + 0, 15, + 0, // no properties + ErrProtocolViolationInvalidSharedNoLocal.Code, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + + Unsubscribe: { + { + Case: TUnsubscribe, + Desc: "unsubscribe", + Primary: true, + RawBytes: []byte{ + Unsubscribe<<4 | 1<<1, 9, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Unsubscribe, + Remaining: 9, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + {Filter: "a/b/c"}, + }, + }, + }, + { + Case: TUnsubscribeMany, + Desc: "unsubscribe many", + Primary: true, + RawBytes: []byte{ + 
Unsubscribe<<4 | 1<<1, 27, // Fixed header + 0, 35, // Packet ID - LSB+MSB + + 0, 3, // Topic Name - LSB+MSB + 'a', '/', 'b', // Topic Name + + 0, 11, // Topic Name - LSB+MSB + 'd', '/', 'e', '/', 'f', '/', 'g', '/', 'h', '/', 'i', // Topic Name + + 0, 5, // Topic Name - LSB+MSB + 'x', '/', 'y', '/', 'z', // Topic Name + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Unsubscribe, + Remaining: 27, + Qos: 1, + }, + PacketID: 35, + Filters: Subscriptions{ + {Filter: "a/b"}, + {Filter: "d/e/f/g/h/i"}, + {Filter: "x/y/z"}, + }, + }, + }, + { + Case: TUnsubscribeMqtt5, + Desc: "mqtt5", + Primary: true, + RawBytes: []byte{ + Unsubscribe<<4 | 1<<1, 31, // Fixed header + 0, 15, // Packet ID - LSB+MSB + + 16, + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + + 0, 3, // Topic Name - LSB+MSB + 'a', '/', 'b', + + 0, 5, // Topic Name - LSB+MSB + 'x', '/', 'y', '/', 'w', + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Unsubscribe, + Remaining: 31, + Qos: 1, + }, + PacketID: 15, + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + Filters: Subscriptions{ + {Filter: "a/b"}, + {Filter: "x/y/w"}, + }, + }, + }, + + // Fail states + { + Case: TUnsubscribeMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Unsubscribe << 4, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TUnsubscribeMalTopicName, + Desc: "malformed topic", + Group: "decode", + FailFirst: ErrMalformedTopic, + RawBytes: []byte{ + Unsubscribe << 4, 2, // Fixed header + 0, 21, // Packet ID - LSB+MSB + 0, 3, // Topic Name - LSB+MSB + 'a', '/', + }, + }, + { + Case: TUnsubscribeMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Unsubscribe<<4 | 1<<1, 31, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 16, + }, + Packet: 
&Packet{ + ProtocolVersion: 5, + }, + }, + + { + Case: TUnsubscribeInvalidQosMustPacketID, + Desc: "no packet id with qos > 0", + Group: "validate", + Expect: ErrProtocolViolationNoPacketID, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Unsubscribe, + Qos: 1, + }, + PacketID: 0, + Filters: Subscriptions{ + Subscription{Filter: "a/b"}, + }, + }, + }, + { + Case: TUnsubscribeInvalidNoFilters, + Desc: "no filters", + Group: "validate", + Expect: ErrProtocolViolationNoFilters, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Unsubscribe, + Qos: 1, + }, + PacketID: 2, + }, + }, + + { + Case: TUnsubscribeSpecQosMustPacketID, + Desc: "no packet id with qos > 0", + Group: "encode", + Expect: ErrProtocolViolationNoPacketID, + RawBytes: []byte{ + Unsubscribe<<4 | 1<<1, 9, // Fixed header + 0, 0, // Packet ID - LSB+MSB + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Unsubscribe, + Qos: 1, + Remaining: 9, + }, + PacketID: 0, + Filters: Subscriptions{ + {Filter: "a/b/c"}, + }, + }, + }, + }, + Unsuback: { + { + Case: TUnsuback, + Desc: "unsuback", + Primary: true, + RawBytes: []byte{ + Unsuback << 4, 2, // Fixed header + 0, 15, // Packet ID - LSB+MSB + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Unsuback, + Remaining: 2, + }, + PacketID: 15, + }, + }, + { + Case: TUnsubackMany, + Desc: "unsuback many", + Primary: true, + RawBytes: []byte{ + Unsuback << 4, 5, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, + CodeSuccess.Code, CodeSuccess.Code, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Unsuback, + Remaining: 5, + }, + PacketID: 15, + ReasonCodes: []byte{CodeSuccess.Code, CodeSuccess.Code}, + }, + }, + { + Case: TUnsubackMqtt5, + Desc: "mqtt5", + Primary: true, + RawBytes: []byte{ + Unsuback << 4, 21, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 16, // Properties Length + 38, // User Properties (38) + 0, 5, 'h', 'e', 
'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + CodeSuccess.Code, CodeNoSubscriptionExisted.Code, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Unsuback, + Remaining: 21, + }, + PacketID: 15, + ReasonCodes: []byte{CodeSuccess.Code, CodeNoSubscriptionExisted.Code}, + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TUnsubackPacketIDInUse, + Desc: "packet id in use", + Primary: true, + RawBytes: []byte{ + Unsuback << 4, 48, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 43, // Properties Length + 31, 0, 24, 'p', 'a', 'c', 'k', 'e', 't', + ' ', 'i', 'd', 'e', 'n', 't', 'i', 'f', 'i', 'e', 'r', + ' ', 'i', 'n', + ' ', 'u', 's', 'e', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + ErrPacketIdentifierInUse.Code, ErrPacketIdentifierInUse.Code, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Unsuback, + Remaining: 48, + }, + PacketID: 15, + ReasonCodes: []byte{ErrPacketIdentifierInUse.Code, ErrPacketIdentifierInUse.Code}, + Properties: Properties{ + ReasonString: ErrPacketIdentifierInUse.Reason, + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + + // Fail states + { + Case: TUnsubackMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Unsuback << 4, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TUnsubackMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Unsuback << 4, 48, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 43, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + + Pingreq: { + { + Case: TPingreq, + Desc: "ping request", + Primary: true, + RawBytes: []byte{ + Pingreq << 4, 0, // fixed header + }, + Packet: &Packet{ + FixedHeader: 
FixedHeader{ + Type: Pingreq, + Remaining: 0, + }, + }, + }, + }, + Pingresp: { + { + Case: TPingresp, + Desc: "ping response", + Primary: true, + RawBytes: []byte{ + Pingresp << 4, 0, // fixed header + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pingresp, + Remaining: 0, + }, + }, + }, + }, + + Disconnect: { + { + Case: TDisconnect, + Desc: "disconnect", + Primary: true, + RawBytes: []byte{ + Disconnect << 4, 0, // fixed header + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 0, + }, + }, + }, + { + Case: TDisconnectTakeover, + Desc: "takeover", + Primary: true, + RawBytes: append([]byte{ + Disconnect << 4, 21, // fixed header + ErrSessionTakenOver.Code, // Reason Code + 19, // Properties Length + 31, 0, 16, // Reason String (31) + }, []byte(ErrSessionTakenOver.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 0, + }, + ReasonCode: ErrSessionTakenOver.Code, + Properties: Properties{ + ReasonString: ErrSessionTakenOver.Reason, + }, + }, + }, + { + Case: TDisconnectShuttingDown, + Desc: "shutting down", + Primary: true, + RawBytes: append([]byte{ + Disconnect << 4, 25, // fixed header + ErrServerShuttingDown.Code, // Reason Code + 23, // Properties Length + 31, 0, 20, // Reason String (31) + }, []byte(ErrServerShuttingDown.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 0, + }, + ReasonCode: ErrServerShuttingDown.Code, + Properties: Properties{ + ReasonString: ErrServerShuttingDown.Reason, + }, + }, + }, + { + Case: TDisconnectMqtt5, + Desc: "mqtt5", + Primary: true, + RawBytes: append([]byte{ + Disconnect << 4, 22, // fixed header + CodeDisconnect.Code, // Reason Code + 20, // Properties Length + 17, 0, 0, 0, 120, // Session Expiry Interval (17) + 31, 0, 12, // Reason String (31) + }, []byte(CodeDisconnect.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ 
+ Type: Disconnect, + Remaining: 22, + }, + ReasonCode: CodeDisconnect.Code, + Properties: Properties{ + ReasonString: CodeDisconnect.Reason, + SessionExpiryInterval: 120, + SessionExpiryIntervalFlag: true, + }, + }, + }, + { + Case: TDisconnectSecondConnect, + Desc: "second connect packet mqtt5", + RawBytes: append([]byte{ + Disconnect << 4, 46, // fixed header + ErrProtocolViolationSecondConnect.Code, + 44, + 31, 0, 41, // Reason String (31) + }, []byte(ErrProtocolViolationSecondConnect.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 45, + }, + ReasonCode: ErrProtocolViolationSecondConnect.Code, + Properties: Properties{ + ReasonString: ErrProtocolViolationSecondConnect.Reason, + }, + }, + }, + { + Case: TDisconnectZeroNonZeroExpiry, + Desc: "zero non zero expiry", + RawBytes: []byte{ + Disconnect << 4, 2, // fixed header + ErrProtocolViolationZeroNonZeroExpiry.Code, + 0, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 2, + }, + ReasonCode: ErrProtocolViolationZeroNonZeroExpiry.Code, + }, + }, + { + Case: TDisconnectReceiveMaximum, + Desc: "receive maximum mqtt5", + RawBytes: append([]byte{ + Disconnect << 4, 29, // fixed header + ErrReceiveMaximum.Code, + 27, // Properties Length + 31, 0, 24, // Reason String (31) + }, []byte(ErrReceiveMaximum.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 29, + }, + ReasonCode: ErrReceiveMaximum.Code, + Properties: Properties{ + ReasonString: ErrReceiveMaximum.Reason, + }, + }, + }, + { + Case: TDisconnectDropProperties, + Desc: "drop oversize properties partial", + Group: "encode", + RawBytes: []byte{ + Disconnect << 4, 39, // fixed header + CodeDisconnect.Code, + 19, // length + 28, 0, 7, 'm', 'o', 'c', 'h', 'i', '-', '2', // Server Reference (28) + 31, 0, 6, 'r', 'e', 'a', 's', 'o', 'n', // Reason String (31) + 38, // User Properties 
(38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + ActualBytes: []byte{ + Disconnect << 4, 12, // fixed header + CodeDisconnect.Code, + 10, // length + 28, 0, 7, 'm', 'o', 'c', 'h', 'i', '-', '2', // Server Reference (28) + }, + Packet: &Packet{ + Mods: Mods{ + MaxSize: 3, + }, + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 40, + }, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + ReasonString: "reason", + ServerReference: "mochi-2", + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + // fail states + { + Case: TDisconnectMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Disconnect << 4, 48, // fixed header + CodeDisconnect.Code, // Reason Code + 46, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + { + Case: TDisconnectMalReasonCode, + Desc: "malformed reason code", + Group: "decode", + FailFirst: ErrMalformedReasonCode, + RawBytes: []byte{ + Disconnect << 4, 48, // fixed header + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + Auth: { + { + Case: TAuth, + Desc: "auth", + Primary: true, + RawBytes: []byte{ + Auth << 4, 47, + CodeSuccess.Code, // reason code + 45, + 21, 0, 5, 'S', 'H', 'A', '-', '1', // Authentication Method (21) + 22, 0, 9, 'a', 'u', 't', 'h', '-', 'd', 'a', 't', 'a', // Authentication Data (22) + 31, 0, 6, 'r', 'e', 'a', 's', 'o', 'n', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Auth, + Remaining: 47, + }, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + AuthenticationMethod: "SHA-1", + AuthenticationData: []byte("auth-data"), + ReasonString: "reason", + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TAuthMalReasonCode, + Desc: 
"malformed reason code", + Group: "decode", + FailFirst: ErrMalformedReasonCode, + RawBytes: []byte{ + Auth << 4, 47, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Auth, + }, + ReasonCode: CodeNoMatchingSubscribers.Code, + }, + }, + // fail states + { + Case: TAuthMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Auth << 4, 3, + CodeSuccess.Code, + 12, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + // Validation + { + Case: TAuthInvalidReason, + Desc: "invalid reason code", + Group: "validate", + Expect: ErrProtocolViolationInvalidReason, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Auth, + }, + ReasonCode: CodeNoMatchingSubscribers.Code, + }, + }, + { + Case: TAuthInvalidReason2, + Desc: "invalid reason code", + Group: "validate", + Expect: ErrProtocolViolationInvalidReason, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Auth, + }, + ReasonCode: CodeNoMatchingSubscribers.Code, + }, + }, + }, +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/server.go b/vendor/github.com/mochi-mqtt/server/v2/server.go new file mode 100644 index 000000000..0c50b6aa9 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/server.go @@ -0,0 +1,1533 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +// package mqtt provides a high performance, fully compliant MQTT v5 broker server with v3.1.1 backward compatibility. +package mqtt + +import ( + "errors" + "fmt" + "math" + "net" + "os" + "runtime" + "sort" + "strconv" + "sync/atomic" + "time" + + "github.com/mochi-mqtt/server/v2/hooks/storage" + "github.com/mochi-mqtt/server/v2/listeners" + "github.com/mochi-mqtt/server/v2/packets" + "github.com/mochi-mqtt/server/v2/system" + + "github.com/rs/zerolog" +) + +const ( + Version = "2.3.0" // the current server version. 
+ defaultSysTopicInterval int64 = 1 // the interval between $SYS topic publishes +) + +var ( + // DefaultServerCapabilities defines the default features and capabilities provided by the server. + DefaultServerCapabilities = &Capabilities{ + MaximumSessionExpiryInterval: math.MaxUint32, // maximum number of seconds to keep disconnected sessions + MaximumMessageExpiryInterval: 60 * 60 * 24, // maximum message expiry if message expiry is 0 or over + ReceiveMaximum: 1024, // maximum number of concurrent qos messages per client + MaximumQos: 2, // maxmimum qos value available to clients + RetainAvailable: 1, // retain messages is available + MaximumPacketSize: 0, // no maximum packet size + TopicAliasMaximum: math.MaxUint16, // maximum topic alias value + WildcardSubAvailable: 1, // wildcard subscriptions are available + SubIDAvailable: 1, // subscription identifiers are available + SharedSubAvailable: 1, // shared subscriptions are available + MinimumProtocolVersion: 3, // minimum supported mqtt version (3.0.0) + MaximumClientWritesPending: 1024 * 8, // maximum number of pending message writes for a client + } + + ErrListenerIDExists = errors.New("listener id already exists") // a listener with the same id already exists. + ErrConnectionClosed = errors.New("connection not open") // connection is closed +) + +// Capabilities indicates the capabilities and features provided by the server. +type Capabilities struct { + MaximumMessageExpiryInterval int64 + MaximumClientWritesPending int32 + MaximumSessionExpiryInterval uint32 + MaximumPacketSize uint32 + maximumPacketID uint32 // unexported, used for testing only + ReceiveMaximum uint16 + TopicAliasMaximum uint16 + SharedSubAvailable byte + MinimumProtocolVersion byte + Compatibilities Compatibilities + MaximumQos byte + RetainAvailable byte + WildcardSubAvailable byte + SubIDAvailable byte +} + +// Compatibilities provides flags for using compatibility modes. 
+type Compatibilities struct { + ObscureNotAuthorized bool // return unspecified errors instead of not authorized + PassiveClientDisconnect bool // don't disconnect the client forcefully after sending disconnect packet (paho - spec violation) + AlwaysReturnResponseInfo bool // always return response info (useful for testing) + RestoreSysInfoOnRestart bool // restore system info from store as if server never stopped + NoInheritedPropertiesOnAck bool // don't allow inherited user properties on ack (paho - spec violation) +} + +// Options contains configurable options for the server. +type Options struct { + // Capabilities defines the server features and behaviour. If you only wish to modify + // several of these values, set them explicitly - e.g. + // server.Options.Capabilities.MaximumClientWritesPending = 16 * 1024 + Capabilities *Capabilities + + // ClientNetWriteBufferSize specifies the size of the client *bufio.Writer write buffer. + ClientNetWriteBufferSize int + + // ClientNetReadBufferSize specifies the size of the client *bufio.Reader read buffer. + ClientNetReadBufferSize int + + // Logger specifies a custom configured implementation of zerolog to override + // the servers default logger configuration. If you wish to change the log level, + // of the default logger, you can do so by setting + // server := mqtt.New(nil) + // l := server.Log.Level(zerolog.DebugLevel) + // server.Log = &l + Logger *zerolog.Logger + + // SysTopicResendInterval specifies the interval between $SYS topic updates in seconds. + SysTopicResendInterval int64 +} + +// Server is an MQTT broker server. It should be created with server.New() +// in order to ensure all the internal fields are correctly populated. 
+type Server struct { + Options *Options // configurable server options + Listeners *listeners.Listeners // listeners are network interfaces which listen for new connections + Clients *Clients // clients known to the broker + Topics *TopicsIndex // an index of topic filter subscriptions and retained messages + Info *system.Info // values about the server commonly known as $SYS topics + loop *loop // loop contains tickers for the system event loop + done chan bool // indicate that the server is ending + Log *zerolog.Logger // minimal no-alloc logger + hooks *Hooks // hooks contains hooks for extra functionality such as auth and persistent storage. +} + +// loop contains interval tickers for the system events loop. +type loop struct { + sysTopics *time.Ticker // interval ticker for sending updating $SYS topics + clientExpiry *time.Ticker // interval ticker for cleaning expired clients + inflightExpiry *time.Ticker // interval ticker for cleaning up expired inflight messages + retainedExpiry *time.Ticker // interval ticker for cleaning retained messages + willDelaySend *time.Ticker // interval ticker for sending will messages with a delay + willDelayed *packets.Packets // activate LWT packets which will be sent after a delay +} + +// ops contains server values which can be propagated to other structs. +type ops struct { + options *Options // a pointer to the server options and capabilities, for referencing in clients + info *system.Info // pointers to server system info + hooks *Hooks // pointer to the server hooks + log *zerolog.Logger // a structured logger for the client +} + +// New returns a new instance of mochi mqtt broker. Optional parameters +// can be specified to override some default settings (see Options). 
+func New(opts *Options) *Server { + if opts == nil { + opts = new(Options) + } + + opts.ensureDefaults() + + s := &Server{ + done: make(chan bool), + Clients: NewClients(), + Topics: NewTopicsIndex(), + Listeners: listeners.New(), + loop: &loop{ + sysTopics: time.NewTicker(time.Second * time.Duration(opts.SysTopicResendInterval)), + clientExpiry: time.NewTicker(time.Second), + inflightExpiry: time.NewTicker(time.Second), + retainedExpiry: time.NewTicker(time.Second), + willDelaySend: time.NewTicker(time.Second), + willDelayed: packets.NewPackets(), + }, + Options: opts, + Info: &system.Info{ + Version: Version, + Started: time.Now().Unix(), + }, + Log: opts.Logger, + hooks: &Hooks{ + Log: opts.Logger, + }, + } + + return s +} + +// ensureDefaults ensures that the server starts with sane default values, if none are provided. +func (o *Options) ensureDefaults() { + if o.Capabilities == nil { + o.Capabilities = DefaultServerCapabilities + } + + o.Capabilities.maximumPacketID = math.MaxUint16 // spec maximum is 65535 + + if o.SysTopicResendInterval == 0 { + o.SysTopicResendInterval = defaultSysTopicInterval + } + + if o.ClientNetWriteBufferSize == 0 { + o.ClientNetWriteBufferSize = 1024 * 2 + } + + if o.ClientNetReadBufferSize == 0 { + o.ClientNetReadBufferSize = 1024 * 2 + } + + if o.Logger == nil { + log := zerolog.New(os.Stderr).With().Timestamp().Logger().Level(zerolog.InfoLevel).Output(zerolog.ConsoleWriter{Out: os.Stderr}) + o.Logger = &log + } +} + +// NewClient returns a new Client instance, populated with all the required values and +// references to be used with the server. If you are using this client to directly publish +// messages from the embedding application, set the inline flag to true to bypass ACL and +// topic validation checks. 
+func (s *Server) NewClient(c net.Conn, listener string, id string, inline bool) *Client { + cl := newClient(c, &ops{ // [MQTT-3.1.2-6] implicit + options: s.Options, + info: s.Info, + hooks: s.hooks, + log: s.Log, + }) + + cl.ID = id + cl.Net.Listener = listener + + if inline { // inline clients bypass acl and some validity checks. + cl.Net.Inline = true + // By default, we don't want to restrict developer publishes, + // but if you do, reset this after creating inline client. + cl.State.Inflight.ResetReceiveQuota(math.MaxInt32) + } else { + go cl.WriteLoop() // can only write to real clients + } + + return cl +} + +// AddHook attaches a new Hook to the server. Ideally, this should be called +// before the server is started with s.Serve(). +func (s *Server) AddHook(hook Hook, config any) error { + nl := s.Log.With().Str("hook", hook.ID()).Logger() + hook.SetOpts(&nl, &HookOptions{ + Capabilities: s.Options.Capabilities, + }) + + s.Log.Info().Str("hook", hook.ID()).Msg("added hook") + return s.hooks.Add(hook, config) +} + +// AddListener adds a new network listener to the server, for receiving incoming client connections. +func (s *Server) AddListener(l listeners.Listener) error { + if _, ok := s.Listeners.Get(l.ID()); ok { + return ErrListenerIDExists + } + + nl := s.Log.With().Str("listener", l.ID()).Logger() + err := l.Init(&nl) + if err != nil { + return err + } + + s.Listeners.Add(l) + + s.Log.Info().Str("id", l.ID()).Str("protocol", l.Protocol()).Str("address", l.Address()).Msg("attached listener") + return nil +} + +// Serve starts the event loops responsible for establishing client connections +// on all attached listeners, publishing the system topics, and starting all hooks. 
+func (s *Server) Serve() error { + s.Log.Info().Str("version", Version).Msg("mochi mqtt starting") + defer s.Log.Info().Msg("mochi mqtt server started") + + if s.hooks.Provides( + StoredClients, + StoredInflightMessages, + StoredRetainedMessages, + StoredSubscriptions, + StoredSysInfo, + ) { + err := s.readStore() + if err != nil { + return err + } + } + + go s.eventLoop() // spin up event loop for issuing $SYS values and closing server. + s.Listeners.ServeAll(s.EstablishConnection) // start listening on all listeners. + s.publishSysTopics() // begin publishing $SYS system values. + s.hooks.OnStarted() + + return nil +} + +// eventLoop loops forever, running various server housekeeping methods at different intervals. +func (s *Server) eventLoop() { + s.Log.Debug().Msg("system event loop started") + defer s.Log.Debug().Msg("system event loop halted") + + for { + select { + case <-s.done: + s.loop.sysTopics.Stop() + return + case <-s.loop.sysTopics.C: + s.publishSysTopics() + case <-s.loop.clientExpiry.C: + s.clearExpiredClients(time.Now().Unix()) + case <-s.loop.retainedExpiry.C: + s.clearExpiredRetainedMessages(time.Now().Unix()) + case <-s.loop.willDelaySend.C: + s.sendDelayedLWT(time.Now().Unix()) + case <-s.loop.inflightExpiry.C: + s.clearExpiredInflights(time.Now().Unix()) + } + } +} + +// EstablishConnection establishes a new client when a listener accepts a new connection. +func (s *Server) EstablishConnection(listener string, c net.Conn) error { + cl := s.NewClient(c, listener, "", false) + return s.attachClient(cl, listener) +} + +// attachClient validates an incoming client connection and if viable, attaches the client +// to the server, performs session housekeeping, and reads incoming packets. 
+func (s *Server) attachClient(cl *Client, listener string) error { + defer cl.Stop(nil) + pk, err := s.readConnectionPacket(cl) + if err != nil { + return fmt.Errorf("read connection: %w", err) + } + + cl.ParseConnect(listener, pk) + code := s.validateConnect(cl, pk) // [MQTT-3.1.4-1] [MQTT-3.1.4-2] + if code != packets.CodeSuccess { + if err := s.SendConnack(cl, code, false, nil); err != nil { + return fmt.Errorf("invalid connection send ack: %w", err) + } + return code // [MQTT-3.2.2-7] [MQTT-3.1.4-6] + } + + err = s.hooks.OnConnect(cl, pk) + if err != nil { + return err + } + + cl.refreshDeadline(cl.State.Keepalive) + if !s.hooks.OnConnectAuthenticate(cl, pk) { // [MQTT-3.1.4-2] + err := s.SendConnack(cl, packets.ErrBadUsernameOrPassword, false, nil) + if err != nil { + return fmt.Errorf("invalid connection send ack: %w", err) + } + + return packets.ErrBadUsernameOrPassword + } + + atomic.AddInt64(&s.Info.ClientsConnected, 1) + defer atomic.AddInt64(&s.Info.ClientsConnected, -1) + + s.hooks.OnSessionEstablish(cl, pk) + + sessionPresent := s.inheritClientSession(pk, cl) + s.Clients.Add(cl) // [MQTT-4.1.0-1] + + err = s.SendConnack(cl, code, sessionPresent, nil) // [MQTT-3.1.4-5] [MQTT-3.2.0-1] [MQTT-3.2.0-2] &[MQTT-3.14.0-1] + if err != nil { + return fmt.Errorf("ack connection packet: %w", err) + } + + s.loop.willDelayed.Delete(cl.ID) // [MQTT-3.1.3-9] + + if sessionPresent { + err = cl.ResendInflightMessages(true) + if err != nil { + return fmt.Errorf("resend inflight: %w", err) + } + } + + s.hooks.OnSessionEstablished(cl, pk) + + err = cl.Read(s.receivePacket) + if err != nil { + s.sendLWT(cl) + cl.Stop(err) + } else { + cl.Properties.Will = Will{} // [MQTT-3.14.4-3] [MQTT-3.1.2-10] + } + + s.Log.Debug().Str("client", cl.ID).Err(err).Str("remote", cl.Net.Remote).Str("listener", listener).Msg("client disconnected") + expire := (cl.Properties.ProtocolVersion == 5 && cl.Properties.Props.SessionExpiryInterval == 0) || (cl.Properties.ProtocolVersion < 5 && 
cl.Properties.Clean) + s.hooks.OnDisconnect(cl, err, expire) + + if expire && atomic.LoadUint32(&cl.State.isTakenOver) == 0 { + cl.ClearInflights(math.MaxInt64, 0) + s.UnsubscribeClient(cl) + s.Clients.Delete(cl.ID) // [MQTT-4.1.0-2] ![MQTT-3.1.2-23] + } + + return err +} + +// readConnectionPacket reads the first incoming header for a connection, and if +// acceptable, returns the valid connection packet. +func (s *Server) readConnectionPacket(cl *Client) (pk packets.Packet, err error) { + fh := new(packets.FixedHeader) + err = cl.ReadFixedHeader(fh) + if err != nil { + return + } + + if fh.Type != packets.Connect { + return pk, packets.ErrProtocolViolationRequireFirstConnect // [MQTT-3.1.0-1] + } + + pk, err = cl.ReadPacket(fh) + if err != nil { + return + } + + return +} + +// receivePacket processes an incoming packet for a client, and issues a disconnect to the client +// if an error has occurred (if mqtt v5). +func (s *Server) receivePacket(cl *Client, pk packets.Packet) error { + err := s.processPacket(cl, pk) + if err != nil { + if code, ok := err.(packets.Code); ok && + cl.Properties.ProtocolVersion == 5 && + code.Code >= packets.ErrUnspecifiedError.Code { + s.DisconnectClient(cl, code) + } + + s.Log.Warn().Err(err).Str("client", cl.ID).Str("listener", cl.Net.Listener).Interface("pk", pk).Msg("error processing packet") + + return err + } + + return nil +} + +// validateConnect validates that a connect packet is compliant. 
+func (s *Server) validateConnect(cl *Client, pk packets.Packet) packets.Code { + code := pk.ConnectValidate() // [MQTT-3.1.4-1] [MQTT-3.1.4-2] + if code != packets.CodeSuccess { + return code + } + + if cl.Properties.ProtocolVersion < 5 && !pk.Connect.Clean && pk.Connect.ClientIdentifier == "" { + return packets.ErrUnspecifiedError + } + + if cl.Properties.ProtocolVersion < s.Options.Capabilities.MinimumProtocolVersion { + return packets.ErrUnsupportedProtocolVersion // [MQTT-3.1.2-2] + } else if cl.Properties.Will.Qos > s.Options.Capabilities.MaximumQos { + return packets.ErrQosNotSupported // [MQTT-3.2.2-12] + } else if cl.Properties.Will.Retain && s.Options.Capabilities.RetainAvailable == 0x00 { + return packets.ErrRetainNotSupported // [MQTT-3.2.2-13] + } + + return code +} + +// inheritClientSession inherits the state of an existing client sharing the same +// connection ID. If clean is true, the state of any previously existing client +// session is abandoned. +func (s *Server) inheritClientSession(pk packets.Packet, cl *Client) bool { + if existing, ok := s.Clients.Get(pk.Connect.ClientIdentifier); ok { + s.DisconnectClient(existing, packets.ErrSessionTakenOver) // [MQTT-3.1.4-3] + if pk.Connect.Clean || (existing.Properties.Clean && existing.Properties.ProtocolVersion < 5) { // [MQTT-3.1.2-4] [MQTT-3.1.4-4] + s.UnsubscribeClient(existing) + existing.ClearInflights(math.MaxInt64, 0) + atomic.StoreUint32(&existing.State.isTakenOver, 1) // only set isTakenOver after unsubscribe has occurred + return false // [MQTT-3.2.2-3] + } + + atomic.StoreUint32(&existing.State.isTakenOver, 1) + if existing.State.Inflight.Len() > 0 { + cl.State.Inflight = existing.State.Inflight.Clone() // [MQTT-3.1.2-5] + if cl.State.Inflight.maximumReceiveQuota == 0 && cl.ops.options.Capabilities.ReceiveMaximum != 0 { + cl.State.Inflight.ResetReceiveQuota(int32(cl.ops.options.Capabilities.ReceiveMaximum)) // server receive max per client + 
cl.State.Inflight.ResetSendQuota(int32(cl.Properties.Props.ReceiveMaximum)) // client receive max + } + } + + for _, sub := range existing.State.Subscriptions.GetAll() { + existed := !s.Topics.Subscribe(cl.ID, sub) // [MQTT-3.8.4-3] + if !existed { + atomic.AddInt64(&s.Info.Subscriptions, 1) + } + cl.State.Subscriptions.Add(sub.Filter, sub) + } + + // Clean the state of the existing client to prevent sequential take-overs + // from increasing memory usage by inflights + subs * client-id. + s.UnsubscribeClient(existing) + existing.ClearInflights(math.MaxInt64, 0) + s.Log.Debug().Str("client", cl.ID). + Str("old_remote", existing.Net.Remote). + Str("new_remote", cl.Net.Remote). + Msg("session taken over") + + return true // [MQTT-3.2.2-3] + } + + if atomic.LoadInt64(&s.Info.ClientsConnected) > atomic.LoadInt64(&s.Info.ClientsMaximum) { + atomic.AddInt64(&s.Info.ClientsMaximum, 1) + } + + return false // [MQTT-3.2.2-2] +} + +// SendConnack returns a Connack packet to a client. +func (s *Server) SendConnack(cl *Client, reason packets.Code, present bool, properties *packets.Properties) error { + if properties == nil { + properties = &packets.Properties{ + ReceiveMaximum: s.Options.Capabilities.ReceiveMaximum, + } + } + + properties.ReceiveMaximum = s.Options.Capabilities.ReceiveMaximum // 3.2.2.3.3 Receive Maximum + if cl.State.ServerKeepalive { // You can set this dynamically using the OnConnect hook. 
+ properties.ServerKeepAlive = cl.State.Keepalive // [MQTT-3.1.2-21] + properties.ServerKeepAliveFlag = true + } + + if reason.Code >= packets.ErrUnspecifiedError.Code { + if cl.Properties.ProtocolVersion < 5 { + if v3reason, ok := packets.V5CodesToV3[reason]; ok { // NB v3 3.2.2.3 Connack return codes + reason = v3reason + } + } + + properties.ReasonString = reason.Reason + ack := packets.Packet{ + FixedHeader: packets.FixedHeader{ + Type: packets.Connack, + }, + SessionPresent: false, // [MQTT-3.2.2-6] + ReasonCode: reason.Code, // [MQTT-3.2.2-8] + Properties: *properties, + } + return cl.WritePacket(ack) + } + + if s.Options.Capabilities.MaximumQos < 2 { + properties.MaximumQos = s.Options.Capabilities.MaximumQos // [MQTT-3.2.2-9] + properties.MaximumQosFlag = true + } + + if cl.Properties.Props.AssignedClientID != "" { + properties.AssignedClientID = cl.Properties.Props.AssignedClientID // [MQTT-3.1.3-7] [MQTT-3.2.2-16] + } + + if cl.Properties.Props.SessionExpiryInterval > s.Options.Capabilities.MaximumSessionExpiryInterval { + properties.SessionExpiryInterval = s.Options.Capabilities.MaximumSessionExpiryInterval + properties.SessionExpiryIntervalFlag = true + cl.Properties.Props.SessionExpiryInterval = properties.SessionExpiryInterval + cl.Properties.Props.SessionExpiryIntervalFlag = true + } + + ack := packets.Packet{ + FixedHeader: packets.FixedHeader{ + Type: packets.Connack, + }, + SessionPresent: present, + ReasonCode: reason.Code, // [MQTT-3.2.2-8] + Properties: *properties, + } + return cl.WritePacket(ack) +} + +// processPacket processes an inbound packet for a client. Since the method is +// typically called as a goroutine, errors are primarily for test checking purposes. 
+func (s *Server) processPacket(cl *Client, pk packets.Packet) error { + var err error + + switch pk.FixedHeader.Type { + case packets.Connect: + err = s.processConnect(cl, pk) + case packets.Disconnect: + err = s.processDisconnect(cl, pk) + case packets.Pingreq: + err = s.processPingreq(cl, pk) + case packets.Publish: + code := pk.PublishValidate(s.Options.Capabilities.TopicAliasMaximum) + if code != packets.CodeSuccess { + return code + } + err = s.processPublish(cl, pk) + case packets.Puback: + err = s.processPuback(cl, pk) + case packets.Pubrec: + err = s.processPubrec(cl, pk) + case packets.Pubrel: + err = s.processPubrel(cl, pk) + case packets.Pubcomp: + err = s.processPubcomp(cl, pk) + case packets.Subscribe: + code := pk.SubscribeValidate() + if code != packets.CodeSuccess { + return code + } + err = s.processSubscribe(cl, pk) + case packets.Unsubscribe: + code := pk.UnsubscribeValidate() + if code != packets.CodeSuccess { + return code + } + err = s.processUnsubscribe(cl, pk) + case packets.Auth: + code := pk.AuthValidate() + if code != packets.CodeSuccess { + return code + } + err = s.processAuth(cl, pk) + default: + return fmt.Errorf("no valid packet available; %v", pk.FixedHeader.Type) + } + + s.hooks.OnPacketProcessed(cl, pk, err) + if err != nil { + return err + } + + if cl.State.Inflight.Len() > 0 && atomic.LoadInt32(&cl.State.Inflight.sendQuota) > 0 { + next, ok := cl.State.Inflight.NextImmediate() + if ok { + _ = cl.WritePacket(next) + if ok := cl.State.Inflight.Delete(next.PacketID); ok { + atomic.AddInt64(&s.Info.Inflight, -1) + } + cl.State.Inflight.DecreaseSendQuota() + } + } + + return nil +} + +// processConnect processes a Connect packet. The packet cannot be used to establish +// a new connection on an existing connection. See EstablishConnection instead. 
+func (s *Server) processConnect(cl *Client, _ packets.Packet) error { + s.sendLWT(cl) + return packets.ErrProtocolViolationSecondConnect // [MQTT-3.1.0-2] +} + +// processPingreq processes a Pingreq packet. +func (s *Server) processPingreq(cl *Client, _ packets.Packet) error { + return cl.WritePacket(packets.Packet{ + FixedHeader: packets.FixedHeader{ + Type: packets.Pingresp, // [MQTT-3.12.4-1] + }, + }) +} + +// Publish publishes a publish packet into the broker as if it were sent from the speicfied client. +// This is a convenience function which wraps InjectPacket. As such, this method can publish packets +// to any topic (including $SYS) and bypass ACL checks. The qos byte is used for limiting the +// outbound qos (mqtt v5) rather than issuing to the broker (we assume qos 2 complete). +func (s *Server) Publish(topic string, payload []byte, retain bool, qos byte) error { + cl := s.NewClient(nil, "local", "inline", true) + return s.InjectPacket(cl, packets.Packet{ + FixedHeader: packets.FixedHeader{ + Type: packets.Publish, + Qos: qos, + Retain: retain, + }, + TopicName: topic, + Payload: payload, + PacketID: uint16(qos), // we never process the inbound qos, but we need a packet id for validity checks. + }) +} + +// InjectPacket injects a packet into the broker as if it were sent from the specified client. +// InlineClients using this method can publish packets to any topic (including $SYS) and bypass ACL checks. +func (s *Server) InjectPacket(cl *Client, pk packets.Packet) error { + pk.ProtocolVersion = cl.Properties.ProtocolVersion + + err := s.processPacket(cl, pk) + if err != nil { + return err + } + + atomic.AddInt64(&cl.ops.info.PacketsReceived, 1) + if pk.FixedHeader.Type == packets.Publish { + atomic.AddInt64(&cl.ops.info.MessagesReceived, 1) + } + + return nil +} + +// processPublish processes a Publish packet. 
+func (s *Server) processPublish(cl *Client, pk packets.Packet) error { + if !cl.Net.Inline && !IsValidFilter(pk.TopicName, true) { + return nil + } + + if atomic.LoadInt32(&cl.State.Inflight.receiveQuota) == 0 { + return s.DisconnectClient(cl, packets.ErrReceiveMaximum) // ~[MQTT-3.3.4-7] ~[MQTT-3.3.4-8] + } + + if !cl.Net.Inline && !s.hooks.OnACLCheck(cl, pk.TopicName, true) { + return nil + } + + pk.Origin = cl.ID + pk.Created = time.Now().Unix() + + if !cl.Net.Inline { + if pki, ok := cl.State.Inflight.Get(pk.PacketID); ok { + if pki.FixedHeader.Type == packets.Pubrec { // [MQTT-4.3.3-10] + ack := s.buildAck(pk.PacketID, packets.Pubrec, 0, pk.Properties, packets.ErrPacketIdentifierInUse) + return cl.WritePacket(ack) + } + if ok := cl.State.Inflight.Delete(pk.PacketID); ok { // [MQTT-4.3.2-5] + atomic.AddInt64(&s.Info.Inflight, -1) + } + } + } + + if pk.Properties.TopicAliasFlag && pk.Properties.TopicAlias > 0 { // [MQTT-3.3.2-11] + pk.TopicName = cl.State.TopicAliases.Inbound.Set(pk.Properties.TopicAlias, pk.TopicName) + } + + if pk.FixedHeader.Qos > s.Options.Capabilities.MaximumQos { + pk.FixedHeader.Qos = s.Options.Capabilities.MaximumQos // [MQTT-3.2.2-9] Reduce Qos based on server max qos capability + } + + pkx, err := s.hooks.OnPublish(cl, pk) + if err == nil { + pk = pkx + } else if errors.Is(err, packets.ErrRejectPacket) { + return nil + } else if errors.Is(err, packets.CodeSuccessIgnore) { + pk.Ignore = true + } else if cl.Properties.ProtocolVersion == 5 && pk.FixedHeader.Qos > 0 && errors.As(err, new(packets.Code)) { + err = cl.WritePacket(s.buildAck(pk.PacketID, packets.Puback, 0, pk.Properties, err.(packets.Code))) + if err != nil { + return err + } + return nil + } + + if pk.FixedHeader.Retain { // [MQTT-3.3.1-5] ![MQTT-3.3.1-8] + s.retainMessage(cl, pk) + } + + if pk.FixedHeader.Qos == 0 { + s.publishToSubscribers(pk) + s.hooks.OnPublished(cl, pk) + return nil + } + + cl.State.Inflight.DecreaseReceiveQuota() + ack := s.buildAck(pk.PacketID, 
packets.Puback, 0, pk.Properties, packets.QosCodes[pk.FixedHeader.Qos]) // [MQTT-4.3.2-4] + if pk.FixedHeader.Qos == 2 { + ack = s.buildAck(pk.PacketID, packets.Pubrec, 0, pk.Properties, packets.CodeSuccess) // [MQTT-3.3.4-1] [MQTT-4.3.3-8] + } + + if ok := cl.State.Inflight.Set(ack); ok { + atomic.AddInt64(&s.Info.Inflight, 1) + s.hooks.OnQosPublish(cl, ack, ack.Created, 0) + } + + err = cl.WritePacket(ack) + if err != nil { + return err + } + + if pk.FixedHeader.Qos == 1 { + if ok := cl.State.Inflight.Delete(ack.PacketID); ok { + atomic.AddInt64(&s.Info.Inflight, -1) + } + cl.State.Inflight.IncreaseReceiveQuota() + s.hooks.OnQosComplete(cl, ack) + } + + s.publishToSubscribers(pk) + s.hooks.OnPublished(cl, pk) + + return nil +} + +// retainMessage adds a message to a topic, and if a persistent store is provided, +// adds the message to the store to be reloaded if necessary. +func (s *Server) retainMessage(cl *Client, pk packets.Packet) { + if s.Options.Capabilities.RetainAvailable == 0 || pk.Ignore { + return + } + + out := pk.Copy(false) + r := s.Topics.RetainMessage(out) + s.hooks.OnRetainMessage(cl, pk, r) + atomic.StoreInt64(&s.Info.Retained, int64(s.Topics.Retained.Len())) +} + +// publishToSubscribers publishes a publish packet to all subscribers with matching topic filters. 
+func (s *Server) publishToSubscribers(pk packets.Packet) { + if pk.Ignore { + return + } + + if pk.Created == 0 { + pk.Created = time.Now().Unix() + } + + pk.Expiry = pk.Created + s.Options.Capabilities.MaximumMessageExpiryInterval + if pk.Properties.MessageExpiryInterval > 0 { + pk.Expiry = pk.Created + int64(pk.Properties.MessageExpiryInterval) + } + + subscribers := s.Topics.Subscribers(pk.TopicName) + if len(subscribers.Shared) > 0 { + subscribers = s.hooks.OnSelectSubscribers(subscribers, pk) + if len(subscribers.SharedSelected) == 0 { + subscribers.SelectShared() + } + subscribers.MergeSharedSelected() + } + + for id, subs := range subscribers.Subscriptions { + if cl, ok := s.Clients.Get(id); ok { + _, err := s.publishToClient(cl, subs, pk) + if err != nil { + s.Log.Debug().Err(err).Str("client", cl.ID).Interface("packet", pk).Msg("failed publishing packet") + } + } + } +} + +func (s *Server) publishToClient(cl *Client, sub packets.Subscription, pk packets.Packet) (packets.Packet, error) { + if sub.NoLocal && pk.Origin == cl.ID { + return pk, nil // [MQTT-3.8.3-3] + } + + out := pk.Copy(false) + if !sub.FwdRetainedFlag && ((cl.Properties.ProtocolVersion == 5 && !sub.RetainAsPublished) || cl.Properties.ProtocolVersion < 5) { // ![MQTT-3.3.1-13] [v3 MQTT-3.3.1-9] + out.FixedHeader.Retain = false // [MQTT-3.3.1-12] + } + + if len(sub.Identifiers) > 0 { // [MQTT-3.3.4-3] + out.Properties.SubscriptionIdentifier = []int{} + for _, id := range sub.Identifiers { + out.Properties.SubscriptionIdentifier = append(out.Properties.SubscriptionIdentifier, id) // [MQTT-3.3.4-4] ![MQTT-3.3.4-5] + } + sort.Ints(out.Properties.SubscriptionIdentifier) + } + + if out.FixedHeader.Qos > sub.Qos { + out.FixedHeader.Qos = sub.Qos + } + + if out.FixedHeader.Qos > s.Options.Capabilities.MaximumQos { + out.FixedHeader.Qos = s.Options.Capabilities.MaximumQos // [MQTT-3.2.2-9] + } + + if cl.Properties.Props.TopicAliasMaximum > 0 { + var aliasExists bool + out.Properties.TopicAlias, 
aliasExists = cl.State.TopicAliases.Outbound.Set(pk.TopicName) + if out.Properties.TopicAlias > 0 { + out.Properties.TopicAliasFlag = true + if aliasExists { + out.TopicName = "" + } + } + } + + if out.FixedHeader.Qos > 0 { + i, err := cl.NextPacketID() // [MQTT-4.3.2-1] [MQTT-4.3.3-1] + if err != nil { + s.hooks.OnPacketIDExhausted(cl, pk) + s.Log.Warn().Err(err).Str("client", cl.ID).Str("listener", cl.Net.Listener).Msg("packet ids exhausted") + return out, packets.ErrQuotaExceeded + } + + out.PacketID = uint16(i) // [MQTT-2.2.1-4] + sentQuota := atomic.LoadInt32(&cl.State.Inflight.sendQuota) + + if ok := cl.State.Inflight.Set(out); ok { // [MQTT-4.3.2-3] [MQTT-4.3.3-3] + atomic.AddInt64(&s.Info.Inflight, 1) + s.hooks.OnQosPublish(cl, out, out.Created, 0) + cl.State.Inflight.DecreaseSendQuota() + } + + if sentQuota == 0 && atomic.LoadInt32(&cl.State.Inflight.maximumSendQuota) > 0 { + out.Expiry = -1 + cl.State.Inflight.Set(out) + return out, nil + } + } + + if cl.Net.Conn == nil || cl.Closed() { + return out, packets.CodeDisconnect + } + + select { + case cl.State.outbound <- &out: + atomic.AddInt32(&cl.State.outboundQty, 1) + default: + atomic.AddInt64(&s.Info.MessagesDropped, 1) + cl.ops.hooks.OnPublishDropped(cl, pk) + cl.State.Inflight.Delete(out.PacketID) // packet was dropped due to irregular circumstances, so rollback inflight. + cl.State.Inflight.IncreaseSendQuota() + return out, packets.ErrPendingClientWritesExceeded + } + + return out, nil +} + +func (s *Server) publishRetainedToClient(cl *Client, sub packets.Subscription, existed bool) { + if IsSharedFilter(sub.Filter) { + return // 4.8.2 Non-normative - Shared Subscriptions - No Retained Messages are sent to the Session when it first subscribes. 
+ } + + if sub.RetainHandling == 1 && existed || sub.RetainHandling == 2 { // [MQTT-3.3.1-10] [MQTT-3.3.1-11] + return + } + + sub.FwdRetainedFlag = true + for _, pkv := range s.Topics.Messages(sub.Filter) { // [MQTT-3.8.4-4] + _, err := s.publishToClient(cl, sub, pkv) + if err != nil { + s.Log.Debug().Err(err).Str("client", cl.ID).Str("listener", cl.Net.Listener).Interface("packet", pkv).Msg("failed to publish retained message") + continue + } + s.hooks.OnRetainPublished(cl, pkv) + } +} + +// buildAck builds a standardised ack message for Puback, Pubrec, Pubrel, Pubcomp packets. +func (s *Server) buildAck(packetID uint16, pkt, qos byte, properties packets.Properties, reason packets.Code) packets.Packet { + if s.Options.Capabilities.Compatibilities.NoInheritedPropertiesOnAck { + properties = packets.Properties{} + } + if reason.Code >= packets.ErrUnspecifiedError.Code { + properties.ReasonString = reason.Reason + } + + pk := packets.Packet{ + FixedHeader: packets.FixedHeader{ + Type: pkt, + Qos: qos, + }, + PacketID: packetID, // [MQTT-2.2.1-5] + ReasonCode: reason.Code, // [MQTT-3.4.2-1] + Properties: properties, + Created: time.Now().Unix(), + Expiry: time.Now().Unix() + s.Options.Capabilities.MaximumMessageExpiryInterval, + } + + return pk +} + +// processPuback processes a Puback packet, denoting completion of a QOS 1 packet sent from the server. +func (s *Server) processPuback(cl *Client, pk packets.Packet) error { + if _, ok := cl.State.Inflight.Get(pk.PacketID); !ok { + return nil // omit, but would be packets.ErrPacketIdentifierNotFound + } + + if ok := cl.State.Inflight.Delete(pk.PacketID); ok { // [MQTT-4.3.2-5] + cl.State.Inflight.IncreaseSendQuota() + atomic.AddInt64(&s.Info.Inflight, -1) + s.hooks.OnQosComplete(cl, pk) + } + + return nil +} + +// processPubrec processes a Pubrec packet, denoting receipt of a QOS 2 packet sent from the server. 
+func (s *Server) processPubrec(cl *Client, pk packets.Packet) error { + if _, ok := cl.State.Inflight.Get(pk.PacketID); !ok { // [MQTT-4.3.3-7] [MQTT-4.3.3-13] + return cl.WritePacket(s.buildAck(pk.PacketID, packets.Pubrel, 1, pk.Properties, packets.ErrPacketIdentifierNotFound)) + } + + if pk.ReasonCode >= packets.ErrUnspecifiedError.Code || !pk.ReasonCodeValid() { // [MQTT-4.3.3-4] + if ok := cl.State.Inflight.Delete(pk.PacketID); ok { + atomic.AddInt64(&s.Info.Inflight, -1) + } + cl.ops.hooks.OnQosDropped(cl, pk) + return nil // as per MQTT5 Section 4.13.2 paragraph 2 + } + + ack := s.buildAck(pk.PacketID, packets.Pubrel, 1, pk.Properties, packets.CodeSuccess) // [MQTT-4.3.3-4] ![MQTT-4.3.3-6] + cl.State.Inflight.DecreaseReceiveQuota() // -1 RECV QUOTA + cl.State.Inflight.Set(ack) // [MQTT-4.3.3-5] + return cl.WritePacket(ack) +} + +// processPubrel processes a Pubrel packet, denoting completion of a QOS 2 packet sent from the client. +func (s *Server) processPubrel(cl *Client, pk packets.Packet) error { + if _, ok := cl.State.Inflight.Get(pk.PacketID); !ok { // [MQTT-4.3.3-7] [MQTT-4.3.3-13] + return cl.WritePacket(s.buildAck(pk.PacketID, packets.Pubcomp, 0, pk.Properties, packets.ErrPacketIdentifierNotFound)) + } + + if pk.ReasonCode >= packets.ErrUnspecifiedError.Code || !pk.ReasonCodeValid() { // [MQTT-4.3.3-9] + if ok := cl.State.Inflight.Delete(pk.PacketID); ok { + atomic.AddInt64(&s.Info.Inflight, -1) + } + cl.ops.hooks.OnQosDropped(cl, pk) + return nil + } + + ack := s.buildAck(pk.PacketID, packets.Pubcomp, 0, pk.Properties, packets.CodeSuccess) // [MQTT-4.3.3-11] + cl.State.Inflight.Set(ack) + + err := cl.WritePacket(ack) + if err != nil { + return err + } + + cl.State.Inflight.IncreaseReceiveQuota() // +1 RECV QUOTA + cl.State.Inflight.IncreaseSendQuota() // +1 SENT QUOTA + if ok := cl.State.Inflight.Delete(pk.PacketID); ok { // [MQTT-4.3.3-12] + atomic.AddInt64(&s.Info.Inflight, -1) + s.hooks.OnQosComplete(cl, pk) + } + + return nil +} + +// 
processPubcomp processes a Pubcomp packet, denoting completion of a QOS 2 packet sent from the server. +func (s *Server) processPubcomp(cl *Client, pk packets.Packet) error { + // regardless of whether the pubcomp is a success or failure, we end the qos flow, delete inflight, and restore the quotas. + cl.State.Inflight.IncreaseReceiveQuota() // +1 RECV QUOTA + cl.State.Inflight.IncreaseSendQuota() // +1 SENT QUOTA + if ok := cl.State.Inflight.Delete(pk.PacketID); ok { + atomic.AddInt64(&s.Info.Inflight, -1) + s.hooks.OnQosComplete(cl, pk) + } + + return nil +} + +// processSubscribe processes a Subscribe packet. +func (s *Server) processSubscribe(cl *Client, pk packets.Packet) error { + pk = s.hooks.OnSubscribe(cl, pk) + code := packets.CodeSuccess + if _, ok := cl.State.Inflight.Get(pk.PacketID); ok { + code = packets.ErrPacketIdentifierInUse + } + + filterExisted := make([]bool, len(pk.Filters)) + reasonCodes := make([]byte, len(pk.Filters)) + for i, sub := range pk.Filters { + if code != packets.CodeSuccess { + reasonCodes[i] = code.Code // NB 3.9.3 Non-normative 0x91 + continue + } else if !IsValidFilter(sub.Filter, false) { + reasonCodes[i] = packets.ErrTopicFilterInvalid.Code + } else if sub.NoLocal && IsSharedFilter(sub.Filter) { + reasonCodes[i] = packets.ErrProtocolViolationInvalidSharedNoLocal.Code // [MQTT-3.8.3-4] + } else if !s.hooks.OnACLCheck(cl, sub.Filter, false) { + reasonCodes[i] = packets.ErrNotAuthorized.Code + if s.Options.Capabilities.Compatibilities.ObscureNotAuthorized { + reasonCodes[i] = packets.ErrUnspecifiedError.Code + } + } else { + isNew := s.Topics.Subscribe(cl.ID, sub) // [MQTT-3.8.4-3] + if isNew { + atomic.AddInt64(&s.Info.Subscriptions, 1) + } + cl.State.Subscriptions.Add(sub.Filter, sub) // [MQTT-3.2.2-10] + + if sub.Qos > s.Options.Capabilities.MaximumQos { + sub.Qos = s.Options.Capabilities.MaximumQos // [MQTT-3.2.2-9] + } + + filterExisted[i] = !isNew + reasonCodes[i] = sub.Qos // [MQTT-3.9.3-1] [MQTT-3.8.4-7] + } + + if 
reasonCodes[i] > packets.CodeGrantedQos2.Code && cl.Properties.ProtocolVersion < 5 { // MQTT3 + reasonCodes[i] = packets.ErrUnspecifiedError.Code + } + } + + ack := packets.Packet{ // [MQTT-3.8.4-1] [MQTT-3.8.4-5] + FixedHeader: packets.FixedHeader{ + Type: packets.Suback, + }, + PacketID: pk.PacketID, // [MQTT-2.2.1-6] [MQTT-3.8.4-2] + ReasonCodes: reasonCodes, // [MQTT-3.8.4-6] + Properties: packets.Properties{ + User: pk.Properties.User, + }, + } + + if code.Code >= packets.ErrUnspecifiedError.Code { + ack.Properties.ReasonString = code.Reason + } + + s.hooks.OnSubscribed(cl, pk, reasonCodes) + err := cl.WritePacket(ack) + if err != nil { + return err + } + + for i, sub := range pk.Filters { // [MQTT-3.3.1-9] + if reasonCodes[i] >= packets.ErrUnspecifiedError.Code { + continue + } + + s.publishRetainedToClient(cl, sub, filterExisted[i]) + } + + return nil +} + +// processUnsubscribe processes an unsubscribe packet. +func (s *Server) processUnsubscribe(cl *Client, pk packets.Packet) error { + code := packets.CodeSuccess + if _, ok := cl.State.Inflight.Get(pk.PacketID); ok { + code = packets.ErrPacketIdentifierInUse + } + + pk = s.hooks.OnUnsubscribe(cl, pk) + reasonCodes := make([]byte, len(pk.Filters)) + for i, sub := range pk.Filters { // [MQTT-3.10.4-6] [MQTT-3.11.3-1] + if code != packets.CodeSuccess { + reasonCodes[i] = code.Code // NB 3.11.3 Non-normative 0x91 + continue + } + + if q := s.Topics.Unsubscribe(sub.Filter, cl.ID); q { + atomic.AddInt64(&s.Info.Subscriptions, -1) + reasonCodes[i] = packets.CodeSuccess.Code + } else { + reasonCodes[i] = packets.CodeNoSubscriptionExisted.Code + } + + cl.State.Subscriptions.Delete(sub.Filter) // [MQTT-3.10.4-2] [MQTT-3.10.4-2] ~[MQTT-3.10.4-3] + } + + ack := packets.Packet{ // [MQTT-3.10.4-4] + FixedHeader: packets.FixedHeader{ + Type: packets.Unsuback, + }, + PacketID: pk.PacketID, // [MQTT-2.2.1-6] [MQTT-3.10.4-5] + ReasonCodes: reasonCodes, // [MQTT-3.11.3-2] + Properties: packets.Properties{ + User: 
pk.Properties.User, + }, + } + + if code.Code >= packets.ErrUnspecifiedError.Code { + ack.Properties.ReasonString = code.Reason + } + + s.hooks.OnUnsubscribed(cl, pk) + return cl.WritePacket(ack) +} + +// UnsubscribeClient unsubscribes a client from all of their subscriptions. +func (s *Server) UnsubscribeClient(cl *Client) { + i := 0 + filterMap := cl.State.Subscriptions.GetAll() + filters := make([]packets.Subscription, len(filterMap)) + for k := range filterMap { + cl.State.Subscriptions.Delete(k) + } + + if atomic.LoadUint32(&cl.State.isTakenOver) == 1 { + return + } + + for k, v := range filterMap { + if s.Topics.Unsubscribe(k, cl.ID) { + atomic.AddInt64(&s.Info.Subscriptions, -1) + } + filters[i] = v + i++ + } + s.hooks.OnUnsubscribed(cl, packets.Packet{FixedHeader: packets.FixedHeader{Type: packets.Unsubscribe}, Filters: filters}) +} + +// processAuth processes an Auth packet. +func (s *Server) processAuth(cl *Client, pk packets.Packet) error { + _, err := s.hooks.OnAuthPacket(cl, pk) + if err != nil { + return err + } + + return nil +} + +// processDisconnect processes a Disconnect packet. +func (s *Server) processDisconnect(cl *Client, pk packets.Packet) error { + if pk.Properties.SessionExpiryIntervalFlag { + if pk.Properties.SessionExpiryInterval > 0 && cl.Properties.Props.SessionExpiryInterval == 0 { + return packets.ErrProtocolViolationZeroNonZeroExpiry + } + + cl.Properties.Props.SessionExpiryInterval = pk.Properties.SessionExpiryInterval + cl.Properties.Props.SessionExpiryIntervalFlag = true + } + + s.loop.willDelayed.Delete(cl.ID) // [MQTT-3.1.3-9] [MQTT-3.1.2-8] + cl.Stop(packets.CodeDisconnect) // [MQTT-3.14.4-2] + + return nil +} + +// DisconnectClient sends a Disconnect packet to a client and then closes the client connection. 
+func (s *Server) DisconnectClient(cl *Client, code packets.Code) error { + out := packets.Packet{ + FixedHeader: packets.FixedHeader{ + Type: packets.Disconnect, + }, + ReasonCode: code.Code, + Properties: packets.Properties{}, + } + + if code.Code >= packets.ErrUnspecifiedError.Code { + out.Properties.ReasonString = code.Reason // // [MQTT-3.14.2-1] + } + + // We already have a code we are using to disconnect the client, so we are not + // interested if the write packet fails due to a closed connection (as we are closing it). + err := cl.WritePacket(out) + if !s.Options.Capabilities.Compatibilities.PassiveClientDisconnect { + cl.Stop(code) + if code.Code >= packets.ErrUnspecifiedError.Code { + return code + } + } + + return err +} + +// publishSysTopics publishes the current values to the server $SYS topics. +// Due to the int to string conversions this method is not as cheap as +// some of the others so the publishing interval should be set appropriately. +func (s *Server) publishSysTopics() { + pk := packets.Packet{ + FixedHeader: packets.FixedHeader{ + Type: packets.Publish, + Retain: true, + }, + Created: time.Now().Unix(), + } + + var m runtime.MemStats + runtime.ReadMemStats(&m) + atomic.StoreInt64(&s.Info.MemoryAlloc, int64(m.HeapInuse)) + atomic.StoreInt64(&s.Info.Threads, int64(runtime.NumGoroutine())) + atomic.StoreInt64(&s.Info.Time, time.Now().Unix()) + atomic.StoreInt64(&s.Info.Uptime, time.Now().Unix()-atomic.LoadInt64(&s.Info.Started)) + atomic.StoreInt64(&s.Info.ClientsTotal, int64(s.Clients.Len())) + atomic.StoreInt64(&s.Info.ClientsDisconnected, atomic.LoadInt64(&s.Info.ClientsTotal)-atomic.LoadInt64(&s.Info.ClientsConnected)) + + topics := map[string]string{ + SysPrefix + "/broker/version": s.Info.Version, + SysPrefix + "/broker/time": AtomicItoa(&s.Info.Time), + SysPrefix + "/broker/uptime": AtomicItoa(&s.Info.Uptime), + SysPrefix + "/broker/started": AtomicItoa(&s.Info.Started), + SysPrefix + "/broker/load/bytes/received": 
AtomicItoa(&s.Info.BytesReceived), + SysPrefix + "/broker/load/bytes/sent": AtomicItoa(&s.Info.BytesSent), + SysPrefix + "/broker/clients/connected": AtomicItoa(&s.Info.ClientsConnected), + SysPrefix + "/broker/clients/disconnected": AtomicItoa(&s.Info.ClientsDisconnected), + SysPrefix + "/broker/clients/maximum": AtomicItoa(&s.Info.ClientsMaximum), + SysPrefix + "/broker/clients/total": AtomicItoa(&s.Info.ClientsTotal), + SysPrefix + "/broker/packets/received": AtomicItoa(&s.Info.PacketsReceived), + SysPrefix + "/broker/packets/sent": AtomicItoa(&s.Info.PacketsSent), + SysPrefix + "/broker/messages/received": AtomicItoa(&s.Info.MessagesReceived), + SysPrefix + "/broker/messages/sent": AtomicItoa(&s.Info.MessagesSent), + SysPrefix + "/broker/messages/dropped": AtomicItoa(&s.Info.MessagesDropped), + SysPrefix + "/broker/messages/inflight": AtomicItoa(&s.Info.Inflight), + SysPrefix + "/broker/retained": AtomicItoa(&s.Info.Retained), + SysPrefix + "/broker/subscriptions": AtomicItoa(&s.Info.Subscriptions), + SysPrefix + "/broker/system/memory": AtomicItoa(&s.Info.MemoryAlloc), + SysPrefix + "/broker/system/threads": AtomicItoa(&s.Info.Threads), + } + + for topic, payload := range topics { + pk.TopicName = topic + pk.Payload = []byte(payload) + s.Topics.RetainMessage(pk.Copy(false)) + s.publishToSubscribers(pk) + } + + s.hooks.OnSysInfoTick(s.Info) +} + +// Close attempts to gracefully shut down the server, all listeners, clients, and stores. +func (s *Server) Close() error { + close(s.done) + s.Listeners.CloseAll(s.closeListenerClients) + s.hooks.OnStopped() + s.hooks.Stop() + + s.Log.Info().Msg("mochi mqtt server stopped") + return nil +} + +// closeListenerClients closes all clients on the specified listener. 
+func (s *Server) closeListenerClients(listener string) { + clients := s.Clients.GetByListener(listener) + for _, cl := range clients { + s.DisconnectClient(cl, packets.ErrServerShuttingDown) + } +} + +// sendLWT issues an LWT message to a topic when a client disconnects. +func (s *Server) sendLWT(cl *Client) { + if atomic.LoadUint32(&cl.Properties.Will.Flag) == 0 { + return + } + + modifiedLWT := s.hooks.OnWill(cl, cl.Properties.Will) + + pk := packets.Packet{ + FixedHeader: packets.FixedHeader{ + Type: packets.Publish, + Retain: modifiedLWT.Retain, // [MQTT-3.1.2-14] [MQTT-3.1.2-15] + Qos: modifiedLWT.Qos, + }, + TopicName: modifiedLWT.TopicName, + Payload: modifiedLWT.Payload, + Properties: packets.Properties{ + User: modifiedLWT.User, + }, + Origin: cl.ID, + Created: time.Now().Unix(), + } + + if cl.Properties.Will.WillDelayInterval > 0 { + pk.Connect.WillProperties.WillDelayInterval = cl.Properties.Will.WillDelayInterval + pk.Expiry = time.Now().Unix() + int64(pk.Connect.WillProperties.WillDelayInterval) + s.loop.willDelayed.Add(cl.ID, pk) + return + } + + if pk.FixedHeader.Retain { + s.retainMessage(cl, pk) + } + + s.publishToSubscribers(pk) // [MQTT-3.1.2-8] + atomic.StoreUint32(&cl.Properties.Will.Flag, 0) // [MQTT-3.1.2-10] + s.hooks.OnWillSent(cl, pk) +} + +// readStore reads in any data from the persistent datastore (if applicable). +func (s *Server) readStore() error { + if s.hooks.Provides(StoredClients) { + clients, err := s.hooks.StoredClients() + if err != nil { + return fmt.Errorf("failed to load clients; %w", err) + } + s.loadClients(clients) + s.Log.Debug(). + Int("len", len(clients)). + Msg("loaded clients from store") + } + + if s.hooks.Provides(StoredSubscriptions) { + subs, err := s.hooks.StoredSubscriptions() + if err != nil { + return fmt.Errorf("load subscriptions; %w", err) + } + s.loadSubscriptions(subs) + s.Log.Debug(). + Int("len", len(subs)). 
+ Msg("loaded subscriptions from store") + } + + if s.hooks.Provides(StoredInflightMessages) { + inflight, err := s.hooks.StoredInflightMessages() + if err != nil { + return fmt.Errorf("load inflight; %w", err) + } + s.loadInflight(inflight) + s.Log.Debug(). + Int("len", len(inflight)). + Msg("loaded inflights from store") + } + + if s.hooks.Provides(StoredRetainedMessages) { + retained, err := s.hooks.StoredRetainedMessages() + if err != nil { + return fmt.Errorf("load retained; %w", err) + } + s.loadRetained(retained) + s.Log.Debug(). + Int("len", len(retained)). + Msg("loaded retained messages from store") + } + + if s.hooks.Provides(StoredSysInfo) { + sysInfo, err := s.hooks.StoredSysInfo() + if err != nil { + return fmt.Errorf("load server info; %w", err) + } + s.loadServerInfo(sysInfo.Info) + s.Log.Debug(). + Msg("loaded $SYS info from store") + } + + return nil +} + +// loadServerInfo restores server info from the datastore. +func (s *Server) loadServerInfo(v system.Info) { + if s.Options.Capabilities.Compatibilities.RestoreSysInfoOnRestart { + atomic.StoreInt64(&s.Info.BytesReceived, v.BytesReceived) + atomic.StoreInt64(&s.Info.BytesSent, v.BytesSent) + atomic.StoreInt64(&s.Info.ClientsMaximum, v.ClientsMaximum) + atomic.StoreInt64(&s.Info.ClientsTotal, v.ClientsTotal) + atomic.StoreInt64(&s.Info.ClientsDisconnected, v.ClientsDisconnected) + atomic.StoreInt64(&s.Info.MessagesReceived, v.MessagesReceived) + atomic.StoreInt64(&s.Info.MessagesSent, v.MessagesSent) + atomic.StoreInt64(&s.Info.MessagesDropped, v.MessagesDropped) + atomic.StoreInt64(&s.Info.PacketsReceived, v.PacketsReceived) + atomic.StoreInt64(&s.Info.PacketsSent, v.PacketsSent) + atomic.StoreInt64(&s.Info.InflightDropped, v.InflightDropped) + } + atomic.StoreInt64(&s.Info.Retained, v.Retained) + atomic.StoreInt64(&s.Info.Inflight, v.Inflight) + atomic.StoreInt64(&s.Info.Subscriptions, v.Subscriptions) +} + +// loadSubscriptions restores subscriptions from the datastore. 
+func (s *Server) loadSubscriptions(v []storage.Subscription) { + for _, sub := range v { + sb := packets.Subscription{ + Filter: sub.Filter, + RetainHandling: sub.RetainHandling, + Qos: sub.Qos, + RetainAsPublished: sub.RetainAsPublished, + NoLocal: sub.NoLocal, + Identifier: sub.Identifier, + } + if s.Topics.Subscribe(sub.Client, sb) { + if cl, ok := s.Clients.Get(sub.Client); ok { + cl.State.Subscriptions.Add(sub.Filter, sb) + } + } + } +} + +// loadClients restores clients from the datastore. +func (s *Server) loadClients(v []storage.Client) { + for _, c := range v { + cl := s.NewClient(nil, c.Listener, c.ID, false) + cl.Properties.Username = c.Username + cl.Properties.Clean = c.Clean + cl.Properties.ProtocolVersion = c.ProtocolVersion + cl.Properties.Props = packets.Properties{ + SessionExpiryInterval: c.Properties.SessionExpiryInterval, + SessionExpiryIntervalFlag: c.Properties.SessionExpiryIntervalFlag, + AuthenticationMethod: c.Properties.AuthenticationMethod, + AuthenticationData: c.Properties.AuthenticationData, + RequestProblemInfoFlag: c.Properties.RequestProblemInfoFlag, + RequestProblemInfo: c.Properties.RequestProblemInfo, + RequestResponseInfo: c.Properties.RequestResponseInfo, + ReceiveMaximum: c.Properties.ReceiveMaximum, + TopicAliasMaximum: c.Properties.TopicAliasMaximum, + User: c.Properties.User, + MaximumPacketSize: c.Properties.MaximumPacketSize, + } + cl.Properties.Will = Will(c.Will) + s.Clients.Add(cl) + } +} + +// loadInflight restores inflight messages from the datastore. +func (s *Server) loadInflight(v []storage.Message) { + for _, msg := range v { + if client, ok := s.Clients.Get(msg.Origin); ok { + client.State.Inflight.Set(msg.ToPacket()) + } + } +} + +// loadRetained restores retained messages from the datastore. 
+func (s *Server) loadRetained(v []storage.Message) { + for _, msg := range v { + s.Topics.RetainMessage(msg.ToPacket()) + } +} + +// clearExpiredClients deletes all clients which have been disconnected for longer +// than their given expiry intervals. +func (s *Server) clearExpiredClients(dt int64) { + for id, client := range s.Clients.GetAll() { + disconnected := atomic.LoadInt64(&client.State.disconnected) + if disconnected == 0 { + continue + } + + expire := s.Options.Capabilities.MaximumSessionExpiryInterval + if client.Properties.ProtocolVersion == 5 && client.Properties.Props.SessionExpiryIntervalFlag { + expire = client.Properties.Props.SessionExpiryInterval + } + + if disconnected+int64(expire) < dt { + s.hooks.OnClientExpired(client) + s.Clients.Delete(id) // [MQTT-4.1.0-2] + } + } +} + +// clearExpiredRetainedMessage deletes retained messages from topics if they have expired. +func (s *Server) clearExpiredRetainedMessages(now int64) { + for filter, pk := range s.Topics.Retained.GetAll() { + if (pk.Expiry > 0 && pk.Expiry < now) || pk.Created+s.Options.Capabilities.MaximumMessageExpiryInterval < now { + s.Topics.Retained.Delete(filter) + s.hooks.OnRetainedExpired(filter) + } + } +} + +// clearExpiredInflights deletes any inflight messages which have expired. +func (s *Server) clearExpiredInflights(now int64) { + for _, client := range s.Clients.GetAll() { + if deleted := client.ClearInflights(now, s.Options.Capabilities.MaximumMessageExpiryInterval); len(deleted) > 0 { + for _, id := range deleted { + s.hooks.OnQosDropped(client, packets.Packet{PacketID: id}) + } + } + } +} + +// sendDelayedLWT sends any LWT messages which have reached their issue time. 
+func (s *Server) sendDelayedLWT(dt int64) { + for id, pk := range s.loop.willDelayed.GetAll() { + if dt > pk.Expiry { + s.publishToSubscribers(pk) // [MQTT-3.1.2-8] + if cl, ok := s.Clients.Get(id); ok { + if pk.FixedHeader.Retain { + s.retainMessage(cl, pk) + } + cl.Properties.Will = Will{} // [MQTT-3.1.2-10] + s.hooks.OnWillSent(cl, pk) + } + s.loop.willDelayed.Delete(id) + } + } +} + +// AtomicItoa converts an int64 point to a string. +func AtomicItoa(ptr *int64) string { + return strconv.FormatInt(atomic.LoadInt64(ptr), 10) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/system/system.go b/vendor/github.com/mochi-mqtt/server/v2/system/system.go new file mode 100644 index 000000000..2ed47d0c4 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/system/system.go @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package system + +import "sync/atomic" + +// Info contains atomic counters and values for various server statistics +// commonly found in $SYS topics (and others). 
+// based on https://github.com/mqtt/mqtt.org/wiki/SYS-Topics +type Info struct { + Version string `json:"version"` // the current version of the server + Started int64 `json:"started"` // the time the server started in unix seconds + Time int64 `json:"time"` // current time on the server + Uptime int64 `json:"uptime"` // the number of seconds the server has been online + BytesReceived int64 `json:"bytes_received"` // total number of bytes received since the broker started + BytesSent int64 `json:"bytes_sent"` // total number of bytes sent since the broker started + ClientsConnected int64 `json:"clients_connected"` // number of currently connected clients + ClientsDisconnected int64 `json:"clients_disconnected"` // total number of persistent clients (with clean session disabled) that are registered at the broker but are currently disconnected + ClientsMaximum int64 `json:"clients_maximum"` // maximum number of active clients that have been connected + ClientsTotal int64 `json:"clients_total"` // total number of connected and disconnected clients with a persistent session currently connected and registered + MessagesReceived int64 `json:"messages_received"` // total number of publish messages received + MessagesSent int64 `json:"messages_sent"` // total number of publish messages sent + MessagesDropped int64 `json:"messages_dropped"` // total number of publish messages dropped to slow subscriber + Retained int64 `json:"retained"` // total number of retained messages active on the broker + Inflight int64 `json:"inflight"` // the number of messages currently in-flight + InflightDropped int64 `json:"inflight_dropped"` // the number of inflight messages which were dropped + Subscriptions int64 `json:"subscriptions"` // total number of subscriptions active on the broker + PacketsReceived int64 `json:"packets_received"` // the total number of publish messages received + PacketsSent int64 `json:"packets_sent"` // total number of messages of any type sent since the broker 
started + MemoryAlloc int64 `json:"memory_alloc"` // memory currently allocated + Threads int64 `json:"threads"` // number of active goroutines, named as threads for platform ambiguity +} + +// Clone makes a copy of Info using atomic operation +func (i *Info) Clone() *Info { + return &Info{ + Version: i.Version, + Started: atomic.LoadInt64(&i.Started), + Time: atomic.LoadInt64(&i.Time), + Uptime: atomic.LoadInt64(&i.Uptime), + BytesReceived: atomic.LoadInt64(&i.BytesReceived), + BytesSent: atomic.LoadInt64(&i.BytesSent), + ClientsConnected: atomic.LoadInt64(&i.ClientsConnected), + ClientsMaximum: atomic.LoadInt64(&i.ClientsMaximum), + ClientsTotal: atomic.LoadInt64(&i.ClientsTotal), + ClientsDisconnected: atomic.LoadInt64(&i.ClientsDisconnected), + MessagesReceived: atomic.LoadInt64(&i.MessagesReceived), + MessagesSent: atomic.LoadInt64(&i.MessagesSent), + MessagesDropped: atomic.LoadInt64(&i.MessagesDropped), + Retained: atomic.LoadInt64(&i.Retained), + Inflight: atomic.LoadInt64(&i.Inflight), + InflightDropped: atomic.LoadInt64(&i.InflightDropped), + Subscriptions: atomic.LoadInt64(&i.Subscriptions), + PacketsReceived: atomic.LoadInt64(&i.PacketsReceived), + PacketsSent: atomic.LoadInt64(&i.PacketsSent), + MemoryAlloc: atomic.LoadInt64(&i.MemoryAlloc), + Threads: atomic.LoadInt64(&i.Threads), + } +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/topics.go b/vendor/github.com/mochi-mqtt/server/v2/topics.go new file mode 100644 index 000000000..be7c9a396 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/topics.go @@ -0,0 +1,707 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2023 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package mqtt + +import ( + "strings" + "sync" + "sync/atomic" + + "github.com/mochi-mqtt/server/v2/packets" +) + +var ( + SharePrefix = "$SHARE" // the prefix indicating a share topic + SysPrefix = "$SYS" // the prefix indicating a system info topic +) + +// TopicAliases contains inbound and outbound 
topic alias registrations. +type TopicAliases struct { + Inbound *InboundTopicAliases + Outbound *OutboundTopicAliases +} + +// NewTopicAliases returns an instance of TopicAliases. +func NewTopicAliases(topicAliasMaximum uint16) TopicAliases { + return TopicAliases{ + Inbound: NewInboundTopicAliases(topicAliasMaximum), + Outbound: NewOutboundTopicAliases(topicAliasMaximum), + } +} + +// NewInboundTopicAliases returns a pointer to InboundTopicAliases. +func NewInboundTopicAliases(topicAliasMaximum uint16) *InboundTopicAliases { + return &InboundTopicAliases{ + maximum: topicAliasMaximum, + internal: map[uint16]string{}, + } +} + +// InboundTopicAliases contains a map of topic aliases received from the client. +type InboundTopicAliases struct { + internal map[uint16]string + sync.RWMutex + maximum uint16 +} + +// Set sets a new alias for a specific topic. +func (a *InboundTopicAliases) Set(id uint16, topic string) string { + a.Lock() + defer a.Unlock() + + if a.maximum == 0 { + return topic // ? + } + + if existing, ok := a.internal[id]; ok && topic == "" { + return existing + } + + a.internal[id] = topic + return topic +} + +// OutboundTopicAliases contains a map of topic aliases sent from the broker to the client. +type OutboundTopicAliases struct { + internal map[string]uint16 + sync.RWMutex + cursor uint32 + maximum uint16 +} + +// NewOutboundTopicAliases returns a pointer to OutboundTopicAliases. +func NewOutboundTopicAliases(topicAliasMaximum uint16) *OutboundTopicAliases { + return &OutboundTopicAliases{ + maximum: topicAliasMaximum, + internal: map[string]uint16{}, + } +} + +// Set sets a new topic alias for a topic and returns the alias value, and a boolean +// indicating if the alias already existed. 
+func (a *OutboundTopicAliases) Set(topic string) (uint16, bool) { + a.Lock() + defer a.Unlock() + + if a.maximum == 0 { + return 0, false + } + + if i, ok := a.internal[topic]; ok { + return i, true + } + + i := atomic.LoadUint32(&a.cursor) + if i+1 > uint32(a.maximum) { + // if i+1 > math.MaxUint16 { + return 0, false + } + + a.internal[topic] = uint16(i) + 1 + atomic.StoreUint32(&a.cursor, i+1) + return uint16(i) + 1, false +} + +// SharedSubscriptions contains a map of subscriptions to a shared filter, +// keyed on share group then client id. +type SharedSubscriptions struct { + internal map[string]map[string]packets.Subscription + sync.RWMutex +} + +// NewSharedSubscriptions returns a new instance of Subscriptions. +func NewSharedSubscriptions() *SharedSubscriptions { + return &SharedSubscriptions{ + internal: map[string]map[string]packets.Subscription{}, + } +} + +// Add creates a new shared subscription for a group and client id pair. +func (s *SharedSubscriptions) Add(group, id string, val packets.Subscription) { + s.Lock() + defer s.Unlock() + if _, ok := s.internal[group]; !ok { + s.internal[group] = map[string]packets.Subscription{} + } + s.internal[group][id] = val +} + +// Delete deletes a client id from a shared subscription group. +func (s *SharedSubscriptions) Delete(group, id string) { + s.Lock() + defer s.Unlock() + delete(s.internal[group], id) + if len(s.internal[group]) == 0 { + delete(s.internal, group) + } +} + +// Get returns the subscription properties for a client id in a share group, if one exists. +func (s *SharedSubscriptions) Get(group, id string) (val packets.Subscription, ok bool) { + s.RLock() + defer s.RUnlock() + if _, ok := s.internal[group]; !ok { + return val, ok + } + + val, ok = s.internal[group][id] + return val, ok +} + +// GroupLen returns the number of groups subscribed to the filter. 
+func (s *SharedSubscriptions) GroupLen() int { + s.RLock() + defer s.RUnlock() + val := len(s.internal) + return val +} + +// Len returns the total number of shared subscriptions to a filter across all groups. +func (s *SharedSubscriptions) Len() int { + s.RLock() + defer s.RUnlock() + n := 0 + for _, group := range s.internal { + n += len(group) + } + return n +} + +// GetAll returns all shared subscription groups and their subscriptions. +func (s *SharedSubscriptions) GetAll() map[string]map[string]packets.Subscription { + s.RLock() + defer s.RUnlock() + m := map[string]map[string]packets.Subscription{} + for group, subs := range s.internal { + if _, ok := m[group]; !ok { + m[group] = map[string]packets.Subscription{} + } + + for id, sub := range subs { + m[group][id] = sub + } + } + return m +} + +// Subscriptions is a map of subscriptions keyed on client. +type Subscriptions struct { + internal map[string]packets.Subscription + sync.RWMutex +} + +// NewSubscriptions returns a new instance of Subscriptions. +func NewSubscriptions() *Subscriptions { + return &Subscriptions{ + internal: map[string]packets.Subscription{}, + } +} + +// Add adds a new subscription for a client. ID can be a filter in the +// case this map is client state, or a client id if particle state. +func (s *Subscriptions) Add(id string, val packets.Subscription) { + s.Lock() + defer s.Unlock() + s.internal[id] = val +} + +// GetAll returns all subscriptions. +func (s *Subscriptions) GetAll() map[string]packets.Subscription { + s.RLock() + defer s.RUnlock() + m := map[string]packets.Subscription{} + for k, v := range s.internal { + m[k] = v + } + return m +} + +// Get returns a subscriptions for a specific client or filter id. +func (s *Subscriptions) Get(id string) (val packets.Subscription, ok bool) { + s.RLock() + defer s.RUnlock() + val, ok = s.internal[id] + return val, ok +} + +// Len returns the number of subscriptions. 
+func (s *Subscriptions) Len() int { + s.RLock() + defer s.RUnlock() + val := len(s.internal) + return val +} + +// Delete removes a subscription by client or filter id. +func (s *Subscriptions) Delete(id string) { + s.Lock() + defer s.Unlock() + delete(s.internal, id) +} + +// ClientSubscriptions is a map of aggregated subscriptions for a client. +type ClientSubscriptions map[string]packets.Subscription + +// Subscribers contains the shared and non-shared subscribers matching a topic. +type Subscribers struct { + Shared map[string]map[string]packets.Subscription + SharedSelected map[string]packets.Subscription + Subscriptions map[string]packets.Subscription +} + +// SelectShared returns one subscriber for each shared subscription group. +func (s *Subscribers) SelectShared() { + s.SharedSelected = map[string]packets.Subscription{} + for _, subs := range s.Shared { + for client, sub := range subs { + cls, ok := s.SharedSelected[client] + if !ok { + cls = sub + } + + s.SharedSelected[client] = cls.Merge(sub) + break + } + } +} + +// MergeSharedSelected merges the selected subscribers for a shared subscription group +// and the non-shared subscribers, to ensure that no subscriber gets multiple messages +// due to have both types of subscription matching the same filter. +func (s *Subscribers) MergeSharedSelected() { + for client, sub := range s.SharedSelected { + cls, ok := s.Subscriptions[client] + if !ok { + cls = sub + } + + s.Subscriptions[client] = cls.Merge(sub) + } +} + +// TopicsIndex is a prefix/trie tree containing topic subscribers and retained messages. +type TopicsIndex struct { + Retained *packets.Packets + root *particle // a leaf containing a message and more leaves. +} + +// NewTopicsIndex returns a pointer to a new instance of Index. 
+func NewTopicsIndex() *TopicsIndex { + return &TopicsIndex{ + Retained: packets.NewPackets(), + root: &particle{ + particles: newParticles(), + subscriptions: NewSubscriptions(), + }, + } +} + +// Subscribe adds a new subscription for a client to a topic filter, returning +// true if the subscription was new. +func (x *TopicsIndex) Subscribe(client string, subscription packets.Subscription) bool { + x.root.Lock() + defer x.root.Unlock() + + var existed bool + prefix, _ := isolateParticle(subscription.Filter, 0) + if strings.EqualFold(prefix, SharePrefix) { + group, _ := isolateParticle(subscription.Filter, 1) + n := x.set(subscription.Filter, 2) + _, existed = n.shared.Get(group, client) + n.shared.Add(group, client, subscription) + } else { + n := x.set(subscription.Filter, 0) + _, existed = n.subscriptions.Get(client) + n.subscriptions.Add(client, subscription) + } + + return !existed +} + +// Unsubscribe removes a subscription filter for a client, returning true if the +// subscription existed. +func (x *TopicsIndex) Unsubscribe(filter, client string) bool { + x.root.Lock() + defer x.root.Unlock() + + var d int + prefix, _ := isolateParticle(filter, 0) + shareSub := strings.EqualFold(prefix, SharePrefix) + if shareSub { + d = 2 + } + + particle := x.seek(filter, d) + if particle == nil { + return false + } + + if shareSub { + group, _ := isolateParticle(filter, 1) + particle.shared.Delete(group, client) + } else { + particle.subscriptions.Delete(client) + } + + x.trim(particle) + return true +} + +// RetainMessage saves a message payload to the end of a topic address. Returns +// 1 if a retained message was added, and -1 if the retained message was removed. +// 0 is returned if sequential empty payloads are received. 
+func (x *TopicsIndex) RetainMessage(pk packets.Packet) int64 { + x.root.Lock() + defer x.root.Unlock() + + n := x.set(pk.TopicName, 0) + n.Lock() + defer n.Unlock() + if len(pk.Payload) > 0 { + n.retainPath = pk.TopicName + x.Retained.Add(pk.TopicName, pk) + return 1 + } + + var out int64 + if pke, ok := x.Retained.Get(pk.TopicName); ok && len(pke.Payload) > 0 && pke.FixedHeader.Retain { + out = -1 // if a retained packet existed, return -1 + } + + n.retainPath = "" + x.Retained.Delete(pk.TopicName) // [MQTT-3.3.1-6] [MQTT-3.3.1-7] + x.trim(n) + + return out +} + +// set creates a topic address in the index and returns the final particle. +func (x *TopicsIndex) set(topic string, d int) *particle { + var key string + var hasNext = true + n := x.root + for hasNext { + key, hasNext = isolateParticle(topic, d) + d++ + + p := n.particles.get(key) + if p == nil { + p = newParticle(key, n) + n.particles.add(p) + } + n = p + } + + return n +} + +// seek finds the particle at a specific index in a topic filter. +func (x *TopicsIndex) seek(filter string, d int) *particle { + var key string + var hasNext = true + n := x.root + for hasNext { + key, hasNext = isolateParticle(filter, d) + n = n.particles.get(key) + d++ + if n == nil { + return nil + } + } + + return n +} + +// trim removes empty filter particles from the index. +func (x *TopicsIndex) trim(n *particle) { + for n.parent != nil && n.retainPath == "" && n.particles.len()+n.subscriptions.Len()+n.shared.Len() == 0 { + key := n.key + n = n.parent + n.particles.delete(key) + } +} + +// Messages returns a slice of any retained messages which match a filter. +func (x *TopicsIndex) Messages(filter string) []packets.Packet { + return x.scanMessages(filter, 0, nil, []packets.Packet{}) +} + +// scanMessages returns all retained messages on topics matching a given filter. 
+func (x *TopicsIndex) scanMessages(filter string, d int, n *particle, pks []packets.Packet) []packets.Packet { + if n == nil { + n = x.root + } + + if len(filter) == 0 || x.Retained.Len() == 0 { + return pks + } + + if !strings.ContainsRune(filter, '#') && !strings.ContainsRune(filter, '+') { + if pk, ok := x.Retained.Get(filter); ok { + pks = append(pks, pk) + } + return pks + } + + key, hasNext := isolateParticle(filter, d) + if key == "+" || key == "#" || d == -1 { + for _, adjacent := range n.particles.getAll() { + if d == 0 && adjacent.key == SysPrefix { + continue + } + + if !hasNext { + if adjacent.retainPath != "" { + if pk, ok := x.Retained.Get(adjacent.retainPath); ok { + pks = append(pks, pk) + } + } + } + + if hasNext || (d >= 0 && key == "#") { + pks = x.scanMessages(filter, d+1, adjacent, pks) + } + } + return pks + } + + if particle := n.particles.get(key); particle != nil { + if hasNext { + return x.scanMessages(filter, d+1, particle, pks) + } + + if pk, ok := x.Retained.Get(particle.retainPath); ok { + pks = append(pks, pk) + } + } + + return pks +} + +// Subscribers returns a map of clients who are subscribed to matching filters, +// their subscription ids and highest qos. +func (x *TopicsIndex) Subscribers(topic string) *Subscribers { + return x.scanSubscribers(topic, 0, nil, &Subscribers{ + Shared: map[string]map[string]packets.Subscription{}, + SharedSelected: map[string]packets.Subscription{}, + Subscriptions: map[string]packets.Subscription{}, + }) +} + +// scanSubscribers returns a list of client subscriptions matching an indexed topic address. 
+func (x *TopicsIndex) scanSubscribers(topic string, d int, n *particle, subs *Subscribers) *Subscribers { + if n == nil { + n = x.root + } + + if len(topic) == 0 { + return subs + } + + key, hasNext := isolateParticle(topic, d) + for _, partKey := range []string{key, "+"} { + if particle := n.particles.get(partKey); particle != nil { // [MQTT-3.3.2-3] + if hasNext { + x.scanSubscribers(topic, d+1, particle, subs) + } else { + x.gatherSubscriptions(topic, particle, subs) + x.gatherSharedSubscriptions(particle, subs) + + if wild := particle.particles.get("#"); wild != nil && partKey != "+" { + x.gatherSubscriptions(topic, wild, subs) // also match any subs where filter/# is filter as per 4.7.1.2 + x.gatherSharedSubscriptions(wild, subs) + } + } + } + } + + if particle := n.particles.get("#"); particle != nil { + x.gatherSubscriptions(topic, particle, subs) + x.gatherSharedSubscriptions(particle, subs) + } + + return subs +} + +// gatherSubscriptions collects any matching subscriptions, and gathers any identifiers or highest qos values. +func (x *TopicsIndex) gatherSubscriptions(topic string, particle *particle, subs *Subscribers) { + if subs.Subscriptions == nil { + subs.Subscriptions = map[string]packets.Subscription{} + } + + for client, sub := range particle.subscriptions.GetAll() { + if len(sub.Filter) > 0 && topic[0] == '$' && (sub.Filter[0] == '+' || sub.Filter[0] == '#') { // don't match $ topics with top level wildcards [MQTT-4.7.1-1] [MQTT-4.7.1-2] + continue + } + + cls, ok := subs.Subscriptions[client] + if !ok { + cls = sub + } + + subs.Subscriptions[client] = cls.Merge(sub) + } +} + +// gatherSharedSubscriptions gathers all shared subscriptions for a particle. 
+func (x *TopicsIndex) gatherSharedSubscriptions(particle *particle, subs *Subscribers) {
+	if subs.Shared == nil {
+		subs.Shared = map[string]map[string]packets.Subscription{}
+	}
+
+	for _, shares := range particle.shared.GetAll() {
+		for client, sub := range shares {
+			if _, ok := subs.Shared[sub.Filter]; !ok {
+				subs.Shared[sub.Filter] = map[string]packets.Subscription{}
+			}
+
+			subs.Shared[sub.Filter][client] = sub
+		}
+	}
+}
+
+// isolateParticle extracts a particle between d / and d+1 / without allocations.
+func isolateParticle(filter string, d int) (particle string, hasNext bool) {
+	var next, end int
+	for i := 0; end > -1 && i <= d; i++ {
+		end = strings.IndexRune(filter, '/')
+
+		switch {
+		case d > -1 && i == d && end > -1:
+			hasNext = true
+			particle = filter[next:end]
+		case end > -1:
+			hasNext = false
+			filter = filter[end+1:]
+		default:
+			hasNext = false
+			particle = filter[next:]
+		}
+	}
+
+	return
+}
+
+// IsSharedFilter returns true if the filter uses the share prefix.
+func IsSharedFilter(filter string) bool {
+	prefix, _ := isolateParticle(filter, 0)
+	return strings.EqualFold(prefix, SharePrefix)
+}
+
+// IsValidFilter returns true if the filter is valid.
+func IsValidFilter(filter string, forPublish bool) bool {
+	if !forPublish && len(filter) == 0 { // publishing can accept zero-length topic filter if topic alias exists, so we don't enforce for publish.
+		return false // [MQTT-4.7.3-1]
+	}
+
+	if forPublish {
+		if len(filter) >= len(SysPrefix) && strings.EqualFold(filter[0:len(SysPrefix)], SysPrefix) {
+			// 4.7.2 Non-normative - The Server SHOULD prevent Clients from using such Topic Names [$SYS] to exchange messages with other Clients.
+ return false + } + + if strings.ContainsRune(filter, '+') || strings.ContainsRune(filter, '#') { + return false //[MQTT-3.3.2-2] + } + } + + wildhash := strings.IndexRune(filter, '#') + if wildhash >= 0 && wildhash != len(filter)-1 { // [MQTT-4.7.1-2] + return false + } + + prefix, hasNext := isolateParticle(filter, 0) + if !hasNext && strings.EqualFold(prefix, SharePrefix) { + return false // [MQTT-4.8.2-1] + } + + if hasNext && strings.EqualFold(prefix, SharePrefix) { + group, hasNext := isolateParticle(filter, 1) + if !hasNext { + return false // [MQTT-4.8.2-1] + } + + if strings.ContainsRune(group, '+') || strings.ContainsRune(group, '#') { + return false // [MQTT-4.8.2-2] + } + } + + return true +} + +// particle is a child node on the tree. +type particle struct { + key string // the key of the particle + parent *particle // a pointer to the parent of the particle + particles particles // a map of child particles + subscriptions *Subscriptions // a map of subscriptions made by clients to this ending address + shared *SharedSubscriptions // a map of shared subscriptions keyed on group name + retainPath string // path of a retained message + sync.Mutex // mutex for when making changes to the particle +} + +// newParticle returns a pointer to a new instance of particle. +func newParticle(key string, parent *particle) *particle { + return &particle{ + key: key, + parent: parent, + particles: newParticles(), + subscriptions: NewSubscriptions(), + shared: NewSharedSubscriptions(), + } +} + +// particles is a concurrency safe map of particles. +type particles struct { + internal map[string]*particle + sync.RWMutex +} + +// newParticles returns a map of particles. +func newParticles() particles { + return particles{ + internal: map[string]*particle{}, + } +} + +// add adds a new particle. +func (p *particles) add(val *particle) { + p.Lock() + p.internal[val.key] = val + p.Unlock() +} + +// getAll returns all particles. 
+func (p *particles) getAll() map[string]*particle { + p.RLock() + defer p.RUnlock() + m := map[string]*particle{} + for k, v := range p.internal { + m[k] = v + } + return m +} + +// get returns a particle by id (key). +func (p *particles) get(id string) *particle { + p.RLock() + defer p.RUnlock() + return p.internal[id] +} + +// len returns the number of particles. +func (p *particles) len() int { + p.RLock() + defer p.RUnlock() + val := len(p.internal) + return val +} + +// delete removes a particle. +func (p *particles) delete(id string) { + p.Lock() + defer p.Unlock() + delete(p.internal, id) +} diff --git a/vendor/github.com/rs/xid/.appveyor.yml b/vendor/github.com/rs/xid/.appveyor.yml new file mode 100644 index 000000000..c73bb33ba --- /dev/null +++ b/vendor/github.com/rs/xid/.appveyor.yml @@ -0,0 +1,27 @@ +version: 1.0.0.{build} + +platform: x64 + +branches: + only: + - master + +clone_folder: c:\gopath\src\github.com\rs\xid + +environment: + GOPATH: c:\gopath + +install: + - echo %PATH% + - echo %GOPATH% + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version + - go env + - go get -t . 
+ +build_script: + - go build + +test_script: + - go test + diff --git a/vendor/github.com/rs/xid/.travis.yml b/vendor/github.com/rs/xid/.travis.yml new file mode 100644 index 000000000..b37da1594 --- /dev/null +++ b/vendor/github.com/rs/xid/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: +- "1.9" +- "1.10" +- "master" +matrix: + allow_failures: + - go: "master" diff --git a/vendor/github.com/rs/xid/LICENSE b/vendor/github.com/rs/xid/LICENSE new file mode 100644 index 000000000..47c5e9d2d --- /dev/null +++ b/vendor/github.com/rs/xid/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2015 Olivier Poitrey + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is furnished +to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/rs/xid/README.md b/vendor/github.com/rs/xid/README.md new file mode 100644 index 000000000..5bf462e83 --- /dev/null +++ b/vendor/github.com/rs/xid/README.md @@ -0,0 +1,116 @@ +# Globally Unique ID Generator + +[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/xid) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/xid/master/LICENSE) [![Build Status](https://travis-ci.org/rs/xid.svg?branch=master)](https://travis-ci.org/rs/xid) [![Coverage](http://gocover.io/_badge/github.com/rs/xid)](http://gocover.io/github.com/rs/xid) + +Package xid is a globally unique id generator library, ready to safely be used directly in your server code. + +Xid uses the Mongo Object ID algorithm to generate globally unique ids with a different serialization (base64) to make it shorter when transported as a string: +https://docs.mongodb.org/manual/reference/object-id/ + +- 4-byte value representing the seconds since the Unix epoch, +- 3-byte machine identifier, +- 2-byte process id, and +- 3-byte counter, starting with a random value. + +The binary representation of the id is compatible with Mongo 12 bytes Object IDs. +The string representation is using base32 hex (w/o padding) for better space efficiency +when stored in that form (20 bytes). The hex variant of base32 is used to retain the +sortable property of the id. + +Xid doesn't use base64 because case sensitivity and the 2 non alphanum chars may be an +issue when transported as a string between various systems. Base36 wasn't retained either +because 1/ it's not standard 2/ the resulting size is not predictable (not bit aligned) +and 3/ it would not remain sortable. To validate a base32 `xid`, expect a 20 chars long, +all lowercase sequence of `a` to `v` letters and `0` to `9` numbers (`[0-9a-v]{20}`). + +UUIDs are 16 bytes (128 bits) and 36 chars as string representation. 
Twitter Snowflake
+ids are 8 bytes (64 bits) but require machine/data-center configuration and/or central
+generator servers. xid stands in between with 12 bytes (96 bits) and a more compact
+URL-safe string representation (20 chars). No configuration or central generator server
+is required so it can be used directly in server's code.
+
+| Name | Binary Size | String Size | Features
+|-------------|-------------|----------------|----------------
+| [UUID] | 16 bytes | 36 chars | configuration free, not sortable
+| [shortuuid] | 16 bytes | 22 chars | configuration free, not sortable
+| [Snowflake] | 8 bytes | up to 20 chars | needs machine/DC configuration, needs central server, sortable
+| [MongoID] | 12 bytes | 24 chars | configuration free, sortable
+| xid | 12 bytes | 20 chars | configuration free, sortable
+
+[UUID]: https://en.wikipedia.org/wiki/Universally_unique_identifier
+[shortuuid]: https://github.com/stochastic-technologies/shortuuid
+[Snowflake]: https://blog.twitter.com/2010/announcing-snowflake
+[MongoID]: https://docs.mongodb.org/manual/reference/object-id/
+
+Features:
+
+- Size: 12 bytes (96 bits), smaller than UUID, larger than snowflake
+- Base32 hex encoded by default (20 chars when transported as printable string, still sortable)
+- Non configured, you don't need to set a unique machine and/or data center id
+- K-ordered
+- Embedded time with 1 second precision
+- Unicity guaranteed for 16,777,216 (24 bits) unique ids per second and per host/process
+- Lock-free (i.e.: unlike UUIDv1 and v2)
+
+Best used with [zerolog](https://github.com/rs/zerolog)'s
+[RequestIDHandler](https://godoc.org/github.com/rs/zerolog/hlog#RequestIDHandler).
+
+Notes:
+
+- Xid is dependent on the system time, a monotonic counter and so is not cryptographically secure. If unpredictability of IDs is important, you should not use Xids. It is worth noting that most other UUID-like implementations are also not cryptographically secure.
You should use libraries that rely on cryptographically secure sources (like /dev/urandom on unix, crypto/rand in golang), if you want a truly random ID generator. + +References: + +- http://www.slideshare.net/davegardnerisme/unique-id-generation-in-distributed-systems +- https://en.wikipedia.org/wiki/Universally_unique_identifier +- https://blog.twitter.com/2010/announcing-snowflake +- Python port by [Graham Abbott](https://github.com/graham): https://github.com/graham/python_xid +- Scala port by [Egor Kolotaev](https://github.com/kolotaev): https://github.com/kolotaev/ride +- Rust port by [Jérôme Renard](https://github.com/jeromer/): https://github.com/jeromer/libxid +- Ruby port by [Valar](https://github.com/valarpirai/): https://github.com/valarpirai/ruby_xid +- Java port by [0xShamil](https://github.com/0xShamil/): https://github.com/0xShamil/java-xid +- Dart port by [Peter Bwire](https://github.com/pitabwire): https://pub.dev/packages/xid + +## Install + + go get github.com/rs/xid + +## Usage + +```go +guid := xid.New() + +println(guid.String()) +// Output: 9m4e2mr0ui3e8a215n4g +``` + +Get `xid` embedded info: + +```go +guid.Machine() +guid.Pid() +guid.Time() +guid.Counter() +``` + +## Benchmark + +Benchmark against Go [Maxim Bublis](https://github.com/satori)'s [UUID](https://github.com/satori/go.uuid). + +``` +BenchmarkXID 20000000 91.1 ns/op 32 B/op 1 allocs/op +BenchmarkXID-2 20000000 55.9 ns/op 32 B/op 1 allocs/op +BenchmarkXID-4 50000000 32.3 ns/op 32 B/op 1 allocs/op +BenchmarkUUIDv1 10000000 204 ns/op 48 B/op 1 allocs/op +BenchmarkUUIDv1-2 10000000 160 ns/op 48 B/op 1 allocs/op +BenchmarkUUIDv1-4 10000000 195 ns/op 48 B/op 1 allocs/op +BenchmarkUUIDv4 1000000 1503 ns/op 64 B/op 2 allocs/op +BenchmarkUUIDv4-2 1000000 1427 ns/op 64 B/op 2 allocs/op +BenchmarkUUIDv4-4 1000000 1452 ns/op 64 B/op 2 allocs/op +``` + +Note: UUIDv1 requires a global lock, hence the performance degradation as we add more CPUs. 
+ +## Licenses + +All source code is licensed under the [MIT License](https://raw.github.com/rs/xid/master/LICENSE). diff --git a/vendor/github.com/rs/xid/error.go b/vendor/github.com/rs/xid/error.go new file mode 100644 index 000000000..ea2537493 --- /dev/null +++ b/vendor/github.com/rs/xid/error.go @@ -0,0 +1,11 @@ +package xid + +const ( + // ErrInvalidID is returned when trying to unmarshal an invalid ID. + ErrInvalidID strErr = "xid: invalid ID" +) + +// strErr allows declaring errors as constants. +type strErr string + +func (err strErr) Error() string { return string(err) } diff --git a/vendor/github.com/rs/xid/hostid_darwin.go b/vendor/github.com/rs/xid/hostid_darwin.go new file mode 100644 index 000000000..08351ff72 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_darwin.go @@ -0,0 +1,9 @@ +// +build darwin + +package xid + +import "syscall" + +func readPlatformMachineID() (string, error) { + return syscall.Sysctl("kern.uuid") +} diff --git a/vendor/github.com/rs/xid/hostid_fallback.go b/vendor/github.com/rs/xid/hostid_fallback.go new file mode 100644 index 000000000..7fbd3c004 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_fallback.go @@ -0,0 +1,9 @@ +// +build !darwin,!linux,!freebsd,!windows + +package xid + +import "errors" + +func readPlatformMachineID() (string, error) { + return "", errors.New("not implemented") +} diff --git a/vendor/github.com/rs/xid/hostid_freebsd.go b/vendor/github.com/rs/xid/hostid_freebsd.go new file mode 100644 index 000000000..be25a039e --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_freebsd.go @@ -0,0 +1,9 @@ +// +build freebsd + +package xid + +import "syscall" + +func readPlatformMachineID() (string, error) { + return syscall.Sysctl("kern.hostuuid") +} diff --git a/vendor/github.com/rs/xid/hostid_linux.go b/vendor/github.com/rs/xid/hostid_linux.go new file mode 100644 index 000000000..837b20436 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_linux.go @@ -0,0 +1,13 @@ +// +build linux + +package xid + 
+import "io/ioutil" + +func readPlatformMachineID() (string, error) { + b, err := ioutil.ReadFile("/etc/machine-id") + if err != nil || len(b) == 0 { + b, err = ioutil.ReadFile("/sys/class/dmi/id/product_uuid") + } + return string(b), err +} diff --git a/vendor/github.com/rs/xid/hostid_windows.go b/vendor/github.com/rs/xid/hostid_windows.go new file mode 100644 index 000000000..ec2593ee3 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_windows.go @@ -0,0 +1,38 @@ +// +build windows + +package xid + +import ( + "fmt" + "syscall" + "unsafe" +) + +func readPlatformMachineID() (string, error) { + // source: https://github.com/shirou/gopsutil/blob/master/host/host_syscall.go + var h syscall.Handle + err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, syscall.StringToUTF16Ptr(`SOFTWARE\Microsoft\Cryptography`), 0, syscall.KEY_READ|syscall.KEY_WOW64_64KEY, &h) + if err != nil { + return "", err + } + defer syscall.RegCloseKey(h) + + const syscallRegBufLen = 74 // len(`{`) + len(`abcdefgh-1234-456789012-123345456671` * 2) + len(`}`) // 2 == bytes/UTF16 + const uuidLen = 36 + + var regBuf [syscallRegBufLen]uint16 + bufLen := uint32(syscallRegBufLen) + var valType uint32 + err = syscall.RegQueryValueEx(h, syscall.StringToUTF16Ptr(`MachineGuid`), nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen) + if err != nil { + return "", err + } + + hostID := syscall.UTF16ToString(regBuf[:]) + hostIDLen := len(hostID) + if hostIDLen != uuidLen { + return "", fmt.Errorf("HostID incorrect: %q\n", hostID) + } + + return hostID, nil +} diff --git a/vendor/github.com/rs/xid/id.go b/vendor/github.com/rs/xid/id.go new file mode 100644 index 000000000..1f536b415 --- /dev/null +++ b/vendor/github.com/rs/xid/id.go @@ -0,0 +1,392 @@ +// Package xid is a globally unique id generator suited for web scale +// +// Xid is using Mongo Object ID algorithm to generate globally unique ids: +// https://docs.mongodb.org/manual/reference/object-id/ +// +// - 4-byte value representing the seconds 
since the Unix epoch, +// - 3-byte machine identifier, +// - 2-byte process id, and +// - 3-byte counter, starting with a random value. +// +// The binary representation of the id is compatible with Mongo 12 bytes Object IDs. +// The string representation is using base32 hex (w/o padding) for better space efficiency +// when stored in that form (20 bytes). The hex variant of base32 is used to retain the +// sortable property of the id. +// +// Xid doesn't use base64 because case sensitivity and the 2 non alphanum chars may be an +// issue when transported as a string between various systems. Base36 wasn't retained either +// because 1/ it's not standard 2/ the resulting size is not predictable (not bit aligned) +// and 3/ it would not remain sortable. To validate a base32 `xid`, expect a 20 chars long, +// all lowercase sequence of `a` to `v` letters and `0` to `9` numbers (`[0-9a-v]{20}`). +// +// UUID is 16 bytes (128 bits), snowflake is 8 bytes (64 bits), xid stands in between +// with 12 bytes with a more compact string representation ready for the web and no +// required configuration or central generation server. +// +// Features: +// +// - Size: 12 bytes (96 bits), smaller than UUID, larger than snowflake +// - Base32 hex encoded by default (16 bytes storage when transported as printable string) +// - Non configured, you don't need set a unique machine and/or data center id +// - K-ordered +// - Embedded time with 1 second precision +// - Unicity guaranteed for 16,777,216 (24 bits) unique ids per second and per host/process +// +// Best used with xlog's RequestIDHandler (https://godoc.org/github.com/rs/xlog#RequestIDHandler). 
+// +// References: +// +// - http://www.slideshare.net/davegardnerisme/unique-id-generation-in-distributed-systems +// - https://en.wikipedia.org/wiki/Universally_unique_identifier +// - https://blog.twitter.com/2010/announcing-snowflake +package xid + +import ( + "bytes" + "crypto/md5" + "crypto/rand" + "database/sql/driver" + "encoding/binary" + "fmt" + "hash/crc32" + "io/ioutil" + "os" + "sort" + "sync/atomic" + "time" + "unsafe" +) + +// Code inspired from mgo/bson ObjectId + +// ID represents a unique request id +type ID [rawLen]byte + +const ( + encodedLen = 20 // string encoded len + rawLen = 12 // binary raw len + + // encoding stores a custom version of the base32 encoding with lower case + // letters. + encoding = "0123456789abcdefghijklmnopqrstuv" +) + +var ( + // objectIDCounter is atomically incremented when generating a new ObjectId + // using NewObjectId() function. It's used as a counter part of an id. + // This id is initialized with a random value. + objectIDCounter = randInt() + + // machineId stores machine id generated once and used in subsequent calls + // to NewObjectId function. + machineID = readMachineID() + + // pid stores the current process id + pid = os.Getpid() + + nilID ID + + // dec is the decoding map for base32 encoding + dec [256]byte +) + +func init() { + for i := 0; i < len(dec); i++ { + dec[i] = 0xFF + } + for i := 0; i < len(encoding); i++ { + dec[encoding[i]] = byte(i) + } + + // If /proc/self/cpuset exists and is not /, we can assume that we are in a + // form of container and use the content of cpuset xor-ed with the PID in + // order get a reasonable machine global unique PID. + b, err := ioutil.ReadFile("/proc/self/cpuset") + if err == nil && len(b) > 1 { + pid ^= int(crc32.ChecksumIEEE(b)) + } +} + +// readMachineId generates machine id and puts it into the machineId global +// variable. If this function fails to get the hostname, it will cause +// a runtime error. 
+func readMachineID() []byte { + id := make([]byte, 3) + hid, err := readPlatformMachineID() + if err != nil || len(hid) == 0 { + hid, err = os.Hostname() + } + if err == nil && len(hid) != 0 { + hw := md5.New() + hw.Write([]byte(hid)) + copy(id, hw.Sum(nil)) + } else { + // Fallback to rand number if machine id can't be gathered + if _, randErr := rand.Reader.Read(id); randErr != nil { + panic(fmt.Errorf("xid: cannot get hostname nor generate a random number: %v; %v", err, randErr)) + } + } + return id +} + +// randInt generates a random uint32 +func randInt() uint32 { + b := make([]byte, 3) + if _, err := rand.Reader.Read(b); err != nil { + panic(fmt.Errorf("xid: cannot generate random number: %v;", err)) + } + return uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]) +} + +// New generates a globally unique ID +func New() ID { + return NewWithTime(time.Now()) +} + +// NewWithTime generates a globally unique ID with the passed in time +func NewWithTime(t time.Time) ID { + var id ID + // Timestamp, 4 bytes, big endian + binary.BigEndian.PutUint32(id[:], uint32(t.Unix())) + // Machine, first 3 bytes of md5(hostname) + id[4] = machineID[0] + id[5] = machineID[1] + id[6] = machineID[2] + // Pid, 2 bytes, specs don't specify endianness, but we use big endian. + id[7] = byte(pid >> 8) + id[8] = byte(pid) + // Increment, 3 bytes, big endian + i := atomic.AddUint32(&objectIDCounter, 1) + id[9] = byte(i >> 16) + id[10] = byte(i >> 8) + id[11] = byte(i) + return id +} + +// FromString reads an ID from its string representation +func FromString(id string) (ID, error) { + i := &ID{} + err := i.UnmarshalText([]byte(id)) + return *i, err +} + +// String returns a base32 hex lowercased with no padding representation of the id (char set is 0-9, a-v). +func (id ID) String() string { + text := make([]byte, encodedLen) + encode(text, id[:]) + return *(*string)(unsafe.Pointer(&text)) +} + +// Encode encodes the id using base32 encoding, writing 20 bytes to dst and return it. 
+func (id ID) Encode(dst []byte) []byte { + encode(dst, id[:]) + return dst +} + +// MarshalText implements encoding/text TextMarshaler interface +func (id ID) MarshalText() ([]byte, error) { + text := make([]byte, encodedLen) + encode(text, id[:]) + return text, nil +} + +// MarshalJSON implements encoding/json Marshaler interface +func (id ID) MarshalJSON() ([]byte, error) { + if id.IsNil() { + return []byte("null"), nil + } + text := make([]byte, encodedLen+2) + encode(text[1:encodedLen+1], id[:]) + text[0], text[encodedLen+1] = '"', '"' + return text, nil +} + +// encode by unrolling the stdlib base32 algorithm + removing all safe checks +func encode(dst, id []byte) { + _ = dst[19] + _ = id[11] + + dst[19] = encoding[(id[11]<<4)&0x1F] + dst[18] = encoding[(id[11]>>1)&0x1F] + dst[17] = encoding[(id[11]>>6)&0x1F|(id[10]<<2)&0x1F] + dst[16] = encoding[id[10]>>3] + dst[15] = encoding[id[9]&0x1F] + dst[14] = encoding[(id[9]>>5)|(id[8]<<3)&0x1F] + dst[13] = encoding[(id[8]>>2)&0x1F] + dst[12] = encoding[id[8]>>7|(id[7]<<1)&0x1F] + dst[11] = encoding[(id[7]>>4)&0x1F|(id[6]<<4)&0x1F] + dst[10] = encoding[(id[6]>>1)&0x1F] + dst[9] = encoding[(id[6]>>6)&0x1F|(id[5]<<2)&0x1F] + dst[8] = encoding[id[5]>>3] + dst[7] = encoding[id[4]&0x1F] + dst[6] = encoding[id[4]>>5|(id[3]<<3)&0x1F] + dst[5] = encoding[(id[3]>>2)&0x1F] + dst[4] = encoding[id[3]>>7|(id[2]<<1)&0x1F] + dst[3] = encoding[(id[2]>>4)&0x1F|(id[1]<<4)&0x1F] + dst[2] = encoding[(id[1]>>1)&0x1F] + dst[1] = encoding[(id[1]>>6)&0x1F|(id[0]<<2)&0x1F] + dst[0] = encoding[id[0]>>3] +} + +// UnmarshalText implements encoding/text TextUnmarshaler interface +func (id *ID) UnmarshalText(text []byte) error { + if len(text) != encodedLen { + return ErrInvalidID + } + for _, c := range text { + if dec[c] == 0xFF { + return ErrInvalidID + } + } + if !decode(id, text) { + return ErrInvalidID + } + return nil +} + +// UnmarshalJSON implements encoding/json Unmarshaler interface +func (id *ID) UnmarshalJSON(b []byte) error { + s := 
string(b)
+	if s == "null" {
+		*id = nilID
+		return nil
+	}
+	// Check the slice length to prevent panic on passing it to UnmarshalText()
+	if len(b) < 2 {
+		return ErrInvalidID
+	}
+	return id.UnmarshalText(b[1 : len(b)-1])
+}
+
+// decode by unrolling the stdlib base32 algorithm + customized safe check.
+func decode(id *ID, src []byte) bool {
+	_ = src[19]
+	_ = id[11]
+
+	id[11] = dec[src[17]]<<6 | dec[src[18]]<<1 | dec[src[19]]>>4
+	id[10] = dec[src[16]]<<3 | dec[src[17]]>>2
+	id[9] = dec[src[14]]<<5 | dec[src[15]]
+	id[8] = dec[src[12]]<<7 | dec[src[13]]<<2 | dec[src[14]]>>3
+	id[7] = dec[src[11]]<<4 | dec[src[12]]>>1
+	id[6] = dec[src[9]]<<6 | dec[src[10]]<<1 | dec[src[11]]>>4
+	id[5] = dec[src[8]]<<3 | dec[src[9]]>>2
+	id[4] = dec[src[6]]<<5 | dec[src[7]]
+	id[3] = dec[src[4]]<<7 | dec[src[5]]<<2 | dec[src[6]]>>3
+	id[2] = dec[src[3]]<<4 | dec[src[4]]>>1
+	id[1] = dec[src[1]]<<6 | dec[src[2]]<<1 | dec[src[3]]>>4
+	id[0] = dec[src[0]]<<3 | dec[src[1]]>>2
+
+	// Validate that there are no discarded bits (padding) in src that would
+	// cause the string-encoded id not to equal src.
+	var check [4]byte
+
+	check[3] = encoding[(id[11]<<4)&0x1F]
+	check[2] = encoding[(id[11]>>1)&0x1F]
+	check[1] = encoding[(id[11]>>6)&0x1F|(id[10]<<2)&0x1F]
+	check[0] = encoding[id[10]>>3]
+	return bytes.Equal([]byte(src[16:20]), check[:])
+}
+
+// Time returns the timestamp part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ID) Time() time.Time {
+	// First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
+	secs := int64(binary.BigEndian.Uint32(id[0:4]))
+	return time.Unix(secs, 0)
+}
+
+// Machine returns the 3-byte machine id part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ID) Machine() []byte {
+	return id[4:7]
+}
+
+// Pid returns the process id part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ID) Pid() uint16 { + return binary.BigEndian.Uint16(id[7:9]) +} + +// Counter returns the incrementing value part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ID) Counter() int32 { + b := id[9:12] + // Counter is stored as big-endian 3-byte value + return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2])) +} + +// Value implements the driver.Valuer interface. +func (id ID) Value() (driver.Value, error) { + if id.IsNil() { + return nil, nil + } + b, err := id.MarshalText() + return string(b), err +} + +// Scan implements the sql.Scanner interface. +func (id *ID) Scan(value interface{}) (err error) { + switch val := value.(type) { + case string: + return id.UnmarshalText([]byte(val)) + case []byte: + return id.UnmarshalText(val) + case nil: + *id = nilID + return nil + default: + return fmt.Errorf("xid: scanning unsupported type: %T", value) + } +} + +// IsNil Returns true if this is a "nil" ID +func (id ID) IsNil() bool { + return id == nilID +} + +// NilID returns a zero value for `xid.ID`. +func NilID() ID { + return nilID +} + +// Bytes returns the byte array representation of `ID` +func (id ID) Bytes() []byte { + return id[:] +} + +// FromBytes convert the byte array representation of `ID` back to `ID` +func FromBytes(b []byte) (ID, error) { + var id ID + if len(b) != rawLen { + return id, ErrInvalidID + } + copy(id[:], b) + return id, nil +} + +// Compare returns an integer comparing two IDs. It behaves just like `bytes.Compare`. +// The result will be 0 if two IDs are identical, -1 if current id is less than the other one, +// and 1 if current id is greater than the other. +func (id ID) Compare(other ID) int { + return bytes.Compare(id[:], other[:]) +} + +type sorter []ID + +func (s sorter) Len() int { + return len(s) +} + +func (s sorter) Less(i, j int) bool { + return s[i].Compare(s[j]) < 0 +} + +func (s sorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Sort sorts an array of IDs inplace. 
+// It works by wrapping `[]ID` and use `sort.Sort`. +func Sort(ids []ID) { + sort.Sort(sorter(ids)) +} diff --git a/vendor/github.com/rs/zerolog/.gitignore b/vendor/github.com/rs/zerolog/.gitignore new file mode 100644 index 000000000..8ebe58b15 --- /dev/null +++ b/vendor/github.com/rs/zerolog/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +tmp + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/rs/zerolog/CNAME b/vendor/github.com/rs/zerolog/CNAME new file mode 100644 index 000000000..9ce57a6eb --- /dev/null +++ b/vendor/github.com/rs/zerolog/CNAME @@ -0,0 +1 @@ +zerolog.io \ No newline at end of file diff --git a/vendor/github.com/rs/zerolog/LICENSE b/vendor/github.com/rs/zerolog/LICENSE new file mode 100644 index 000000000..677e07f7a --- /dev/null +++ b/vendor/github.com/rs/zerolog/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Olivier Poitrey + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/rs/zerolog/README.md b/vendor/github.com/rs/zerolog/README.md new file mode 100644 index 000000000..95666b32e --- /dev/null +++ b/vendor/github.com/rs/zerolog/README.md @@ -0,0 +1,716 @@ +# Zero Allocation JSON Logger + +[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/zerolog) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/zerolog/master/LICENSE) [![Build Status](https://travis-ci.org/rs/zerolog.svg?branch=master)](https://travis-ci.org/rs/zerolog) [![Coverage](http://gocover.io/_badge/github.com/rs/zerolog)](http://gocover.io/github.com/rs/zerolog) + +The zerolog package provides a fast and simple logger dedicated to JSON output. + +Zerolog's API is designed to provide both a great developer experience and stunning [performance](#benchmarks). Its unique chaining API allows zerolog to write JSON (or CBOR) log events by avoiding allocations and reflection. + +Uber's [zap](https://godoc.org/go.uber.org/zap) library pioneered this approach. Zerolog is taking this concept to the next level with a simpler to use API and even better performance. + +To keep the code base and the API simple, zerolog focuses on efficient structured logging only. Pretty logging on the console is made possible using the provided (but inefficient) [`zerolog.ConsoleWriter`](#pretty-logging). + +![Pretty Logging Image](pretty.png) + +## Who uses zerolog + +Find out [who uses zerolog](https://github.com/rs/zerolog/wiki/Who-uses-zerolog) and add your company / project to the list. 
+ +## Features + +* [Blazing fast](#benchmarks) +* [Low to zero allocation](#benchmarks) +* [Leveled logging](#leveled-logging) +* [Sampling](#log-sampling) +* [Hooks](#hooks) +* [Contextual fields](#contextual-logging) +* `context.Context` integration +* [Integration with `net/http`](#integration-with-nethttp) +* [JSON and CBOR encoding formats](#binary-encoding) +* [Pretty logging for development](#pretty-logging) +* [Error Logging (with optional Stacktrace)](#error-logging) + +## Installation + +```bash +go get -u github.com/rs/zerolog/log +``` + +## Getting Started + +### Simple Logging Example + +For simple logging, import the global logger package **github.com/rs/zerolog/log** + +```go +package main + +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + // UNIX Time is faster and smaller than most timestamps + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + log.Print("hello world") +} + +// Output: {"time":1516134303,"level":"debug","message":"hello world"} +``` +> Note: By default log writes to `os.Stderr` +> Note: The default log level for `log.Print` is *debug* + +### Contextual Logging + +**zerolog** allows data to be added to log messages in the form of key:value pairs. The data added to the message adds "context" about the log event that can be critical for debugging as well as myriad other purposes. An example of this is below: + +```go +package main + +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + log.Debug(). + Str("Scale", "833 cents"). + Float64("Interval", 833.09). + Msg("Fibonacci is everywhere") + + log.Debug(). + Str("Name", "Tom"). 
+ Send() +} + +// Output: {"level":"debug","Scale":"833 cents","Interval":833.09,"time":1562212768,"message":"Fibonacci is everywhere"} +// Output: {"level":"debug","Name":"Tom","time":1562212768} +``` + +> You'll note in the above example that when adding contextual fields, the fields are strongly typed. You can find the full list of supported fields [here](#standard-types) + +### Leveled Logging + +#### Simple Leveled Logging Example + +```go +package main + +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + log.Info().Msg("hello world") +} + +// Output: {"time":1516134303,"level":"info","message":"hello world"} +``` + +> It is very important to note that when using the **zerolog** chaining API, as shown above (`log.Info().Msg("hello world"`), the chain must have either the `Msg` or `Msgf` method call. If you forget to add either of these, the log will not occur and there is no compile time error to alert you of this. + +**zerolog** allows for logging at the following levels (from highest to lowest): + +* panic (`zerolog.PanicLevel`, 5) +* fatal (`zerolog.FatalLevel`, 4) +* error (`zerolog.ErrorLevel`, 3) +* warn (`zerolog.WarnLevel`, 2) +* info (`zerolog.InfoLevel`, 1) +* debug (`zerolog.DebugLevel`, 0) +* trace (`zerolog.TraceLevel`, -1) + +You can set the Global logging level to any of these options using the `SetGlobalLevel` function in the zerolog package, passing in one of the given constants above, e.g. `zerolog.InfoLevel` would be the "info" level. Whichever level is chosen, all logs with a level greater than or equal to that level will be written. To turn off logging entirely, pass the `zerolog.Disabled` constant. + +#### Setting Global Log Level + +This example uses command-line flags to demonstrate various outputs depending on the chosen log level. 
+ +```go +package main + +import ( + "flag" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + debug := flag.Bool("debug", false, "sets log level to debug") + + flag.Parse() + + // Default level for this example is info, unless debug flag is present + zerolog.SetGlobalLevel(zerolog.InfoLevel) + if *debug { + zerolog.SetGlobalLevel(zerolog.DebugLevel) + } + + log.Debug().Msg("This message appears only when log level set to Debug") + log.Info().Msg("This message appears when log level set to Debug or Info") + + if e := log.Debug(); e.Enabled() { + // Compute log output only if enabled. + value := "bar" + e.Str("foo", value).Msg("some debug message") + } +} +``` + +Info Output (no flag) + +```bash +$ ./logLevelExample +{"time":1516387492,"level":"info","message":"This message appears when log level set to Debug or Info"} +``` + +Debug Output (debug flag set) + +```bash +$ ./logLevelExample -debug +{"time":1516387573,"level":"debug","message":"This message appears only when log level set to Debug"} +{"time":1516387573,"level":"info","message":"This message appears when log level set to Debug or Info"} +{"time":1516387573,"level":"debug","foo":"bar","message":"some debug message"} +``` + +#### Logging without Level or Message + +You may choose to log without a specific level by using the `Log` method. You may also write without a message by setting an empty string in the `msg string` parameter of the `Msg` method. Both are demonstrated in the example below. + +```go +package main + +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + log.Log(). + Str("foo", "bar"). 
+ Msg("") +} + +// Output: {"time":1494567715,"foo":"bar"} +``` + +### Error Logging + +You can log errors using the `Err` method + +```go +package main + +import ( + "errors" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + err := errors.New("seems we have an error here") + log.Error().Err(err).Msg("") +} + +// Output: {"level":"error","error":"seems we have an error here","time":1609085256} +``` + +> The default field name for errors is `error`, you can change this by setting `zerolog.ErrorFieldName` to meet your needs. + +#### Error Logging with Stacktrace + +Using `github.com/pkg/errors`, you can add a formatted stacktrace to your errors. + +```go +package main + +import ( + "github.com/pkg/errors" + "github.com/rs/zerolog/pkgerrors" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack + + err := outer() + log.Error().Stack().Err(err).Msg("") +} + +func inner() error { + return errors.New("seems we have an error here") +} + +func middle() error { + err := inner() + if err != nil { + return err + } + return nil +} + +func outer() error { + err := middle() + if err != nil { + return err + } + return nil +} + +// Output: {"level":"error","stack":[{"func":"inner","line":"20","source":"errors.go"},{"func":"middle","line":"24","source":"errors.go"},{"func":"outer","line":"32","source":"errors.go"},{"func":"main","line":"15","source":"errors.go"},{"func":"main","line":"204","source":"proc.go"},{"func":"goexit","line":"1374","source":"asm_amd64.s"}],"error":"seems we have an error here","time":1609086683} +``` + +> zerolog.ErrorStackMarshaler must be set in order for the stack to output anything. 
+ +#### Logging Fatal Messages + +```go +package main + +import ( + "errors" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + err := errors.New("A repo man spends his life getting into tense situations") + service := "myservice" + + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + log.Fatal(). + Err(err). + Str("service", service). + Msgf("Cannot start %s", service) +} + +// Output: {"time":1516133263,"level":"fatal","error":"A repo man spends his life getting into tense situations","service":"myservice","message":"Cannot start myservice"} +// exit status 1 +``` + +> NOTE: Using `Msgf` generates one allocation even when the logger is disabled. + + +### Create logger instance to manage different outputs + +```go +logger := zerolog.New(os.Stderr).With().Timestamp().Logger() + +logger.Info().Str("foo", "bar").Msg("hello world") + +// Output: {"level":"info","time":1494567715,"message":"hello world","foo":"bar"} +``` + +### Sub-loggers let you chain loggers with additional context + +```go +sublogger := log.With(). + Str("component", "foo"). 
+ Logger() +sublogger.Info().Msg("hello world") + +// Output: {"level":"info","time":1494567715,"message":"hello world","component":"foo"} +``` + +### Pretty logging + +To log a human-friendly, colorized output, use `zerolog.ConsoleWriter`: + +```go +log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}) + +log.Info().Str("foo", "bar").Msg("Hello world") + +// Output: 3:04PM INF Hello World foo=bar +``` + +To customize the configuration and formatting: + +```go +output := zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: time.RFC3339} +output.FormatLevel = func(i interface{}) string { + return strings.ToUpper(fmt.Sprintf("| %-6s|", i)) +} +output.FormatMessage = func(i interface{}) string { + return fmt.Sprintf("***%s****", i) +} +output.FormatFieldName = func(i interface{}) string { + return fmt.Sprintf("%s:", i) +} +output.FormatFieldValue = func(i interface{}) string { + return strings.ToUpper(fmt.Sprintf("%s", i)) +} + +log := zerolog.New(output).With().Timestamp().Logger() + +log.Info().Str("foo", "bar").Msg("Hello World") + +// Output: 2006-01-02T15:04:05Z07:00 | INFO | ***Hello World**** foo:BAR +``` + +### Sub dictionary + +```go +log.Info(). + Str("foo", "bar"). + Dict("dict", zerolog.Dict(). + Str("bar", "baz"). 
+ Int("n", 1), + ).Msg("hello world") + +// Output: {"level":"info","time":1494567715,"foo":"bar","dict":{"bar":"baz","n":1},"message":"hello world"} +``` + +### Customize automatic field names + +```go +zerolog.TimestampFieldName = "t" +zerolog.LevelFieldName = "l" +zerolog.MessageFieldName = "m" + +log.Info().Msg("hello world") + +// Output: {"l":"info","t":1494567715,"m":"hello world"} +``` + +### Add contextual fields to the global logger + +```go +log.Logger = log.With().Str("foo", "bar").Logger() +``` + +### Add file and line number to log + +Equivalent of `Llongfile`: + +```go +log.Logger = log.With().Caller().Logger() +log.Info().Msg("hello world") + +// Output: {"level": "info", "message": "hello world", "caller": "/go/src/your_project/some_file:21"} +``` + +Equivalent of `Lshortfile`: + +```go +zerolog.CallerMarshalFunc = func(file string, line int) string { + short := file + for i := len(file) - 1; i > 0; i-- { + if file[i] == '/' { + short = file[i+1:] + break + } + } + file = short + return file + ":" + strconv.Itoa(line) +} +log.Logger = log.With().Caller().Logger() +log.Info().Msg("hello world") + +// Output: {"level": "info", "message": "hello world", "caller": "some_file:21"} +``` + +### Thread-safe, lock-free, non-blocking writer + +If your writer might be slow or not thread-safe and you need your log producers to never get slowed down by a slow writer, you can use a `diode.Writer` as follows: + +```go +wr := diode.NewWriter(os.Stdout, 1000, 10*time.Millisecond, func(missed int) { + fmt.Printf("Logger Dropped %d messages", missed) + }) +log := zerolog.New(wr) +log.Print("test") +``` + +You will need to install `code.cloudfoundry.org/go-diodes` to use this feature. 
+ +### Log Sampling + +```go +sampled := log.Sample(&zerolog.BasicSampler{N: 10}) +sampled.Info().Msg("will be logged every 10 messages") + +// Output: {"time":1494567715,"level":"info","message":"will be logged every 10 messages"} +``` + +More advanced sampling: + +```go +// Will let 5 debug messages per period of 1 second. +// Over 5 debug message, 1 every 100 debug messages are logged. +// Other levels are not sampled. +sampled := log.Sample(zerolog.LevelSampler{ + DebugSampler: &zerolog.BurstSampler{ + Burst: 5, + Period: 1*time.Second, + NextSampler: &zerolog.BasicSampler{N: 100}, + }, +}) +sampled.Debug().Msg("hello world") + +// Output: {"time":1494567715,"level":"debug","message":"hello world"} +``` + +### Hooks + +```go +type SeverityHook struct{} + +func (h SeverityHook) Run(e *zerolog.Event, level zerolog.Level, msg string) { + if level != zerolog.NoLevel { + e.Str("severity", level.String()) + } +} + +hooked := log.Hook(SeverityHook{}) +hooked.Warn().Msg("") + +// Output: {"level":"warn","severity":"warn"} +``` + +### Pass a sub-logger by context + +```go +ctx := log.With().Str("component", "module").Logger().WithContext(ctx) + +log.Ctx(ctx).Info().Msg("hello world") + +// Output: {"component":"module","level":"info","message":"hello world"} +``` + +### Set as standard logger output + +```go +log := zerolog.New(os.Stdout).With(). + Str("foo", "bar"). + Logger() + +stdlog.SetFlags(0) +stdlog.SetOutput(log) + +stdlog.Print("hello world") + +// Output: {"foo":"bar","message":"hello world"} +``` + +### Integration with `net/http` + +The `github.com/rs/zerolog/hlog` package provides some helpers to integrate zerolog with `http.Handler`. + +In this example we use [alice](https://github.com/justinas/alice) to install logger for better readability. + +```go +log := zerolog.New(os.Stdout).With(). + Timestamp(). + Str("role", "my-service"). + Str("host", host). 
+ Logger() + +c := alice.New() + +// Install the logger handler with default output on the console +c = c.Append(hlog.NewHandler(log)) + +// Install some provided extra handler to set some request's context fields. +// Thanks to that handler, all our logs will come with some prepopulated fields. +c = c.Append(hlog.AccessHandler(func(r *http.Request, status, size int, duration time.Duration) { + hlog.FromRequest(r).Info(). + Str("method", r.Method). + Stringer("url", r.URL). + Int("status", status). + Int("size", size). + Dur("duration", duration). + Msg("") +})) +c = c.Append(hlog.RemoteAddrHandler("ip")) +c = c.Append(hlog.UserAgentHandler("user_agent")) +c = c.Append(hlog.RefererHandler("referer")) +c = c.Append(hlog.RequestIDHandler("req_id", "Request-Id")) + +// Here is your final handler +h := c.Then(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Get the logger from the request's context. You can safely assume it + // will be always there: if the handler is removed, hlog.FromRequest + // will return a no-op logger. + hlog.FromRequest(r).Info(). + Str("user", "current user"). + Str("status", "ok"). + Msg("Something happened") + + // Output: {"level":"info","time":"2001-02-03T04:05:06Z","role":"my-service","host":"local-hostname","req_id":"b4g0l5t6tfid6dtrapu0","user":"current user","status":"ok","message":"Something happened"} +})) +http.Handle("/", h) + +if err := http.ListenAndServe(":8080", nil); err != nil { + log.Fatal().Err(err).Msg("Startup failed") +} +``` + +## Multiple Log Output +`zerolog.MultiLevelWriter` may be used to send the log message to multiple outputs. +In this example, we send the log message to both `os.Stdout` and the in-built ConsoleWriter. 
+```go +func main() { + consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout} + + multi := zerolog.MultiLevelWriter(consoleWriter, os.Stdout) + + logger := zerolog.New(multi).With().Timestamp().Logger() + + logger.Info().Msg("Hello World!") +} + +// Output (Line 1: Console; Line 2: Stdout) +// 12:36PM INF Hello World! +// {"level":"info","time":"2019-11-07T12:36:38+03:00","message":"Hello World!"} +``` + +## Global Settings + +Some settings can be changed and will be applied to all loggers: + +* `log.Logger`: You can set this value to customize the global logger (the one used by package level methods). +* `zerolog.SetGlobalLevel`: Can raise the minimum level of all loggers. Call this with `zerolog.Disabled` to disable logging altogether (quiet mode). +* `zerolog.DisableSampling`: If argument is `true`, all sampled loggers will stop sampling and issue 100% of their log events. +* `zerolog.TimestampFieldName`: Can be set to customize `Timestamp` field name. +* `zerolog.LevelFieldName`: Can be set to customize level field name. +* `zerolog.MessageFieldName`: Can be set to customize message field name. +* `zerolog.ErrorFieldName`: Can be set to customize `Err` field name. +* `zerolog.TimeFieldFormat`: Can be set to customize `Time` field value formatting. If set with `zerolog.TimeFormatUnix`, `zerolog.TimeFormatUnixMs` or `zerolog.TimeFormatUnixMicro`, times are formated as UNIX timestamp. +* `zerolog.DurationFieldUnit`: Can be set to customize the unit for time.Duration type fields added by `Dur` (default: `time.Millisecond`). +* `zerolog.DurationFieldInteger`: If set to `true`, `Dur` fields are formatted as integers instead of floats (default: `false`). +* `zerolog.ErrorHandler`: Called whenever zerolog fails to write an event on its output. If not set, an error is printed on the stderr. This handler must be thread safe and non-blocking. 
+ +## Field Types + +### Standard Types + +* `Str` +* `Bool` +* `Int`, `Int8`, `Int16`, `Int32`, `Int64` +* `Uint`, `Uint8`, `Uint16`, `Uint32`, `Uint64` +* `Float32`, `Float64` + +### Advanced Fields + +* `Err`: Takes an `error` and renders it as a string using the `zerolog.ErrorFieldName` field name. +* `Func`: Run a `func` only if the level is enabled. +* `Timestamp`: Inserts a timestamp field with `zerolog.TimestampFieldName` field name, formatted using `zerolog.TimeFieldFormat`. +* `Time`: Adds a field with time formatted with `zerolog.TimeFieldFormat`. +* `Dur`: Adds a field with `time.Duration`. +* `Dict`: Adds a sub-key/value as a field of the event. +* `RawJSON`: Adds a field with an already encoded JSON (`[]byte`) +* `Hex`: Adds a field with value formatted as a hexadecimal string (`[]byte`) +* `Interface`: Uses reflection to marshal the type. + +Most fields are also available in the slice format (`Strs` for `[]string`, `Errs` for `[]error` etc.) + +## Binary Encoding + +In addition to the default JSON encoding, `zerolog` can produce binary logs using [CBOR](https://cbor.io) encoding. The choice of encoding can be decided at compile time using the build tag `binary_log` as follows: + +```bash +go build -tags binary_log . +``` + +To Decode binary encoded log files you can use any CBOR decoder. One has been tested to work +with zerolog library is [CSD](https://github.com/toravir/csd/). + +## Related Projects + +* [grpc-zerolog](https://github.com/cheapRoc/grpc-zerolog): Implementation of `grpclog.LoggerV2` interface using `zerolog` +* [overlog](https://github.com/Trendyol/overlog): Implementation of `Mapped Diagnostic Context` interface using `zerolog` +* [zerologr](https://github.com/go-logr/zerologr): Implementation of `logr.LogSink` interface using `zerolog` + +## Benchmarks + +See [logbench](http://hackemist.com/logbench/) for more comprehensive and up-to-date benchmarks. 
+ +All operations are allocation free (those numbers *include* JSON encoding): + +```text +BenchmarkLogEmpty-8 100000000 19.1 ns/op 0 B/op 0 allocs/op +BenchmarkDisabled-8 500000000 4.07 ns/op 0 B/op 0 allocs/op +BenchmarkInfo-8 30000000 42.5 ns/op 0 B/op 0 allocs/op +BenchmarkContextFields-8 30000000 44.9 ns/op 0 B/op 0 allocs/op +BenchmarkLogFields-8 10000000 184 ns/op 0 B/op 0 allocs/op +``` + +There are a few Go logging benchmarks and comparisons that include zerolog. + +* [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) +* [uber-common/zap](https://github.com/uber-go/zap#performance) + +Using Uber's zap comparison benchmark: + +Log a message and 10 fields: + +| Library | Time | Bytes Allocated | Objects Allocated | +| :--- | :---: | :---: | :---: | +| zerolog | 767 ns/op | 552 B/op | 6 allocs/op | +| :zap: zap | 848 ns/op | 704 B/op | 2 allocs/op | +| :zap: zap (sugared) | 1363 ns/op | 1610 B/op | 20 allocs/op | +| go-kit | 3614 ns/op | 2895 B/op | 66 allocs/op | +| lion | 5392 ns/op | 5807 B/op | 63 allocs/op | +| logrus | 5661 ns/op | 6092 B/op | 78 allocs/op | +| apex/log | 15332 ns/op | 3832 B/op | 65 allocs/op | +| log15 | 20657 ns/op | 5632 B/op | 93 allocs/op | + +Log a message with a logger that already has 10 fields of context: + +| Library | Time | Bytes Allocated | Objects Allocated | +| :--- | :---: | :---: | :---: | +| zerolog | 52 ns/op | 0 B/op | 0 allocs/op | +| :zap: zap | 283 ns/op | 0 B/op | 0 allocs/op | +| :zap: zap (sugared) | 337 ns/op | 80 B/op | 2 allocs/op | +| lion | 2702 ns/op | 4074 B/op | 38 allocs/op | +| go-kit | 3378 ns/op | 3046 B/op | 52 allocs/op | +| logrus | 4309 ns/op | 4564 B/op | 63 allocs/op | +| apex/log | 13456 ns/op | 2898 B/op | 51 allocs/op | +| log15 | 14179 ns/op | 2642 B/op | 44 allocs/op | + +Log a static string, without any context or `printf`-style templating: + +| Library | Time | Bytes Allocated | Objects Allocated | +| :--- | :---: | :---: | :---: | +| zerolog | 50 ns/op | 0 B/op | 0 
allocs/op | +| :zap: zap | 236 ns/op | 0 B/op | 0 allocs/op | +| standard library | 453 ns/op | 80 B/op | 2 allocs/op | +| :zap: zap (sugared) | 337 ns/op | 80 B/op | 2 allocs/op | +| go-kit | 508 ns/op | 656 B/op | 13 allocs/op | +| lion | 771 ns/op | 1224 B/op | 10 allocs/op | +| logrus | 1244 ns/op | 1505 B/op | 27 allocs/op | +| apex/log | 2751 ns/op | 584 B/op | 11 allocs/op | +| log15 | 5181 ns/op | 1592 B/op | 26 allocs/op | + +## Caveats + +Note that zerolog does no de-duplication of fields. Using the same key multiple times creates multiple keys in final JSON: + +```go +logger := zerolog.New(os.Stderr).With().Timestamp().Logger() +logger.Info(). + Timestamp(). + Msg("dup") +// Output: {"level":"info","time":1494567715,"time":1494567715,"message":"dup"} +``` + +In this case, many consumers will take the last value, but this is not guaranteed; check yours if in doubt. diff --git a/vendor/github.com/rs/zerolog/_config.yml b/vendor/github.com/rs/zerolog/_config.yml new file mode 100644 index 000000000..a1e896d7b --- /dev/null +++ b/vendor/github.com/rs/zerolog/_config.yml @@ -0,0 +1 @@ +remote_theme: rs/gh-readme diff --git a/vendor/github.com/rs/zerolog/array.go b/vendor/github.com/rs/zerolog/array.go new file mode 100644 index 000000000..c75c05200 --- /dev/null +++ b/vendor/github.com/rs/zerolog/array.go @@ -0,0 +1,240 @@ +package zerolog + +import ( + "net" + "sync" + "time" +) + +var arrayPool = &sync.Pool{ + New: func() interface{} { + return &Array{ + buf: make([]byte, 0, 500), + } + }, +} + +// Array is used to prepopulate an array of items +// which can be re-used to add to log messages. +type Array struct { + buf []byte +} + +func putArray(a *Array) { + // Proper usage of a sync.Pool requires each entry to have approximately + // the same memory cost. To obtain this property when the stored type + // contains a variably-sized buffer, we add a hard limit on the maximum buffer + // to place back in the pool. 
+ // + // See https://golang.org/issue/23199 + const maxSize = 1 << 16 // 64KiB + if cap(a.buf) > maxSize { + return + } + arrayPool.Put(a) +} + +// Arr creates an array to be added to an Event or Context. +func Arr() *Array { + a := arrayPool.Get().(*Array) + a.buf = a.buf[:0] + return a +} + +// MarshalZerologArray method here is no-op - since data is +// already in the needed format. +func (*Array) MarshalZerologArray(*Array) { +} + +func (a *Array) write(dst []byte) []byte { + dst = enc.AppendArrayStart(dst) + if len(a.buf) > 0 { + dst = append(dst, a.buf...) + } + dst = enc.AppendArrayEnd(dst) + putArray(a) + return dst +} + +// Object marshals an object that implement the LogObjectMarshaler +// interface and append append it to the array. +func (a *Array) Object(obj LogObjectMarshaler) *Array { + e := Dict() + obj.MarshalZerologObject(e) + e.buf = enc.AppendEndMarker(e.buf) + a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...) + putEvent(e) + return a +} + +// Str append append the val as a string to the array. +func (a *Array) Str(val string) *Array { + a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), val) + return a +} + +// Bytes append append the val as a string to the array. +func (a *Array) Bytes(val []byte) *Array { + a.buf = enc.AppendBytes(enc.AppendArrayDelim(a.buf), val) + return a +} + +// Hex append append the val as a hex string to the array. +func (a *Array) Hex(val []byte) *Array { + a.buf = enc.AppendHex(enc.AppendArrayDelim(a.buf), val) + return a +} + +// RawJSON adds already encoded JSON to the array. +func (a *Array) RawJSON(val []byte) *Array { + a.buf = appendJSON(enc.AppendArrayDelim(a.buf), val) + return a +} + +// Err serializes and appends the err to the array. +func (a *Array) Err(err error) *Array { + switch m := ErrorMarshalFunc(err).(type) { + case LogObjectMarshaler: + e := newEvent(nil, 0) + e.buf = e.buf[:0] + e.appendObject(m) + a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...) 
+ putEvent(e) + case error: + if m == nil || isNilValue(m) { + a.buf = enc.AppendNil(enc.AppendArrayDelim(a.buf)) + } else { + a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m.Error()) + } + case string: + a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m) + default: + a.buf = enc.AppendInterface(enc.AppendArrayDelim(a.buf), m) + } + + return a +} + +// Bool append append the val as a bool to the array. +func (a *Array) Bool(b bool) *Array { + a.buf = enc.AppendBool(enc.AppendArrayDelim(a.buf), b) + return a +} + +// Int append append i as a int to the array. +func (a *Array) Int(i int) *Array { + a.buf = enc.AppendInt(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Int8 append append i as a int8 to the array. +func (a *Array) Int8(i int8) *Array { + a.buf = enc.AppendInt8(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Int16 append append i as a int16 to the array. +func (a *Array) Int16(i int16) *Array { + a.buf = enc.AppendInt16(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Int32 append append i as a int32 to the array. +func (a *Array) Int32(i int32) *Array { + a.buf = enc.AppendInt32(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Int64 append append i as a int64 to the array. +func (a *Array) Int64(i int64) *Array { + a.buf = enc.AppendInt64(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Uint append append i as a uint to the array. +func (a *Array) Uint(i uint) *Array { + a.buf = enc.AppendUint(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Uint8 append append i as a uint8 to the array. +func (a *Array) Uint8(i uint8) *Array { + a.buf = enc.AppendUint8(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Uint16 append append i as a uint16 to the array. +func (a *Array) Uint16(i uint16) *Array { + a.buf = enc.AppendUint16(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Uint32 append append i as a uint32 to the array. 
+func (a *Array) Uint32(i uint32) *Array { + a.buf = enc.AppendUint32(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Uint64 append append i as a uint64 to the array. +func (a *Array) Uint64(i uint64) *Array { + a.buf = enc.AppendUint64(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Float32 append append f as a float32 to the array. +func (a *Array) Float32(f float32) *Array { + a.buf = enc.AppendFloat32(enc.AppendArrayDelim(a.buf), f) + return a +} + +// Float64 append append f as a float64 to the array. +func (a *Array) Float64(f float64) *Array { + a.buf = enc.AppendFloat64(enc.AppendArrayDelim(a.buf), f) + return a +} + +// Time append append t formatted as string using zerolog.TimeFieldFormat. +func (a *Array) Time(t time.Time) *Array { + a.buf = enc.AppendTime(enc.AppendArrayDelim(a.buf), t, TimeFieldFormat) + return a +} + +// Dur append append d to the array. +func (a *Array) Dur(d time.Duration) *Array { + a.buf = enc.AppendDuration(enc.AppendArrayDelim(a.buf), d, DurationFieldUnit, DurationFieldInteger) + return a +} + +// Interface append append i marshaled using reflection. 
+func (a *Array) Interface(i interface{}) *Array { + if obj, ok := i.(LogObjectMarshaler); ok { + return a.Object(obj) + } + a.buf = enc.AppendInterface(enc.AppendArrayDelim(a.buf), i) + return a +} + +// IPAddr adds IPv4 or IPv6 address to the array +func (a *Array) IPAddr(ip net.IP) *Array { + a.buf = enc.AppendIPAddr(enc.AppendArrayDelim(a.buf), ip) + return a +} + +// IPPrefix adds IPv4 or IPv6 Prefix (IP + mask) to the array +func (a *Array) IPPrefix(pfx net.IPNet) *Array { + a.buf = enc.AppendIPPrefix(enc.AppendArrayDelim(a.buf), pfx) + return a +} + +// MACAddr adds a MAC (Ethernet) address to the array +func (a *Array) MACAddr(ha net.HardwareAddr) *Array { + a.buf = enc.AppendMACAddr(enc.AppendArrayDelim(a.buf), ha) + return a +} + +// Dict adds the dict Event to the array +func (a *Array) Dict(dict *Event) *Array { + dict.buf = enc.AppendEndMarker(dict.buf) + a.buf = append(enc.AppendArrayDelim(a.buf), dict.buf...) + return a +} diff --git a/vendor/github.com/rs/zerolog/console.go b/vendor/github.com/rs/zerolog/console.go new file mode 100644 index 000000000..ac34b7ebd --- /dev/null +++ b/vendor/github.com/rs/zerolog/console.go @@ -0,0 +1,446 @@ +package zerolog + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/mattn/go-colorable" +) + +const ( + colorBlack = iota + 30 + colorRed + colorGreen + colorYellow + colorBlue + colorMagenta + colorCyan + colorWhite + + colorBold = 1 + colorDarkGray = 90 +) + +var ( + consoleBufPool = sync.Pool{ + New: func() interface{} { + return bytes.NewBuffer(make([]byte, 0, 100)) + }, + } +) + +const ( + consoleDefaultTimeFormat = time.Kitchen +) + +// Formatter transforms the input into a formatted string. +type Formatter func(interface{}) string + +// ConsoleWriter parses the JSON input and writes it in an +// (optionally) colorized, human-friendly format to Out. +type ConsoleWriter struct { + // Out is the output destination. 
+ Out io.Writer + + // NoColor disables the colorized output. + NoColor bool + + // TimeFormat specifies the format for timestamp in output. + TimeFormat string + + // PartsOrder defines the order of parts in output. + PartsOrder []string + + // PartsExclude defines parts to not display in output. + PartsExclude []string + + // FieldsExclude defines contextual fields to not display in output. + FieldsExclude []string + + FormatTimestamp Formatter + FormatLevel Formatter + FormatCaller Formatter + FormatMessage Formatter + FormatFieldName Formatter + FormatFieldValue Formatter + FormatErrFieldName Formatter + FormatErrFieldValue Formatter + + FormatExtra func(map[string]interface{}, *bytes.Buffer) error +} + +// NewConsoleWriter creates and initializes a new ConsoleWriter. +func NewConsoleWriter(options ...func(w *ConsoleWriter)) ConsoleWriter { + w := ConsoleWriter{ + Out: os.Stdout, + TimeFormat: consoleDefaultTimeFormat, + PartsOrder: consoleDefaultPartsOrder(), + } + + for _, opt := range options { + opt(&w) + } + + // Fix color on Windows + if w.Out == os.Stdout || w.Out == os.Stderr { + w.Out = colorable.NewColorable(w.Out.(*os.File)) + } + + return w +} + +// Write transforms the JSON input with formatters and appends to w.Out. 
+func (w ConsoleWriter) Write(p []byte) (n int, err error) { + // Fix color on Windows + if w.Out == os.Stdout || w.Out == os.Stderr { + w.Out = colorable.NewColorable(w.Out.(*os.File)) + } + + if w.PartsOrder == nil { + w.PartsOrder = consoleDefaultPartsOrder() + } + + var buf = consoleBufPool.Get().(*bytes.Buffer) + defer func() { + buf.Reset() + consoleBufPool.Put(buf) + }() + + var evt map[string]interface{} + p = decodeIfBinaryToBytes(p) + d := json.NewDecoder(bytes.NewReader(p)) + d.UseNumber() + err = d.Decode(&evt) + if err != nil { + return n, fmt.Errorf("cannot decode event: %s", err) + } + + for _, p := range w.PartsOrder { + w.writePart(buf, evt, p) + } + + w.writeFields(evt, buf) + + if w.FormatExtra != nil { + err = w.FormatExtra(evt, buf) + if err != nil { + return n, err + } + } + + err = buf.WriteByte('\n') + if err != nil { + return n, err + } + + _, err = buf.WriteTo(w.Out) + return len(p), err +} + +// writeFields appends formatted key-value pairs to buf. +func (w ConsoleWriter) writeFields(evt map[string]interface{}, buf *bytes.Buffer) { + var fields = make([]string, 0, len(evt)) + for field := range evt { + var isExcluded bool + for _, excluded := range w.FieldsExclude { + if field == excluded { + isExcluded = true + break + } + } + if isExcluded { + continue + } + + switch field { + case LevelFieldName, TimestampFieldName, MessageFieldName, CallerFieldName: + continue + } + fields = append(fields, field) + } + sort.Strings(fields) + + // Write space only if something has already been written to the buffer, and if there are fields. + if buf.Len() > 0 && len(fields) > 0 { + buf.WriteByte(' ') + } + + // Move the "error" field to the front + ei := sort.Search(len(fields), func(i int) bool { return fields[i] >= ErrorFieldName }) + if ei < len(fields) && fields[ei] == ErrorFieldName { + fields[ei] = "" + fields = append([]string{ErrorFieldName}, fields...) 
+ var xfields = make([]string, 0, len(fields)) + for _, field := range fields { + if field == "" { // Skip empty fields + continue + } + xfields = append(xfields, field) + } + fields = xfields + } + + for i, field := range fields { + var fn Formatter + var fv Formatter + + if field == ErrorFieldName { + if w.FormatErrFieldName == nil { + fn = consoleDefaultFormatErrFieldName(w.NoColor) + } else { + fn = w.FormatErrFieldName + } + + if w.FormatErrFieldValue == nil { + fv = consoleDefaultFormatErrFieldValue(w.NoColor) + } else { + fv = w.FormatErrFieldValue + } + } else { + if w.FormatFieldName == nil { + fn = consoleDefaultFormatFieldName(w.NoColor) + } else { + fn = w.FormatFieldName + } + + if w.FormatFieldValue == nil { + fv = consoleDefaultFormatFieldValue + } else { + fv = w.FormatFieldValue + } + } + + buf.WriteString(fn(field)) + + switch fValue := evt[field].(type) { + case string: + if needsQuote(fValue) { + buf.WriteString(fv(strconv.Quote(fValue))) + } else { + buf.WriteString(fv(fValue)) + } + case json.Number: + buf.WriteString(fv(fValue)) + default: + b, err := InterfaceMarshalFunc(fValue) + if err != nil { + fmt.Fprintf(buf, colorize("[error: %v]", colorRed, w.NoColor), err) + } else { + fmt.Fprint(buf, fv(b)) + } + } + + if i < len(fields)-1 { // Skip space for last field + buf.WriteByte(' ') + } + } +} + +// writePart appends a formatted part to buf. 
+func (w ConsoleWriter) writePart(buf *bytes.Buffer, evt map[string]interface{}, p string) { + var f Formatter + + if w.PartsExclude != nil && len(w.PartsExclude) > 0 { + for _, exclude := range w.PartsExclude { + if exclude == p { + return + } + } + } + + switch p { + case LevelFieldName: + if w.FormatLevel == nil { + f = consoleDefaultFormatLevel(w.NoColor) + } else { + f = w.FormatLevel + } + case TimestampFieldName: + if w.FormatTimestamp == nil { + f = consoleDefaultFormatTimestamp(w.TimeFormat, w.NoColor) + } else { + f = w.FormatTimestamp + } + case MessageFieldName: + if w.FormatMessage == nil { + f = consoleDefaultFormatMessage + } else { + f = w.FormatMessage + } + case CallerFieldName: + if w.FormatCaller == nil { + f = consoleDefaultFormatCaller(w.NoColor) + } else { + f = w.FormatCaller + } + default: + if w.FormatFieldValue == nil { + f = consoleDefaultFormatFieldValue + } else { + f = w.FormatFieldValue + } + } + + var s = f(evt[p]) + + if len(s) > 0 { + if buf.Len() > 0 { + buf.WriteByte(' ') // Write space only if not the first part + } + buf.WriteString(s) + } +} + +// needsQuote returns true when the string s should be quoted in output. +func needsQuote(s string) bool { + for i := range s { + if s[i] < 0x20 || s[i] > 0x7e || s[i] == ' ' || s[i] == '\\' || s[i] == '"' { + return true + } + } + return false +} + +// colorize returns the string s wrapped in ANSI code c, unless disabled is true. 
+func colorize(s interface{}, c int, disabled bool) string {
+	if disabled {
+		return fmt.Sprintf("%s", s)
+	}
+	return fmt.Sprintf("\x1b[%dm%v\x1b[0m", c, s)
+}
+
+// ----- DEFAULT FORMATTERS ---------------------------------------------------
+
+func consoleDefaultPartsOrder() []string {
+	return []string{
+		TimestampFieldName,
+		LevelFieldName,
+		CallerFieldName,
+		MessageFieldName,
+	}
+}
+
+func consoleDefaultFormatTimestamp(timeFormat string, noColor bool) Formatter {
+	if timeFormat == "" {
+		timeFormat = consoleDefaultTimeFormat
+	}
+	return func(i interface{}) string {
+		t := ""
+		switch tt := i.(type) {
+		case string:
+			ts, err := time.Parse(TimeFieldFormat, tt)
+			if err != nil {
+				t = tt
+			} else {
+				t = ts.Local().Format(timeFormat)
+			}
+		case json.Number:
+			i, err := tt.Int64()
+			if err != nil {
+				t = tt.String()
+			} else {
+				var sec, nsec int64 = i, 0
+				switch TimeFieldFormat {
+				case TimeFormatUnixMs:
+					nsec = int64(time.Duration(i) * time.Millisecond)
+					sec = 0
+				case TimeFormatUnixMicro:
+					nsec = int64(time.Duration(i) * time.Microsecond)
+					sec = 0
+				}
+				ts := time.Unix(sec, nsec)
+				t = ts.Format(timeFormat)
+			}
+		}
+		return colorize(t, colorDarkGray, noColor)
+	}
+}
+
+func consoleDefaultFormatLevel(noColor bool) Formatter {
+	return func(i interface{}) string {
+		var l string
+		if ll, ok := i.(string); ok {
+			switch ll {
+			case LevelTraceValue:
+				l = colorize("TRC", colorMagenta, noColor)
+			case LevelDebugValue:
+				l = colorize("DBG", colorYellow, noColor)
+			case LevelInfoValue:
+				l = colorize("INF", colorGreen, noColor)
+			case LevelWarnValue:
+				l = colorize("WRN", colorRed, noColor)
+			case LevelErrorValue:
+				l = colorize(colorize("ERR", colorRed, noColor), colorBold, noColor)
+			case LevelFatalValue:
+				l = colorize(colorize("FTL", colorRed, noColor), colorBold, noColor)
+			case LevelPanicValue:
+				l = colorize(colorize("PNC", colorRed, noColor), colorBold, noColor)
+			default:
+				l = colorize("???", colorBold, noColor)
+			}
+		} else {
+			if i == nil {
+				l = colorize("???", colorBold, noColor)
+			} else {
+				l = strings.ToUpper(fmt.Sprintf("%.3s", i)) // %.3s truncates safely; the old [0:3] slice panicked when the value formatted to fewer than 3 bytes
+			}
+		}
+		return l
+	}
+}
+
+func consoleDefaultFormatCaller(noColor bool) Formatter {
+	return func(i interface{}) string {
+		var c string
+		if cc, ok := i.(string); ok {
+			c = cc
+		}
+		if len(c) > 0 {
+			if cwd, err := os.Getwd(); err == nil {
+				if rel, err := filepath.Rel(cwd, c); err == nil {
+					c = rel
+				}
+			}
+			c = colorize(c, colorBold, noColor) + colorize(" >", colorCyan, noColor)
+		}
+		return c
+	}
+}
+
+func consoleDefaultFormatMessage(i interface{}) string {
+	if i == nil {
+		return ""
+	}
+	return fmt.Sprintf("%s", i)
+}
+
+func consoleDefaultFormatFieldName(noColor bool) Formatter {
+	return func(i interface{}) string {
+		return colorize(fmt.Sprintf("%s=", i), colorCyan, noColor)
+	}
+}
+
+func consoleDefaultFormatFieldValue(i interface{}) string {
+	return fmt.Sprintf("%s", i)
+}
+
+func consoleDefaultFormatErrFieldName(noColor bool) Formatter {
+	return func(i interface{}) string {
+		return colorize(fmt.Sprintf("%s=", i), colorCyan, noColor)
+	}
+}
+
+func consoleDefaultFormatErrFieldValue(noColor bool) Formatter {
+	return func(i interface{}) string {
+		return colorize(fmt.Sprintf("%s", i), colorRed, noColor)
+	}
+}
diff --git a/vendor/github.com/rs/zerolog/context.go b/vendor/github.com/rs/zerolog/context.go
new file mode 100644
index 000000000..f398e3197
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/context.go
@@ -0,0 +1,433 @@
+package zerolog
+
+import (
+	"fmt"
+	"io/ioutil"
+	"math"
+	"net"
+	"time"
+)
+
+// Context configures a new sub-logger with contextual fields.
+type Context struct {
+	l Logger
+}
+
+// Logger returns the logger with the context previously set.
+func (c Context) Logger() Logger {
+	return c.l
+}
+
+// Fields is a helper function to use a map or slice to set fields using type assertion.
+// Only map[string]interface{} and []interface{} are accepted.
[]interface{} must
+// alternate string keys and arbitrary values, and extraneous ones are ignored.
+func (c Context) Fields(fields interface{}) Context {
+	c.l.context = appendFields(c.l.context, fields)
+	return c
+}
+
+// Dict adds the field key with the dict to the logger context.
+func (c Context) Dict(key string, dict *Event) Context {
+	dict.buf = enc.AppendEndMarker(dict.buf)
+	c.l.context = append(enc.AppendKey(c.l.context, key), dict.buf...)
+	putEvent(dict)
+	return c
+}
+
+// Array adds the field key with an array to the event context.
+// Use zerolog.Arr() to create the array or pass a type that
+// implement the LogArrayMarshaler interface.
+func (c Context) Array(key string, arr LogArrayMarshaler) Context {
+	c.l.context = enc.AppendKey(c.l.context, key)
+	if arr, ok := arr.(*Array); ok {
+		c.l.context = arr.write(c.l.context)
+		return c
+	}
+	// arr is not a *Array here: that case already returned above, so the
+	// upstream code's second type assertion (`if aa, ok := arr.(*Array)`)
+	// could never succeed and has been removed as dead code. We simply
+	// marshal the user-provided LogArrayMarshaler into a fresh array.
+	a := Arr()
+	arr.MarshalZerologArray(a)
+	// NOTE: comment padding preserves this vendored patch hunk's line count.
+	c.l.context = a.write(c.l.context)
+	return c
+}
+
+// Object marshals an object that implement the LogObjectMarshaler interface.
+func (c Context) Object(key string, obj LogObjectMarshaler) Context {
+	e := newEvent(levelWriterAdapter{ioutil.Discard}, 0)
+	e.Object(key, obj)
+	c.l.context = enc.AppendObjectData(c.l.context, e.buf)
+	putEvent(e)
+	return c
+}
+
+// EmbedObject marshals and Embeds an object that implement the LogObjectMarshaler interface.
+func (c Context) EmbedObject(obj LogObjectMarshaler) Context {
+	e := newEvent(levelWriterAdapter{ioutil.Discard}, 0)
+	e.EmbedObject(obj)
+	c.l.context = enc.AppendObjectData(c.l.context, e.buf)
+	putEvent(e)
+	return c
+}
+
+// Str adds the field key with val as a string to the logger context.
+func (c Context) Str(key, val string) Context {
+	c.l.context = enc.AppendString(enc.AppendKey(c.l.context, key), val)
+	return c
+}
+
+// Strs adds the field key with vals as a []string to the logger context.
+func (c Context) Strs(key string, vals []string) Context { + c.l.context = enc.AppendStrings(enc.AppendKey(c.l.context, key), vals) + return c +} + +// Stringer adds the field key with val.String() (or null if val is nil) to the logger context. +func (c Context) Stringer(key string, val fmt.Stringer) Context { + if val != nil { + c.l.context = enc.AppendString(enc.AppendKey(c.l.context, key), val.String()) + return c + } + + c.l.context = enc.AppendInterface(enc.AppendKey(c.l.context, key), nil) + return c +} + +// Bytes adds the field key with val as a []byte to the logger context. +func (c Context) Bytes(key string, val []byte) Context { + c.l.context = enc.AppendBytes(enc.AppendKey(c.l.context, key), val) + return c +} + +// Hex adds the field key with val as a hex string to the logger context. +func (c Context) Hex(key string, val []byte) Context { + c.l.context = enc.AppendHex(enc.AppendKey(c.l.context, key), val) + return c +} + +// RawJSON adds already encoded JSON to context. +// +// No sanity check is performed on b; it must not contain carriage returns and +// be valid JSON. +func (c Context) RawJSON(key string, b []byte) Context { + c.l.context = appendJSON(enc.AppendKey(c.l.context, key), b) + return c +} + +// AnErr adds the field key with serialized err to the logger context. +func (c Context) AnErr(key string, err error) Context { + switch m := ErrorMarshalFunc(err).(type) { + case nil: + return c + case LogObjectMarshaler: + return c.Object(key, m) + case error: + if m == nil || isNilValue(m) { + return c + } else { + return c.Str(key, m.Error()) + } + case string: + return c.Str(key, m) + default: + return c.Interface(key, m) + } +} + +// Errs adds the field key with errs as an array of serialized errors to the +// logger context. 
+func (c Context) Errs(key string, errs []error) Context { + arr := Arr() + for _, err := range errs { + switch m := ErrorMarshalFunc(err).(type) { + case LogObjectMarshaler: + arr = arr.Object(m) + case error: + if m == nil || isNilValue(m) { + arr = arr.Interface(nil) + } else { + arr = arr.Str(m.Error()) + } + case string: + arr = arr.Str(m) + default: + arr = arr.Interface(m) + } + } + + return c.Array(key, arr) +} + +// Err adds the field "error" with serialized err to the logger context. +func (c Context) Err(err error) Context { + return c.AnErr(ErrorFieldName, err) +} + +// Bool adds the field key with val as a bool to the logger context. +func (c Context) Bool(key string, b bool) Context { + c.l.context = enc.AppendBool(enc.AppendKey(c.l.context, key), b) + return c +} + +// Bools adds the field key with val as a []bool to the logger context. +func (c Context) Bools(key string, b []bool) Context { + c.l.context = enc.AppendBools(enc.AppendKey(c.l.context, key), b) + return c +} + +// Int adds the field key with i as a int to the logger context. +func (c Context) Int(key string, i int) Context { + c.l.context = enc.AppendInt(enc.AppendKey(c.l.context, key), i) + return c +} + +// Ints adds the field key with i as a []int to the logger context. +func (c Context) Ints(key string, i []int) Context { + c.l.context = enc.AppendInts(enc.AppendKey(c.l.context, key), i) + return c +} + +// Int8 adds the field key with i as a int8 to the logger context. +func (c Context) Int8(key string, i int8) Context { + c.l.context = enc.AppendInt8(enc.AppendKey(c.l.context, key), i) + return c +} + +// Ints8 adds the field key with i as a []int8 to the logger context. +func (c Context) Ints8(key string, i []int8) Context { + c.l.context = enc.AppendInts8(enc.AppendKey(c.l.context, key), i) + return c +} + +// Int16 adds the field key with i as a int16 to the logger context. 
+func (c Context) Int16(key string, i int16) Context { + c.l.context = enc.AppendInt16(enc.AppendKey(c.l.context, key), i) + return c +} + +// Ints16 adds the field key with i as a []int16 to the logger context. +func (c Context) Ints16(key string, i []int16) Context { + c.l.context = enc.AppendInts16(enc.AppendKey(c.l.context, key), i) + return c +} + +// Int32 adds the field key with i as a int32 to the logger context. +func (c Context) Int32(key string, i int32) Context { + c.l.context = enc.AppendInt32(enc.AppendKey(c.l.context, key), i) + return c +} + +// Ints32 adds the field key with i as a []int32 to the logger context. +func (c Context) Ints32(key string, i []int32) Context { + c.l.context = enc.AppendInts32(enc.AppendKey(c.l.context, key), i) + return c +} + +// Int64 adds the field key with i as a int64 to the logger context. +func (c Context) Int64(key string, i int64) Context { + c.l.context = enc.AppendInt64(enc.AppendKey(c.l.context, key), i) + return c +} + +// Ints64 adds the field key with i as a []int64 to the logger context. +func (c Context) Ints64(key string, i []int64) Context { + c.l.context = enc.AppendInts64(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uint adds the field key with i as a uint to the logger context. +func (c Context) Uint(key string, i uint) Context { + c.l.context = enc.AppendUint(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uints adds the field key with i as a []uint to the logger context. +func (c Context) Uints(key string, i []uint) Context { + c.l.context = enc.AppendUints(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uint8 adds the field key with i as a uint8 to the logger context. +func (c Context) Uint8(key string, i uint8) Context { + c.l.context = enc.AppendUint8(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uints8 adds the field key with i as a []uint8 to the logger context. 
+func (c Context) Uints8(key string, i []uint8) Context { + c.l.context = enc.AppendUints8(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uint16 adds the field key with i as a uint16 to the logger context. +func (c Context) Uint16(key string, i uint16) Context { + c.l.context = enc.AppendUint16(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uints16 adds the field key with i as a []uint16 to the logger context. +func (c Context) Uints16(key string, i []uint16) Context { + c.l.context = enc.AppendUints16(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uint32 adds the field key with i as a uint32 to the logger context. +func (c Context) Uint32(key string, i uint32) Context { + c.l.context = enc.AppendUint32(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uints32 adds the field key with i as a []uint32 to the logger context. +func (c Context) Uints32(key string, i []uint32) Context { + c.l.context = enc.AppendUints32(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uint64 adds the field key with i as a uint64 to the logger context. +func (c Context) Uint64(key string, i uint64) Context { + c.l.context = enc.AppendUint64(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uints64 adds the field key with i as a []uint64 to the logger context. +func (c Context) Uints64(key string, i []uint64) Context { + c.l.context = enc.AppendUints64(enc.AppendKey(c.l.context, key), i) + return c +} + +// Float32 adds the field key with f as a float32 to the logger context. +func (c Context) Float32(key string, f float32) Context { + c.l.context = enc.AppendFloat32(enc.AppendKey(c.l.context, key), f) + return c +} + +// Floats32 adds the field key with f as a []float32 to the logger context. +func (c Context) Floats32(key string, f []float32) Context { + c.l.context = enc.AppendFloats32(enc.AppendKey(c.l.context, key), f) + return c +} + +// Float64 adds the field key with f as a float64 to the logger context. 
+func (c Context) Float64(key string, f float64) Context { + c.l.context = enc.AppendFloat64(enc.AppendKey(c.l.context, key), f) + return c +} + +// Floats64 adds the field key with f as a []float64 to the logger context. +func (c Context) Floats64(key string, f []float64) Context { + c.l.context = enc.AppendFloats64(enc.AppendKey(c.l.context, key), f) + return c +} + +type timestampHook struct{} + +func (ts timestampHook) Run(e *Event, level Level, msg string) { + e.Timestamp() +} + +var th = timestampHook{} + +// Timestamp adds the current local time as UNIX timestamp to the logger context with the "time" key. +// To customize the key name, change zerolog.TimestampFieldName. +// +// NOTE: It won't dedupe the "time" key if the *Context has one already. +func (c Context) Timestamp() Context { + c.l = c.l.Hook(th) + return c +} + +// Time adds the field key with t formated as string using zerolog.TimeFieldFormat. +func (c Context) Time(key string, t time.Time) Context { + c.l.context = enc.AppendTime(enc.AppendKey(c.l.context, key), t, TimeFieldFormat) + return c +} + +// Times adds the field key with t formated as string using zerolog.TimeFieldFormat. +func (c Context) Times(key string, t []time.Time) Context { + c.l.context = enc.AppendTimes(enc.AppendKey(c.l.context, key), t, TimeFieldFormat) + return c +} + +// Dur adds the fields key with d divided by unit and stored as a float. +func (c Context) Dur(key string, d time.Duration) Context { + c.l.context = enc.AppendDuration(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger) + return c +} + +// Durs adds the fields key with d divided by unit and stored as a float. +func (c Context) Durs(key string, d []time.Duration) Context { + c.l.context = enc.AppendDurations(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger) + return c +} + +// Interface adds the field key with obj marshaled using reflection. 
+func (c Context) Interface(key string, i interface{}) Context { + c.l.context = enc.AppendInterface(enc.AppendKey(c.l.context, key), i) + return c +} + +type callerHook struct { + callerSkipFrameCount int +} + +func newCallerHook(skipFrameCount int) callerHook { + return callerHook{callerSkipFrameCount: skipFrameCount} +} + +func (ch callerHook) Run(e *Event, level Level, msg string) { + switch ch.callerSkipFrameCount { + case useGlobalSkipFrameCount: + // Extra frames to skip (added by hook infra). + e.caller(CallerSkipFrameCount + contextCallerSkipFrameCount) + default: + // Extra frames to skip (added by hook infra). + e.caller(ch.callerSkipFrameCount + contextCallerSkipFrameCount) + } +} + +// useGlobalSkipFrameCount acts as a flag to informat callerHook.Run +// to use the global CallerSkipFrameCount. +const useGlobalSkipFrameCount = math.MinInt32 + +// ch is the default caller hook using the global CallerSkipFrameCount. +var ch = newCallerHook(useGlobalSkipFrameCount) + +// Caller adds the file:line of the caller with the zerolog.CallerFieldName key. +func (c Context) Caller() Context { + c.l = c.l.Hook(ch) + return c +} + +// CallerWithSkipFrameCount adds the file:line of the caller with the zerolog.CallerFieldName key. +// The specified skipFrameCount int will override the global CallerSkipFrameCount for this context's respective logger. +// If set to -1 the global CallerSkipFrameCount will be used. +func (c Context) CallerWithSkipFrameCount(skipFrameCount int) Context { + c.l = c.l.Hook(newCallerHook(skipFrameCount)) + return c +} + +// Stack enables stack trace printing for the error passed to Err(). 
+func (c Context) Stack() Context { + c.l.stack = true + return c +} + +// IPAddr adds IPv4 or IPv6 Address to the context +func (c Context) IPAddr(key string, ip net.IP) Context { + c.l.context = enc.AppendIPAddr(enc.AppendKey(c.l.context, key), ip) + return c +} + +// IPPrefix adds IPv4 or IPv6 Prefix (address and mask) to the context +func (c Context) IPPrefix(key string, pfx net.IPNet) Context { + c.l.context = enc.AppendIPPrefix(enc.AppendKey(c.l.context, key), pfx) + return c +} + +// MACAddr adds MAC address to the context +func (c Context) MACAddr(key string, ha net.HardwareAddr) Context { + c.l.context = enc.AppendMACAddr(enc.AppendKey(c.l.context, key), ha) + return c +} diff --git a/vendor/github.com/rs/zerolog/ctx.go b/vendor/github.com/rs/zerolog/ctx.go new file mode 100644 index 000000000..44d3f4bc1 --- /dev/null +++ b/vendor/github.com/rs/zerolog/ctx.go @@ -0,0 +1,51 @@ +package zerolog + +import ( + "context" +) + +var disabledLogger *Logger + +func init() { + SetGlobalLevel(TraceLevel) + l := Nop() + disabledLogger = &l +} + +type ctxKey struct{} + +// WithContext returns a copy of ctx with l associated. If an instance of Logger +// is already in the context, the context is not updated. +// +// For instance, to add a field to an existing logger in the context, use this +// notation: +// +// ctx := r.Context() +// l := zerolog.Ctx(ctx) +// l.UpdateContext(func(c Context) Context { +// return c.Str("bar", "baz") +// }) +func (l Logger) WithContext(ctx context.Context) context.Context { + if lp, ok := ctx.Value(ctxKey{}).(*Logger); ok { + if lp == &l { + // Do not store same logger. + return ctx + } + } else if l.level == Disabled { + // Do not store disabled logger. + return ctx + } + return context.WithValue(ctx, ctxKey{}, &l) +} + +// Ctx returns the Logger associated with the ctx. If no logger +// is associated, DefaultContextLogger is returned, unless DefaultContextLogger +// is nil, in which case a disabled logger is returned. 
+func Ctx(ctx context.Context) *Logger { + if l, ok := ctx.Value(ctxKey{}).(*Logger); ok { + return l + } else if l = DefaultContextLogger; l != nil { + return l + } + return disabledLogger +} diff --git a/vendor/github.com/rs/zerolog/encoder.go b/vendor/github.com/rs/zerolog/encoder.go new file mode 100644 index 000000000..09b24e80c --- /dev/null +++ b/vendor/github.com/rs/zerolog/encoder.go @@ -0,0 +1,56 @@ +package zerolog + +import ( + "net" + "time" +) + +type encoder interface { + AppendArrayDelim(dst []byte) []byte + AppendArrayEnd(dst []byte) []byte + AppendArrayStart(dst []byte) []byte + AppendBeginMarker(dst []byte) []byte + AppendBool(dst []byte, val bool) []byte + AppendBools(dst []byte, vals []bool) []byte + AppendBytes(dst, s []byte) []byte + AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte + AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte + AppendEndMarker(dst []byte) []byte + AppendFloat32(dst []byte, val float32) []byte + AppendFloat64(dst []byte, val float64) []byte + AppendFloats32(dst []byte, vals []float32) []byte + AppendFloats64(dst []byte, vals []float64) []byte + AppendHex(dst, s []byte) []byte + AppendIPAddr(dst []byte, ip net.IP) []byte + AppendIPPrefix(dst []byte, pfx net.IPNet) []byte + AppendInt(dst []byte, val int) []byte + AppendInt16(dst []byte, val int16) []byte + AppendInt32(dst []byte, val int32) []byte + AppendInt64(dst []byte, val int64) []byte + AppendInt8(dst []byte, val int8) []byte + AppendInterface(dst []byte, i interface{}) []byte + AppendInts(dst []byte, vals []int) []byte + AppendInts16(dst []byte, vals []int16) []byte + AppendInts32(dst []byte, vals []int32) []byte + AppendInts64(dst []byte, vals []int64) []byte + AppendInts8(dst []byte, vals []int8) []byte + AppendKey(dst []byte, key string) []byte + AppendLineBreak(dst []byte) []byte + AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte + AppendNil(dst []byte) []byte + AppendObjectData(dst 
[]byte, o []byte) []byte + AppendString(dst []byte, s string) []byte + AppendStrings(dst []byte, vals []string) []byte + AppendTime(dst []byte, t time.Time, format string) []byte + AppendTimes(dst []byte, vals []time.Time, format string) []byte + AppendUint(dst []byte, val uint) []byte + AppendUint16(dst []byte, val uint16) []byte + AppendUint32(dst []byte, val uint32) []byte + AppendUint64(dst []byte, val uint64) []byte + AppendUint8(dst []byte, val uint8) []byte + AppendUints(dst []byte, vals []uint) []byte + AppendUints16(dst []byte, vals []uint16) []byte + AppendUints32(dst []byte, vals []uint32) []byte + AppendUints64(dst []byte, vals []uint64) []byte + AppendUints8(dst []byte, vals []uint8) []byte +} diff --git a/vendor/github.com/rs/zerolog/encoder_cbor.go b/vendor/github.com/rs/zerolog/encoder_cbor.go new file mode 100644 index 000000000..7b0dafef8 --- /dev/null +++ b/vendor/github.com/rs/zerolog/encoder_cbor.go @@ -0,0 +1,42 @@ +// +build binary_log + +package zerolog + +// This file contains bindings to do binary encoding. + +import ( + "github.com/rs/zerolog/internal/cbor" +) + +var ( + _ encoder = (*cbor.Encoder)(nil) + + enc = cbor.Encoder{} +) + +func init() { + // using closure to reflect the changes at runtime. + cbor.JSONMarshalFunc = func(v interface{}) ([]byte, error) { + return InterfaceMarshalFunc(v) + } +} + +func appendJSON(dst []byte, j []byte) []byte { + return cbor.AppendEmbeddedJSON(dst, j) +} + +// decodeIfBinaryToString - converts a binary formatted log msg to a +// JSON formatted String Log message. +func decodeIfBinaryToString(in []byte) string { + return cbor.DecodeIfBinaryToString(in) +} + +func decodeObjectToStr(in []byte) string { + return cbor.DecodeObjectToStr(in) +} + +// decodeIfBinaryToBytes - converts a binary formatted log msg to a +// JSON formatted Bytes Log message. 
+func decodeIfBinaryToBytes(in []byte) []byte { + return cbor.DecodeIfBinaryToBytes(in) +} diff --git a/vendor/github.com/rs/zerolog/encoder_json.go b/vendor/github.com/rs/zerolog/encoder_json.go new file mode 100644 index 000000000..0e0450e26 --- /dev/null +++ b/vendor/github.com/rs/zerolog/encoder_json.go @@ -0,0 +1,39 @@ +// +build !binary_log + +package zerolog + +// encoder_json.go file contains bindings to generate +// JSON encoded byte stream. + +import ( + "github.com/rs/zerolog/internal/json" +) + +var ( + _ encoder = (*json.Encoder)(nil) + + enc = json.Encoder{} +) + +func init() { + // using closure to reflect the changes at runtime. + json.JSONMarshalFunc = func(v interface{}) ([]byte, error) { + return InterfaceMarshalFunc(v) + } +} + +func appendJSON(dst []byte, j []byte) []byte { + return append(dst, j...) +} + +func decodeIfBinaryToString(in []byte) string { + return string(in) +} + +func decodeObjectToStr(in []byte) string { + return string(in) +} + +func decodeIfBinaryToBytes(in []byte) []byte { + return in +} diff --git a/vendor/github.com/rs/zerolog/event.go b/vendor/github.com/rs/zerolog/event.go new file mode 100644 index 000000000..0e2eaa682 --- /dev/null +++ b/vendor/github.com/rs/zerolog/event.go @@ -0,0 +1,780 @@ +package zerolog + +import ( + "fmt" + "net" + "os" + "runtime" + "sync" + "time" +) + +var eventPool = &sync.Pool{ + New: func() interface{} { + return &Event{ + buf: make([]byte, 0, 500), + } + }, +} + +// Event represents a log event. It is instanced by one of the level method of +// Logger and finalized by the Msg or Msgf method. +type Event struct { + buf []byte + w LevelWriter + level Level + done func(msg string) + stack bool // enable error stack trace + ch []Hook // hooks from context + skipFrame int // The number of additional frames to skip when printing the caller. +} + +func putEvent(e *Event) { + // Proper usage of a sync.Pool requires each entry to have approximately + // the same memory cost. 
To obtain this property when the stored type + // contains a variably-sized buffer, we add a hard limit on the maximum buffer + // to place back in the pool. + // + // See https://golang.org/issue/23199 + const maxSize = 1 << 16 // 64KiB + if cap(e.buf) > maxSize { + return + } + eventPool.Put(e) +} + +// LogObjectMarshaler provides a strongly-typed and encoding-agnostic interface +// to be implemented by types used with Event/Context's Object methods. +type LogObjectMarshaler interface { + MarshalZerologObject(e *Event) +} + +// LogArrayMarshaler provides a strongly-typed and encoding-agnostic interface +// to be implemented by types used with Event/Context's Array methods. +type LogArrayMarshaler interface { + MarshalZerologArray(a *Array) +} + +func newEvent(w LevelWriter, level Level) *Event { + e := eventPool.Get().(*Event) + e.buf = e.buf[:0] + e.ch = nil + e.buf = enc.AppendBeginMarker(e.buf) + e.w = w + e.level = level + e.stack = false + e.skipFrame = 0 + return e +} + +func (e *Event) write() (err error) { + if e == nil { + return nil + } + if e.level != Disabled { + e.buf = enc.AppendEndMarker(e.buf) + e.buf = enc.AppendLineBreak(e.buf) + if e.w != nil { + _, err = e.w.WriteLevel(e.level, e.buf) + } + } + putEvent(e) + return +} + +// Enabled return false if the *Event is going to be filtered out by +// log level or sampling. +func (e *Event) Enabled() bool { + return e != nil && e.level != Disabled +} + +// Discard disables the event so Msg(f) won't print it. +func (e *Event) Discard() *Event { + if e == nil { + return e + } + e.level = Disabled + return nil +} + +// Msg sends the *Event with msg added as the message field if not empty. +// +// NOTICE: once this method is called, the *Event should be disposed. +// Calling Msg twice can have unexpected result. +func (e *Event) Msg(msg string) { + if e == nil { + return + } + e.msg(msg) +} + +// Send is equivalent to calling Msg(""). 
+// +// NOTICE: once this method is called, the *Event should be disposed. +func (e *Event) Send() { + if e == nil { + return + } + e.msg("") +} + +// Msgf sends the event with formatted msg added as the message field if not empty. +// +// NOTICE: once this method is called, the *Event should be disposed. +// Calling Msgf twice can have unexpected result. +func (e *Event) Msgf(format string, v ...interface{}) { + if e == nil { + return + } + e.msg(fmt.Sprintf(format, v...)) +} + +func (e *Event) MsgFunc(createMsg func() string) { + if e == nil { + return + } + e.msg(createMsg()) +} + +func (e *Event) msg(msg string) { + for _, hook := range e.ch { + hook.Run(e, e.level, msg) + } + if msg != "" { + e.buf = enc.AppendString(enc.AppendKey(e.buf, MessageFieldName), msg) + } + if e.done != nil { + defer e.done(msg) + } + if err := e.write(); err != nil { + if ErrorHandler != nil { + ErrorHandler(err) + } else { + fmt.Fprintf(os.Stderr, "zerolog: could not write event: %v\n", err) + } + } +} + +// Fields is a helper function to use a map or slice to set fields using type assertion. +// Only map[string]interface{} and []interface{} are accepted. []interface{} must +// alternate string keys and arbitrary values, and extraneous ones are ignored. +func (e *Event) Fields(fields interface{}) *Event { + if e == nil { + return e + } + e.buf = appendFields(e.buf, fields) + return e +} + +// Dict adds the field key with a dict to the event context. +// Use zerolog.Dict() to create the dictionary. +func (e *Event) Dict(key string, dict *Event) *Event { + if e == nil { + return e + } + dict.buf = enc.AppendEndMarker(dict.buf) + e.buf = append(enc.AppendKey(e.buf, key), dict.buf...) + putEvent(dict) + return e +} + +// Dict creates an Event to be used with the *Event.Dict method. +// Call usual field methods like Str, Int etc to add fields to this +// event and give it as argument the *Event.Dict method. 
+func Dict() *Event { + return newEvent(nil, 0) +} + +// Array adds the field key with an array to the event context. +// Use zerolog.Arr() to create the array or pass a type that +// implement the LogArrayMarshaler interface. +func (e *Event) Array(key string, arr LogArrayMarshaler) *Event { + if e == nil { + return e + } + e.buf = enc.AppendKey(e.buf, key) + var a *Array + if aa, ok := arr.(*Array); ok { + a = aa + } else { + a = Arr() + arr.MarshalZerologArray(a) + } + e.buf = a.write(e.buf) + return e +} + +func (e *Event) appendObject(obj LogObjectMarshaler) { + e.buf = enc.AppendBeginMarker(e.buf) + obj.MarshalZerologObject(e) + e.buf = enc.AppendEndMarker(e.buf) +} + +// Object marshals an object that implement the LogObjectMarshaler interface. +func (e *Event) Object(key string, obj LogObjectMarshaler) *Event { + if e == nil { + return e + } + e.buf = enc.AppendKey(e.buf, key) + if obj == nil { + e.buf = enc.AppendNil(e.buf) + + return e + } + + e.appendObject(obj) + return e +} + +// Func allows an anonymous func to run only if the event is enabled. +func (e *Event) Func(f func(e *Event)) *Event { + if e != nil && e.Enabled() { + f(e) + } + return e +} + +// EmbedObject marshals an object that implement the LogObjectMarshaler interface. +func (e *Event) EmbedObject(obj LogObjectMarshaler) *Event { + if e == nil { + return e + } + if obj == nil { + return e + } + obj.MarshalZerologObject(e) + return e +} + +// Str adds the field key with val as a string to the *Event context. +func (e *Event) Str(key, val string) *Event { + if e == nil { + return e + } + e.buf = enc.AppendString(enc.AppendKey(e.buf, key), val) + return e +} + +// Strs adds the field key with vals as a []string to the *Event context. 
+func (e *Event) Strs(key string, vals []string) *Event { + if e == nil { + return e + } + e.buf = enc.AppendStrings(enc.AppendKey(e.buf, key), vals) + return e +} + +// Stringer adds the field key with val.String() (or null if val is nil) +// to the *Event context. +func (e *Event) Stringer(key string, val fmt.Stringer) *Event { + if e == nil { + return e + } + e.buf = enc.AppendStringer(enc.AppendKey(e.buf, key), val) + return e +} + +// Stringers adds the field key with vals where each individual val +// is used as val.String() (or null if val is empty) to the *Event +// context. +func (e *Event) Stringers(key string, vals []fmt.Stringer) *Event { + if e == nil { + return e + } + e.buf = enc.AppendStringers(enc.AppendKey(e.buf, key), vals) + return e +} + +// Bytes adds the field key with val as a string to the *Event context. +// +// Runes outside of normal ASCII ranges will be hex-encoded in the resulting +// JSON. +func (e *Event) Bytes(key string, val []byte) *Event { + if e == nil { + return e + } + e.buf = enc.AppendBytes(enc.AppendKey(e.buf, key), val) + return e +} + +// Hex adds the field key with val as a hex string to the *Event context. +func (e *Event) Hex(key string, val []byte) *Event { + if e == nil { + return e + } + e.buf = enc.AppendHex(enc.AppendKey(e.buf, key), val) + return e +} + +// RawJSON adds already encoded JSON to the log line under key. +// +// No sanity check is performed on b; it must not contain carriage returns and +// be valid JSON. +func (e *Event) RawJSON(key string, b []byte) *Event { + if e == nil { + return e + } + e.buf = appendJSON(enc.AppendKey(e.buf, key), b) + return e +} + +// AnErr adds the field key with serialized err to the *Event context. +// If err is nil, no field is added. 
+func (e *Event) AnErr(key string, err error) *Event { + if e == nil { + return e + } + switch m := ErrorMarshalFunc(err).(type) { + case nil: + return e + case LogObjectMarshaler: + return e.Object(key, m) + case error: + if m == nil || isNilValue(m) { + return e + } else { + return e.Str(key, m.Error()) + } + case string: + return e.Str(key, m) + default: + return e.Interface(key, m) + } +} + +// Errs adds the field key with errs as an array of serialized errors to the +// *Event context. +func (e *Event) Errs(key string, errs []error) *Event { + if e == nil { + return e + } + arr := Arr() + for _, err := range errs { + switch m := ErrorMarshalFunc(err).(type) { + case LogObjectMarshaler: + arr = arr.Object(m) + case error: + arr = arr.Err(m) + case string: + arr = arr.Str(m) + default: + arr = arr.Interface(m) + } + } + + return e.Array(key, arr) +} + +// Err adds the field "error" with serialized err to the *Event context. +// If err is nil, no field is added. +// +// To customize the key name, change zerolog.ErrorFieldName. +// +// If Stack() has been called before and zerolog.ErrorStackMarshaler is defined, +// the err is passed to ErrorStackMarshaler and the result is appended to the +// zerolog.ErrorStackFieldName. +func (e *Event) Err(err error) *Event { + if e == nil { + return e + } + if e.stack && ErrorStackMarshaler != nil { + switch m := ErrorStackMarshaler(err).(type) { + case nil: + case LogObjectMarshaler: + e.Object(ErrorStackFieldName, m) + case error: + if m != nil && !isNilValue(m) { + e.Str(ErrorStackFieldName, m.Error()) + } + case string: + e.Str(ErrorStackFieldName, m) + default: + e.Interface(ErrorStackFieldName, m) + } + } + return e.AnErr(ErrorFieldName, err) +} + +// Stack enables stack trace printing for the error passed to Err(). +// +// ErrorStackMarshaler must be set for this method to do something. 
+func (e *Event) Stack() *Event { + if e != nil { + e.stack = true + } + return e +} + +// Bool adds the field key with val as a bool to the *Event context. +func (e *Event) Bool(key string, b bool) *Event { + if e == nil { + return e + } + e.buf = enc.AppendBool(enc.AppendKey(e.buf, key), b) + return e +} + +// Bools adds the field key with val as a []bool to the *Event context. +func (e *Event) Bools(key string, b []bool) *Event { + if e == nil { + return e + } + e.buf = enc.AppendBools(enc.AppendKey(e.buf, key), b) + return e +} + +// Int adds the field key with i as a int to the *Event context. +func (e *Event) Int(key string, i int) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInt(enc.AppendKey(e.buf, key), i) + return e +} + +// Ints adds the field key with i as a []int to the *Event context. +func (e *Event) Ints(key string, i []int) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInts(enc.AppendKey(e.buf, key), i) + return e +} + +// Int8 adds the field key with i as a int8 to the *Event context. +func (e *Event) Int8(key string, i int8) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInt8(enc.AppendKey(e.buf, key), i) + return e +} + +// Ints8 adds the field key with i as a []int8 to the *Event context. +func (e *Event) Ints8(key string, i []int8) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInts8(enc.AppendKey(e.buf, key), i) + return e +} + +// Int16 adds the field key with i as a int16 to the *Event context. +func (e *Event) Int16(key string, i int16) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInt16(enc.AppendKey(e.buf, key), i) + return e +} + +// Ints16 adds the field key with i as a []int16 to the *Event context. +func (e *Event) Ints16(key string, i []int16) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInts16(enc.AppendKey(e.buf, key), i) + return e +} + +// Int32 adds the field key with i as a int32 to the *Event context. 
+func (e *Event) Int32(key string, i int32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInt32(enc.AppendKey(e.buf, key), i) + return e +} + +// Ints32 adds the field key with i as a []int32 to the *Event context. +func (e *Event) Ints32(key string, i []int32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInts32(enc.AppendKey(e.buf, key), i) + return e +} + +// Int64 adds the field key with i as a int64 to the *Event context. +func (e *Event) Int64(key string, i int64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInt64(enc.AppendKey(e.buf, key), i) + return e +} + +// Ints64 adds the field key with i as a []int64 to the *Event context. +func (e *Event) Ints64(key string, i []int64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInts64(enc.AppendKey(e.buf, key), i) + return e +} + +// Uint adds the field key with i as a uint to the *Event context. +func (e *Event) Uint(key string, i uint) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUint(enc.AppendKey(e.buf, key), i) + return e +} + +// Uints adds the field key with i as a []int to the *Event context. +func (e *Event) Uints(key string, i []uint) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUints(enc.AppendKey(e.buf, key), i) + return e +} + +// Uint8 adds the field key with i as a uint8 to the *Event context. +func (e *Event) Uint8(key string, i uint8) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUint8(enc.AppendKey(e.buf, key), i) + return e +} + +// Uints8 adds the field key with i as a []int8 to the *Event context. +func (e *Event) Uints8(key string, i []uint8) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUints8(enc.AppendKey(e.buf, key), i) + return e +} + +// Uint16 adds the field key with i as a uint16 to the *Event context. 
+func (e *Event) Uint16(key string, i uint16) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUint16(enc.AppendKey(e.buf, key), i) + return e +} + +// Uints16 adds the field key with i as a []int16 to the *Event context. +func (e *Event) Uints16(key string, i []uint16) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUints16(enc.AppendKey(e.buf, key), i) + return e +} + +// Uint32 adds the field key with i as a uint32 to the *Event context. +func (e *Event) Uint32(key string, i uint32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUint32(enc.AppendKey(e.buf, key), i) + return e +} + +// Uints32 adds the field key with i as a []int32 to the *Event context. +func (e *Event) Uints32(key string, i []uint32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUints32(enc.AppendKey(e.buf, key), i) + return e +} + +// Uint64 adds the field key with i as a uint64 to the *Event context. +func (e *Event) Uint64(key string, i uint64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUint64(enc.AppendKey(e.buf, key), i) + return e +} + +// Uints64 adds the field key with i as a []int64 to the *Event context. +func (e *Event) Uints64(key string, i []uint64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUints64(enc.AppendKey(e.buf, key), i) + return e +} + +// Float32 adds the field key with f as a float32 to the *Event context. +func (e *Event) Float32(key string, f float32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendFloat32(enc.AppendKey(e.buf, key), f) + return e +} + +// Floats32 adds the field key with f as a []float32 to the *Event context. +func (e *Event) Floats32(key string, f []float32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendFloats32(enc.AppendKey(e.buf, key), f) + return e +} + +// Float64 adds the field key with f as a float64 to the *Event context. 
+func (e *Event) Float64(key string, f float64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendFloat64(enc.AppendKey(e.buf, key), f) + return e +} + +// Floats64 adds the field key with f as a []float64 to the *Event context. +func (e *Event) Floats64(key string, f []float64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendFloats64(enc.AppendKey(e.buf, key), f) + return e +} + +// Timestamp adds the current local time as UNIX timestamp to the *Event context with the "time" key. +// To customize the key name, change zerolog.TimestampFieldName. +// +// NOTE: It won't dedupe the "time" key if the *Event (or *Context) has one +// already. +func (e *Event) Timestamp() *Event { + if e == nil { + return e + } + e.buf = enc.AppendTime(enc.AppendKey(e.buf, TimestampFieldName), TimestampFunc(), TimeFieldFormat) + return e +} + +// Time adds the field key with t formatted as string using zerolog.TimeFieldFormat. +func (e *Event) Time(key string, t time.Time) *Event { + if e == nil { + return e + } + e.buf = enc.AppendTime(enc.AppendKey(e.buf, key), t, TimeFieldFormat) + return e +} + +// Times adds the field key with t formatted as string using zerolog.TimeFieldFormat. +func (e *Event) Times(key string, t []time.Time) *Event { + if e == nil { + return e + } + e.buf = enc.AppendTimes(enc.AppendKey(e.buf, key), t, TimeFieldFormat) + return e +} + +// Dur adds the field key with duration d stored as zerolog.DurationFieldUnit. +// If zerolog.DurationFieldInteger is true, durations are rendered as integer +// instead of float. +func (e *Event) Dur(key string, d time.Duration) *Event { + if e == nil { + return e + } + e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger) + return e +} + +// Durs adds the field key with duration d stored as zerolog.DurationFieldUnit. +// If zerolog.DurationFieldInteger is true, durations are rendered as integer +// instead of float. 
+func (e *Event) Durs(key string, d []time.Duration) *Event { + if e == nil { + return e + } + e.buf = enc.AppendDurations(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger) + return e +} + +// TimeDiff adds the field key with positive duration between time t and start. +// If time t is not greater than start, duration will be 0. +// Duration format follows the same principle as Dur(). +func (e *Event) TimeDiff(key string, t time.Time, start time.Time) *Event { + if e == nil { + return e + } + var d time.Duration + if t.After(start) { + d = t.Sub(start) + } + e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger) + return e +} + +// Interface adds the field key with i marshaled using reflection. +func (e *Event) Interface(key string, i interface{}) *Event { + if e == nil { + return e + } + if obj, ok := i.(LogObjectMarshaler); ok { + return e.Object(key, obj) + } + e.buf = enc.AppendInterface(enc.AppendKey(e.buf, key), i) + return e +} + +// CallerSkipFrame instructs any future Caller calls to skip the specified number of frames. +// This includes those added via hooks from the context. +func (e *Event) CallerSkipFrame(skip int) *Event { + if e == nil { + return e + } + e.skipFrame += skip + return e +} + +// Caller adds the file:line of the caller with the zerolog.CallerFieldName key. 
+// The argument skip is the number of stack frames to ascend +// Skip If not passed, use the global variable CallerSkipFrameCount +func (e *Event) Caller(skip ...int) *Event { + sk := CallerSkipFrameCount + if len(skip) > 0 { + sk = skip[0] + CallerSkipFrameCount + } + return e.caller(sk) +} + +func (e *Event) caller(skip int) *Event { + if e == nil { + return e + } + pc, file, line, ok := runtime.Caller(skip + e.skipFrame) + if !ok { + return e + } + e.buf = enc.AppendString(enc.AppendKey(e.buf, CallerFieldName), CallerMarshalFunc(pc, file, line)) + return e +} + +// IPAddr adds IPv4 or IPv6 Address to the event +func (e *Event) IPAddr(key string, ip net.IP) *Event { + if e == nil { + return e + } + e.buf = enc.AppendIPAddr(enc.AppendKey(e.buf, key), ip) + return e +} + +// IPPrefix adds IPv4 or IPv6 Prefix (address and mask) to the event +func (e *Event) IPPrefix(key string, pfx net.IPNet) *Event { + if e == nil { + return e + } + e.buf = enc.AppendIPPrefix(enc.AppendKey(e.buf, key), pfx) + return e +} + +// MACAddr adds MAC address to the event +func (e *Event) MACAddr(key string, ha net.HardwareAddr) *Event { + if e == nil { + return e + } + e.buf = enc.AppendMACAddr(enc.AppendKey(e.buf, key), ha) + return e +} diff --git a/vendor/github.com/rs/zerolog/fields.go b/vendor/github.com/rs/zerolog/fields.go new file mode 100644 index 000000000..c1eb5ce79 --- /dev/null +++ b/vendor/github.com/rs/zerolog/fields.go @@ -0,0 +1,277 @@ +package zerolog + +import ( + "encoding/json" + "net" + "sort" + "time" + "unsafe" +) + +func isNilValue(i interface{}) bool { + return (*[2]uintptr)(unsafe.Pointer(&i))[1] == 0 +} + +func appendFields(dst []byte, fields interface{}) []byte { + switch fields := fields.(type) { + case []interface{}: + if n := len(fields); n&0x1 == 1 { // odd number + fields = fields[:n-1] + } + dst = appendFieldList(dst, fields) + case map[string]interface{}: + keys := make([]string, 0, len(fields)) + for key := range fields { + keys = append(keys, key) + 
} + sort.Strings(keys) + kv := make([]interface{}, 2) + for _, key := range keys { + kv[0], kv[1] = key, fields[key] + dst = appendFieldList(dst, kv) + } + } + return dst +} + +func appendFieldList(dst []byte, kvList []interface{}) []byte { + for i, n := 0, len(kvList); i < n; i += 2 { + key, val := kvList[i], kvList[i+1] + if key, ok := key.(string); ok { + dst = enc.AppendKey(dst, key) + } else { + continue + } + if val, ok := val.(LogObjectMarshaler); ok { + e := newEvent(nil, 0) + e.buf = e.buf[:0] + e.appendObject(val) + dst = append(dst, e.buf...) + putEvent(e) + continue + } + switch val := val.(type) { + case string: + dst = enc.AppendString(dst, val) + case []byte: + dst = enc.AppendBytes(dst, val) + case error: + switch m := ErrorMarshalFunc(val).(type) { + case LogObjectMarshaler: + e := newEvent(nil, 0) + e.buf = e.buf[:0] + e.appendObject(m) + dst = append(dst, e.buf...) + putEvent(e) + case error: + if m == nil || isNilValue(m) { + dst = enc.AppendNil(dst) + } else { + dst = enc.AppendString(dst, m.Error()) + } + case string: + dst = enc.AppendString(dst, m) + default: + dst = enc.AppendInterface(dst, m) + } + case []error: + dst = enc.AppendArrayStart(dst) + for i, err := range val { + switch m := ErrorMarshalFunc(err).(type) { + case LogObjectMarshaler: + e := newEvent(nil, 0) + e.buf = e.buf[:0] + e.appendObject(m) + dst = append(dst, e.buf...) 
+ putEvent(e) + case error: + if m == nil || isNilValue(m) { + dst = enc.AppendNil(dst) + } else { + dst = enc.AppendString(dst, m.Error()) + } + case string: + dst = enc.AppendString(dst, m) + default: + dst = enc.AppendInterface(dst, m) + } + + if i < (len(val) - 1) { + enc.AppendArrayDelim(dst) + } + } + dst = enc.AppendArrayEnd(dst) + case bool: + dst = enc.AppendBool(dst, val) + case int: + dst = enc.AppendInt(dst, val) + case int8: + dst = enc.AppendInt8(dst, val) + case int16: + dst = enc.AppendInt16(dst, val) + case int32: + dst = enc.AppendInt32(dst, val) + case int64: + dst = enc.AppendInt64(dst, val) + case uint: + dst = enc.AppendUint(dst, val) + case uint8: + dst = enc.AppendUint8(dst, val) + case uint16: + dst = enc.AppendUint16(dst, val) + case uint32: + dst = enc.AppendUint32(dst, val) + case uint64: + dst = enc.AppendUint64(dst, val) + case float32: + dst = enc.AppendFloat32(dst, val) + case float64: + dst = enc.AppendFloat64(dst, val) + case time.Time: + dst = enc.AppendTime(dst, val, TimeFieldFormat) + case time.Duration: + dst = enc.AppendDuration(dst, val, DurationFieldUnit, DurationFieldInteger) + case *string: + if val != nil { + dst = enc.AppendString(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *bool: + if val != nil { + dst = enc.AppendBool(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int: + if val != nil { + dst = enc.AppendInt(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int8: + if val != nil { + dst = enc.AppendInt8(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int16: + if val != nil { + dst = enc.AppendInt16(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int32: + if val != nil { + dst = enc.AppendInt32(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int64: + if val != nil { + dst = enc.AppendInt64(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint: + if val != nil { + dst = enc.AppendUint(dst, *val) + } else { + dst = 
enc.AppendNil(dst) + } + case *uint8: + if val != nil { + dst = enc.AppendUint8(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint16: + if val != nil { + dst = enc.AppendUint16(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint32: + if val != nil { + dst = enc.AppendUint32(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint64: + if val != nil { + dst = enc.AppendUint64(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *float32: + if val != nil { + dst = enc.AppendFloat32(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *float64: + if val != nil { + dst = enc.AppendFloat64(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *time.Time: + if val != nil { + dst = enc.AppendTime(dst, *val, TimeFieldFormat) + } else { + dst = enc.AppendNil(dst) + } + case *time.Duration: + if val != nil { + dst = enc.AppendDuration(dst, *val, DurationFieldUnit, DurationFieldInteger) + } else { + dst = enc.AppendNil(dst) + } + case []string: + dst = enc.AppendStrings(dst, val) + case []bool: + dst = enc.AppendBools(dst, val) + case []int: + dst = enc.AppendInts(dst, val) + case []int8: + dst = enc.AppendInts8(dst, val) + case []int16: + dst = enc.AppendInts16(dst, val) + case []int32: + dst = enc.AppendInts32(dst, val) + case []int64: + dst = enc.AppendInts64(dst, val) + case []uint: + dst = enc.AppendUints(dst, val) + // case []uint8: + // dst = enc.AppendUints8(dst, val) + case []uint16: + dst = enc.AppendUints16(dst, val) + case []uint32: + dst = enc.AppendUints32(dst, val) + case []uint64: + dst = enc.AppendUints64(dst, val) + case []float32: + dst = enc.AppendFloats32(dst, val) + case []float64: + dst = enc.AppendFloats64(dst, val) + case []time.Time: + dst = enc.AppendTimes(dst, val, TimeFieldFormat) + case []time.Duration: + dst = enc.AppendDurations(dst, val, DurationFieldUnit, DurationFieldInteger) + case nil: + dst = enc.AppendNil(dst) + case net.IP: + dst = enc.AppendIPAddr(dst, val) + case net.IPNet: + 
dst = enc.AppendIPPrefix(dst, val) + case net.HardwareAddr: + dst = enc.AppendMACAddr(dst, val) + case json.RawMessage: + dst = appendJSON(dst, val) + default: + dst = enc.AppendInterface(dst, val) + } + } + return dst +} diff --git a/vendor/github.com/rs/zerolog/globals.go b/vendor/github.com/rs/zerolog/globals.go new file mode 100644 index 000000000..e1067deb6 --- /dev/null +++ b/vendor/github.com/rs/zerolog/globals.go @@ -0,0 +1,142 @@ +package zerolog + +import ( + "encoding/json" + "strconv" + "sync/atomic" + "time" +) + +const ( + // TimeFormatUnix defines a time format that makes time fields to be + // serialized as Unix timestamp integers. + TimeFormatUnix = "" + + // TimeFormatUnixMs defines a time format that makes time fields to be + // serialized as Unix timestamp integers in milliseconds. + TimeFormatUnixMs = "UNIXMS" + + // TimeFormatUnixMicro defines a time format that makes time fields to be + // serialized as Unix timestamp integers in microseconds. + TimeFormatUnixMicro = "UNIXMICRO" + + // TimeFormatUnixNano defines a time format that makes time fields to be + // serialized as Unix timestamp integers in nanoseconds. + TimeFormatUnixNano = "UNIXNANO" +) + +var ( + // TimestampFieldName is the field name used for the timestamp field. + TimestampFieldName = "time" + + // LevelFieldName is the field name used for the level field. + LevelFieldName = "level" + + // LevelTraceValue is the value used for the trace level field. + LevelTraceValue = "trace" + // LevelDebugValue is the value used for the debug level field. + LevelDebugValue = "debug" + // LevelInfoValue is the value used for the info level field. + LevelInfoValue = "info" + // LevelWarnValue is the value used for the warn level field. + LevelWarnValue = "warn" + // LevelErrorValue is the value used for the error level field. + LevelErrorValue = "error" + // LevelFatalValue is the value used for the fatal level field. 
+ LevelFatalValue = "fatal" + // LevelPanicValue is the value used for the panic level field. + LevelPanicValue = "panic" + + // LevelFieldMarshalFunc allows customization of global level field marshaling. + LevelFieldMarshalFunc = func(l Level) string { + return l.String() + } + + // MessageFieldName is the field name used for the message field. + MessageFieldName = "message" + + // ErrorFieldName is the field name used for error fields. + ErrorFieldName = "error" + + // CallerFieldName is the field name used for caller field. + CallerFieldName = "caller" + + // CallerSkipFrameCount is the number of stack frames to skip to find the caller. + CallerSkipFrameCount = 2 + + // CallerMarshalFunc allows customization of global caller marshaling + CallerMarshalFunc = func(pc uintptr, file string, line int) string { + return file + ":" + strconv.Itoa(line) + } + + // ErrorStackFieldName is the field name used for error stacks. + ErrorStackFieldName = "stack" + + // ErrorStackMarshaler extract the stack from err if any. + ErrorStackMarshaler func(err error) interface{} + + // ErrorMarshalFunc allows customization of global error marshaling + ErrorMarshalFunc = func(err error) interface{} { + return err + } + + // InterfaceMarshalFunc allows customization of interface marshaling. + // Default: "encoding/json.Marshal" + InterfaceMarshalFunc = json.Marshal + + // TimeFieldFormat defines the time format of the Time field type. If set to + // TimeFormatUnix, TimeFormatUnixMs, TimeFormatUnixMicro or TimeFormatUnixNano, the time is formatted as a UNIX + // timestamp as integer. + TimeFieldFormat = time.RFC3339 + + // TimestampFunc defines the function called to generate a timestamp. + TimestampFunc = time.Now + + // DurationFieldUnit defines the unit for time.Duration type fields added + // using the Dur method. + DurationFieldUnit = time.Millisecond + + // DurationFieldInteger renders Dur fields as integer instead of float if + // set to true. 
+ DurationFieldInteger = false + + // ErrorHandler is called whenever zerolog fails to write an event on its + // output. If not set, an error is printed on the stderr. This handler must + // be thread safe and non-blocking. + ErrorHandler func(err error) + + // DefaultContextLogger is returned from Ctx() if there is no logger associated + // with the context. + DefaultContextLogger *Logger +) + +var ( + gLevel = new(int32) + disableSampling = new(int32) +) + +// SetGlobalLevel sets the global override for log level. If this +// values is raised, all Loggers will use at least this value. +// +// To globally disable logs, set GlobalLevel to Disabled. +func SetGlobalLevel(l Level) { + atomic.StoreInt32(gLevel, int32(l)) +} + +// GlobalLevel returns the current global log level +func GlobalLevel() Level { + return Level(atomic.LoadInt32(gLevel)) +} + +// DisableSampling will disable sampling in all Loggers if true. +func DisableSampling(v bool) { + var i int32 + if v { + i = 1 + } + atomic.StoreInt32(disableSampling, i) +} + +func samplingDisabled() bool { + return atomic.LoadInt32(disableSampling) == 1 +} diff --git a/vendor/github.com/rs/zerolog/go112.go b/vendor/github.com/rs/zerolog/go112.go new file mode 100644 index 000000000..e7b5a1bdc --- /dev/null +++ b/vendor/github.com/rs/zerolog/go112.go @@ -0,0 +1,7 @@ +// +build go1.12 + +package zerolog + +// Since go 1.12, some auto generated init functions are hidden from +// runtime.Caller. +const contextCallerSkipFrameCount = 2 diff --git a/vendor/github.com/rs/zerolog/hook.go b/vendor/github.com/rs/zerolog/hook.go new file mode 100644 index 000000000..ec6effc1a --- /dev/null +++ b/vendor/github.com/rs/zerolog/hook.go @@ -0,0 +1,64 @@ +package zerolog + +// Hook defines an interface to a log hook. +type Hook interface { + // Run runs the hook with the event. + Run(e *Event, level Level, message string) +} + +// HookFunc is an adaptor to allow the use of an ordinary function +// as a Hook. 
+type HookFunc func(e *Event, level Level, message string) + +// Run implements the Hook interface. +func (h HookFunc) Run(e *Event, level Level, message string) { + h(e, level, message) +} + +// LevelHook applies a different hook for each level. +type LevelHook struct { + NoLevelHook, TraceHook, DebugHook, InfoHook, WarnHook, ErrorHook, FatalHook, PanicHook Hook +} + +// Run implements the Hook interface. +func (h LevelHook) Run(e *Event, level Level, message string) { + switch level { + case TraceLevel: + if h.TraceHook != nil { + h.TraceHook.Run(e, level, message) + } + case DebugLevel: + if h.DebugHook != nil { + h.DebugHook.Run(e, level, message) + } + case InfoLevel: + if h.InfoHook != nil { + h.InfoHook.Run(e, level, message) + } + case WarnLevel: + if h.WarnHook != nil { + h.WarnHook.Run(e, level, message) + } + case ErrorLevel: + if h.ErrorHook != nil { + h.ErrorHook.Run(e, level, message) + } + case FatalLevel: + if h.FatalHook != nil { + h.FatalHook.Run(e, level, message) + } + case PanicLevel: + if h.PanicHook != nil { + h.PanicHook.Run(e, level, message) + } + case NoLevel: + if h.NoLevelHook != nil { + h.NoLevelHook.Run(e, level, message) + } + } +} + +// NewLevelHook returns a new LevelHook. +func NewLevelHook() LevelHook { + return LevelHook{} +} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/README.md b/vendor/github.com/rs/zerolog/internal/cbor/README.md new file mode 100644 index 000000000..92c2e8c7f --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/cbor/README.md @@ -0,0 +1,56 @@ +## Reference: + CBOR Encoding is described in [RFC7049](https://tools.ietf.org/html/rfc7049) + +## Comparison of JSON vs CBOR + +Two main areas of reduction are: + +1. CPU usage to write a log msg +2. Size (in bytes) of log messages. 
+ + +CPU Usage savings are below: +``` +name JSON time/op CBOR time/op delta +Info-32 15.3ns ± 1% 11.7ns ± 3% -23.78% (p=0.000 n=9+10) +ContextFields-32 16.2ns ± 2% 12.3ns ± 3% -23.97% (p=0.000 n=9+9) +ContextAppend-32 6.70ns ± 0% 6.20ns ± 0% -7.44% (p=0.000 n=9+9) +LogFields-32 66.4ns ± 0% 24.6ns ± 2% -62.89% (p=0.000 n=10+9) +LogArrayObject-32 911ns ±11% 768ns ± 6% -15.64% (p=0.000 n=10+10) +LogFieldType/Floats-32 70.3ns ± 2% 29.5ns ± 1% -57.98% (p=0.000 n=10+10) +LogFieldType/Err-32 14.0ns ± 3% 12.1ns ± 8% -13.20% (p=0.000 n=8+10) +LogFieldType/Dur-32 17.2ns ± 2% 13.1ns ± 1% -24.27% (p=0.000 n=10+9) +LogFieldType/Object-32 54.3ns ±11% 52.3ns ± 7% ~ (p=0.239 n=10+10) +LogFieldType/Ints-32 20.3ns ± 2% 15.1ns ± 2% -25.50% (p=0.000 n=9+10) +LogFieldType/Interfaces-32 642ns ±11% 621ns ± 9% ~ (p=0.118 n=10+10) +LogFieldType/Interface(Objects)-32 635ns ±13% 632ns ± 9% ~ (p=0.592 n=10+10) +LogFieldType/Times-32 294ns ± 0% 27ns ± 1% -90.71% (p=0.000 n=10+9) +LogFieldType/Durs-32 121ns ± 0% 33ns ± 2% -72.44% (p=0.000 n=9+9) +LogFieldType/Interface(Object)-32 56.6ns ± 8% 52.3ns ± 8% -7.54% (p=0.007 n=10+10) +LogFieldType/Errs-32 17.8ns ± 3% 16.1ns ± 2% -9.71% (p=0.000 n=10+9) +LogFieldType/Time-32 40.5ns ± 1% 12.7ns ± 6% -68.66% (p=0.000 n=8+9) +LogFieldType/Bool-32 12.0ns ± 5% 10.2ns ± 2% -15.18% (p=0.000 n=10+8) +LogFieldType/Bools-32 17.2ns ± 2% 12.6ns ± 4% -26.63% (p=0.000 n=10+10) +LogFieldType/Int-32 12.3ns ± 2% 11.2ns ± 4% -9.27% (p=0.000 n=9+10) +LogFieldType/Float-32 16.7ns ± 1% 12.6ns ± 2% -24.42% (p=0.000 n=7+9) +LogFieldType/Str-32 12.7ns ± 7% 11.3ns ± 7% -10.88% (p=0.000 n=10+9) +LogFieldType/Strs-32 20.3ns ± 3% 18.2ns ± 3% -10.25% (p=0.000 n=9+10) +LogFieldType/Interface-32 183ns ±12% 175ns ± 9% ~ (p=0.078 n=10+10) +``` + +Log message size savings is greatly dependent on the number and type of fields in the log message. +Assuming this log message (with an Integer, timestamp and string, in addition to level). 
+ +`{"level":"error","Fault":41650,"time":"2018-04-01T15:18:19-07:00","message":"Some Message"}` + +Two measurements were done for the log file sizes - one without any compression, second +using [compress/zlib](https://golang.org/pkg/compress/zlib/). + +Results for 10,000 log messages: + +| Log Format | Plain File Size (in KB) | Compressed File Size (in KB) | +| :--- | :---: | :---: | +| JSON | 920 | 28 | +| CBOR | 550 | 28 | + +The example used to calculate the above data is available in [Examples](examples). diff --git a/vendor/github.com/rs/zerolog/internal/cbor/base.go b/vendor/github.com/rs/zerolog/internal/cbor/base.go new file mode 100644 index 000000000..51fe86c9b --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/cbor/base.go @@ -0,0 +1,19 @@ +package cbor + +// JSONMarshalFunc is used to marshal interface to JSON encoded byte slice. +// Making it package level instead of embedded in Encoder brings +// some extra efforts at importing, but avoids value copy when the functions +// of Encoder being invoked. +// DO REMEMBER to set this variable at importing, or +// you might get a nil pointer dereference panic at runtime. +var JSONMarshalFunc func(v interface{}) ([]byte, error) + +type Encoder struct{} + +// AppendKey adds a key (string) to the binary encoded log message +func (e Encoder) AppendKey(dst []byte, key string) []byte { + if len(dst) < 1 { + dst = e.AppendBeginMarker(dst) + } + return e.AppendString(dst, key) +} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/cbor.go b/vendor/github.com/rs/zerolog/internal/cbor/cbor.go new file mode 100644 index 000000000..bc54e37a7 --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/cbor/cbor.go @@ -0,0 +1,101 @@ +// Package cbor provides primitives for storing different data +// in the CBOR (binary) format. CBOR is defined in RFC7049. +package cbor + +import "time" + +const ( + majorOffset = 5 + additionalMax = 23 + + // Non Values. 
+ additionalTypeBoolFalse byte = 20 + additionalTypeBoolTrue byte = 21 + additionalTypeNull byte = 22 + + // Integer (+ve and -ve) Sub-types. + additionalTypeIntUint8 byte = 24 + additionalTypeIntUint16 byte = 25 + additionalTypeIntUint32 byte = 26 + additionalTypeIntUint64 byte = 27 + + // Float Sub-types. + additionalTypeFloat16 byte = 25 + additionalTypeFloat32 byte = 26 + additionalTypeFloat64 byte = 27 + additionalTypeBreak byte = 31 + + // Tag Sub-types. + additionalTypeTimestamp byte = 01 + + // Extended Tags - from https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml + additionalTypeTagNetworkAddr uint16 = 260 + additionalTypeTagNetworkPrefix uint16 = 261 + additionalTypeEmbeddedJSON uint16 = 262 + additionalTypeTagHexString uint16 = 263 + + // Unspecified number of elements. + additionalTypeInfiniteCount byte = 31 +) +const ( + majorTypeUnsignedInt byte = iota << majorOffset // Major type 0 + majorTypeNegativeInt // Major type 1 + majorTypeByteString // Major type 2 + majorTypeUtf8String // Major type 3 + majorTypeArray // Major type 4 + majorTypeMap // Major type 5 + majorTypeTags // Major type 6 + majorTypeSimpleAndFloat // Major type 7 +) + +const ( + maskOutAdditionalType byte = (7 << majorOffset) + maskOutMajorType byte = 31 +) + +const ( + float32Nan = "\xfa\x7f\xc0\x00\x00" + float32PosInfinity = "\xfa\x7f\x80\x00\x00" + float32NegInfinity = "\xfa\xff\x80\x00\x00" + float64Nan = "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00" + float64PosInfinity = "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00" + float64NegInfinity = "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00" +) + +// IntegerTimeFieldFormat indicates the format of timestamp decoded +// from an integer (time in seconds). +var IntegerTimeFieldFormat = time.RFC3339 + +// NanoTimeFieldFormat indicates the format of timestamp decoded +// from a float value (time in seconds and nanoseconds). 
+var NanoTimeFieldFormat = time.RFC3339Nano + +func appendCborTypePrefix(dst []byte, major byte, number uint64) []byte { + byteCount := 8 + var minor byte + switch { + case number < 256: + byteCount = 1 + minor = additionalTypeIntUint8 + + case number < 65536: + byteCount = 2 + minor = additionalTypeIntUint16 + + case number < 4294967296: + byteCount = 4 + minor = additionalTypeIntUint32 + + default: + byteCount = 8 + minor = additionalTypeIntUint64 + + } + + dst = append(dst, major|minor) + byteCount-- + for ; byteCount >= 0; byteCount-- { + dst = append(dst, byte(number>>(uint(byteCount)*8))) + } + return dst +} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go b/vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go new file mode 100644 index 000000000..fc16f98c7 --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go @@ -0,0 +1,614 @@ +package cbor + +// This file contains code to decode a stream of CBOR Data into JSON. + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "net" + "runtime" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +var decodeTimeZone *time.Location + +const hexTable = "0123456789abcdef" + +const isFloat32 = 4 +const isFloat64 = 8 + +func readNBytes(src *bufio.Reader, n int) []byte { + ret := make([]byte, n) + for i := 0; i < n; i++ { + ch, e := src.ReadByte() + if e != nil { + panic(fmt.Errorf("Tried to Read %d Bytes.. But hit end of file", n)) + } + ret[i] = ch + } + return ret +} + +func readByte(src *bufio.Reader) byte { + b, e := src.ReadByte() + if e != nil { + panic(fmt.Errorf("Tried to Read 1 Byte.. 
But hit end of file")) + } + return b +} + +func decodeIntAdditionalType(src *bufio.Reader, minor byte) int64 { + val := int64(0) + if minor <= 23 { + val = int64(minor) + } else { + bytesToRead := 0 + switch minor { + case additionalTypeIntUint8: + bytesToRead = 1 + case additionalTypeIntUint16: + bytesToRead = 2 + case additionalTypeIntUint32: + bytesToRead = 4 + case additionalTypeIntUint64: + bytesToRead = 8 + default: + panic(fmt.Errorf("Invalid Additional Type: %d in decodeInteger (expected <28)", minor)) + } + pb := readNBytes(src, bytesToRead) + for i := 0; i < bytesToRead; i++ { + val = val * 256 + val += int64(pb[i]) + } + } + return val +} + +func decodeInteger(src *bufio.Reader) int64 { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeUnsignedInt && major != majorTypeNegativeInt { + panic(fmt.Errorf("Major type is: %d in decodeInteger!! (expected 0 or 1)", major)) + } + val := decodeIntAdditionalType(src, minor) + if major == 0 { + return val + } + return (-1 - val) +} + +func decodeFloat(src *bufio.Reader) (float64, int) { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeSimpleAndFloat { + panic(fmt.Errorf("Incorrect Major type is: %d in decodeFloat", major)) + } + + switch minor { + case additionalTypeFloat16: + panic(fmt.Errorf("float16 is not suppported in decodeFloat")) + + case additionalTypeFloat32: + pb := readNBytes(src, 4) + switch string(pb) { + case float32Nan: + return math.NaN(), isFloat32 + case float32PosInfinity: + return math.Inf(0), isFloat32 + case float32NegInfinity: + return math.Inf(-1), isFloat32 + } + n := uint32(0) + for i := 0; i < 4; i++ { + n = n * 256 + n += uint32(pb[i]) + } + val := math.Float32frombits(n) + return float64(val), isFloat32 + case additionalTypeFloat64: + pb := readNBytes(src, 8) + switch string(pb) { + case float64Nan: + return math.NaN(), isFloat64 + case 
float64PosInfinity: + return math.Inf(0), isFloat64 + case float64NegInfinity: + return math.Inf(-1), isFloat64 + } + n := uint64(0) + for i := 0; i < 8; i++ { + n = n * 256 + n += uint64(pb[i]) + } + val := math.Float64frombits(n) + return val, isFloat64 + } + panic(fmt.Errorf("Invalid Additional Type: %d in decodeFloat", minor)) +} + +func decodeStringComplex(dst []byte, s string, pos uint) []byte { + i := int(pos) + start := 0 + + for i < len(s) { + b := s[i] + if b >= utf8.RuneSelf { + r, size := utf8.DecodeRuneInString(s[i:]) + if r == utf8.RuneError && size == 1 { + // In case of error, first append previous simple characters to + // the byte slice if any and append a replacement character code + // in place of the invalid sequence. + if start < i { + dst = append(dst, s[start:i]...) + } + dst = append(dst, `\ufffd`...) + i += size + start = i + continue + } + i += size + continue + } + if b >= 0x20 && b <= 0x7e && b != '\\' && b != '"' { + i++ + continue + } + // We encountered a character that needs to be encoded. + // Let's append the previous simple characters to the byte slice + // and switch our operation to read and encode the remainder + // characters byte-by-byte. + if start < i { + dst = append(dst, s[start:i]...) + } + switch b { + case '"', '\\': + dst = append(dst, '\\', b) + case '\b': + dst = append(dst, '\\', 'b') + case '\f': + dst = append(dst, '\\', 'f') + case '\n': + dst = append(dst, '\\', 'n') + case '\r': + dst = append(dst, '\\', 'r') + case '\t': + dst = append(dst, '\\', 't') + default: + dst = append(dst, '\\', 'u', '0', '0', hexTable[b>>4], hexTable[b&0xF]) + } + i++ + start = i + } + if start < len(s) { + dst = append(dst, s[start:]...) 
+ } + return dst +} + +func decodeString(src *bufio.Reader, noQuotes bool) []byte { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeByteString { + panic(fmt.Errorf("Major type is: %d in decodeString", major)) + } + result := []byte{} + if !noQuotes { + result = append(result, '"') + } + length := decodeIntAdditionalType(src, minor) + len := int(length) + pbs := readNBytes(src, len) + result = append(result, pbs...) + if noQuotes { + return result + } + return append(result, '"') +} + +func decodeUTF8String(src *bufio.Reader) []byte { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeUtf8String { + panic(fmt.Errorf("Major type is: %d in decodeUTF8String", major)) + } + result := []byte{'"'} + length := decodeIntAdditionalType(src, minor) + len := int(length) + pbs := readNBytes(src, len) + + for i := 0; i < len; i++ { + // Check if the character needs encoding. Control characters, slashes, + // and the double quote need json encoding. Bytes above the ascii + // boundary needs utf8 encoding. + if pbs[i] < 0x20 || pbs[i] > 0x7e || pbs[i] == '\\' || pbs[i] == '"' { + // We encountered a character that needs to be encoded. Switch + // to complex version of the algorithm. + dst := []byte{'"'} + dst = decodeStringComplex(dst, string(pbs), uint(i)) + return append(dst, '"') + } + } + // The string has no need for encoding and therefore is directly + // appended to the byte slice. + result = append(result, pbs...) 
+ return append(result, '"') +} + +func array2Json(src *bufio.Reader, dst io.Writer) { + dst.Write([]byte{'['}) + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeArray { + panic(fmt.Errorf("Major type is: %d in array2Json", major)) + } + len := 0 + unSpecifiedCount := false + if minor == additionalTypeInfiniteCount { + unSpecifiedCount = true + } else { + length := decodeIntAdditionalType(src, minor) + len = int(length) + } + for i := 0; unSpecifiedCount || i < len; i++ { + if unSpecifiedCount { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak { + readByte(src) + break + } + } + cbor2JsonOneObject(src, dst) + if unSpecifiedCount { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak { + readByte(src) + break + } + dst.Write([]byte{','}) + } else if i+1 < len { + dst.Write([]byte{','}) + } + } + dst.Write([]byte{']'}) +} + +func map2Json(src *bufio.Reader, dst io.Writer) { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeMap { + panic(fmt.Errorf("Major type is: %d in map2Json", major)) + } + len := 0 + unSpecifiedCount := false + if minor == additionalTypeInfiniteCount { + unSpecifiedCount = true + } else { + length := decodeIntAdditionalType(src, minor) + len = int(length) + } + dst.Write([]byte{'{'}) + for i := 0; unSpecifiedCount || i < len; i++ { + if unSpecifiedCount { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak { + readByte(src) + break + } + } + cbor2JsonOneObject(src, dst) + if i%2 == 0 { + // Even position values are keys. 
+ dst.Write([]byte{':'}) + } else { + if unSpecifiedCount { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak { + readByte(src) + break + } + dst.Write([]byte{','}) + } else if i+1 < len { + dst.Write([]byte{','}) + } + } + } + dst.Write([]byte{'}'}) +} + +func decodeTagData(src *bufio.Reader) []byte { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeTags { + panic(fmt.Errorf("Major type is: %d in decodeTagData", major)) + } + switch minor { + case additionalTypeTimestamp: + return decodeTimeStamp(src) + + // Tag value is larger than 256 (so uint16). + case additionalTypeIntUint16: + val := decodeIntAdditionalType(src, minor) + + switch uint16(val) { + case additionalTypeEmbeddedJSON: + pb := readByte(src) + dataMajor := pb & maskOutAdditionalType + if dataMajor != majorTypeByteString { + panic(fmt.Errorf("Unsupported embedded Type: %d in decodeEmbeddedJSON", dataMajor)) + } + src.UnreadByte() + return decodeString(src, true) + + case additionalTypeTagNetworkAddr: + octets := decodeString(src, true) + ss := []byte{'"'} + switch len(octets) { + case 6: // MAC address. + ha := net.HardwareAddr(octets) + ss = append(append(ss, ha.String()...), '"') + case 4: // IPv4 address. + fallthrough + case 16: // IPv6 address. 
+ ip := net.IP(octets) + ss = append(append(ss, ip.String()...), '"') + default: + panic(fmt.Errorf("Unexpected Network Address length: %d (expected 4,6,16)", len(octets))) + } + return ss + + case additionalTypeTagNetworkPrefix: + pb := readByte(src) + if pb != majorTypeMap|0x1 { + panic(fmt.Errorf("IP Prefix is NOT of MAP of 1 elements as expected")) + } + octets := decodeString(src, true) + val := decodeInteger(src) + ip := net.IP(octets) + var mask net.IPMask + pfxLen := int(val) + if len(octets) == 4 { + mask = net.CIDRMask(pfxLen, 32) + } else { + mask = net.CIDRMask(pfxLen, 128) + } + ipPfx := net.IPNet{IP: ip, Mask: mask} + ss := []byte{'"'} + ss = append(append(ss, ipPfx.String()...), '"') + return ss + + case additionalTypeTagHexString: + octets := decodeString(src, true) + ss := []byte{'"'} + for _, v := range octets { + ss = append(ss, hexTable[v>>4], hexTable[v&0x0f]) + } + return append(ss, '"') + + default: + panic(fmt.Errorf("Unsupported Additional Tag Type: %d in decodeTagData", val)) + } + } + panic(fmt.Errorf("Unsupported Additional Type: %d in decodeTagData", minor)) +} + +func decodeTimeStamp(src *bufio.Reader) []byte { + pb := readByte(src) + src.UnreadByte() + tsMajor := pb & maskOutAdditionalType + if tsMajor == majorTypeUnsignedInt || tsMajor == majorTypeNegativeInt { + n := decodeInteger(src) + t := time.Unix(n, 0) + if decodeTimeZone != nil { + t = t.In(decodeTimeZone) + } else { + t = t.In(time.UTC) + } + tsb := []byte{} + tsb = append(tsb, '"') + tsb = t.AppendFormat(tsb, IntegerTimeFieldFormat) + tsb = append(tsb, '"') + return tsb + } else if tsMajor == majorTypeSimpleAndFloat { + n, _ := decodeFloat(src) + secs := int64(n) + n -= float64(secs) + n *= float64(1e9) + t := time.Unix(secs, int64(n)) + if decodeTimeZone != nil { + t = t.In(decodeTimeZone) + } else { + t = t.In(time.UTC) + } + tsb := []byte{} + tsb = append(tsb, '"') + tsb = t.AppendFormat(tsb, NanoTimeFieldFormat) + tsb = append(tsb, '"') + return tsb + } + 
panic(fmt.Errorf("TS format is neigther int nor float: %d", tsMajor)) +} + +func decodeSimpleFloat(src *bufio.Reader) []byte { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeSimpleAndFloat { + panic(fmt.Errorf("Major type is: %d in decodeSimpleFloat", major)) + } + switch minor { + case additionalTypeBoolTrue: + return []byte("true") + case additionalTypeBoolFalse: + return []byte("false") + case additionalTypeNull: + return []byte("null") + case additionalTypeFloat16: + fallthrough + case additionalTypeFloat32: + fallthrough + case additionalTypeFloat64: + src.UnreadByte() + v, bc := decodeFloat(src) + ba := []byte{} + switch { + case math.IsNaN(v): + return []byte("\"NaN\"") + case math.IsInf(v, 1): + return []byte("\"+Inf\"") + case math.IsInf(v, -1): + return []byte("\"-Inf\"") + } + if bc == isFloat32 { + ba = strconv.AppendFloat(ba, v, 'f', -1, 32) + } else if bc == isFloat64 { + ba = strconv.AppendFloat(ba, v, 'f', -1, 64) + } else { + panic(fmt.Errorf("Invalid Float precision from decodeFloat: %d", bc)) + } + return ba + default: + panic(fmt.Errorf("Invalid Additional Type: %d in decodeSimpleFloat", minor)) + } +} + +func cbor2JsonOneObject(src *bufio.Reader, dst io.Writer) { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + major := (pb[0] & maskOutAdditionalType) + + switch major { + case majorTypeUnsignedInt: + fallthrough + case majorTypeNegativeInt: + n := decodeInteger(src) + dst.Write([]byte(strconv.Itoa(int(n)))) + + case majorTypeByteString: + s := decodeString(src, false) + dst.Write(s) + + case majorTypeUtf8String: + s := decodeUTF8String(src) + dst.Write(s) + + case majorTypeArray: + array2Json(src, dst) + + case majorTypeMap: + map2Json(src, dst) + + case majorTypeTags: + s := decodeTagData(src) + dst.Write(s) + + case majorTypeSimpleAndFloat: + s := decodeSimpleFloat(src) + dst.Write(s) + } +} + +func moreBytesToRead(src *bufio.Reader) bool { + _, e := src.ReadByte() + 
if e == nil { + src.UnreadByte() + return true + } + return false +} + +// Cbor2JsonManyObjects decodes all the CBOR Objects read from src +// reader. It keeps on decoding until reader returns EOF (error when reading). +// Decoded string is written to the dst. At the end of every CBOR Object +// newline is written to the output stream. +// +// Returns error (if any) that was encountered during decode. +// The child functions will generate a panic when error is encountered and +// this function will recover non-runtime Errors and return the reason as error. +func Cbor2JsonManyObjects(src io.Reader, dst io.Writer) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + bufRdr := bufio.NewReader(src) + for moreBytesToRead(bufRdr) { + cbor2JsonOneObject(bufRdr, dst) + dst.Write([]byte("\n")) + } + return nil +} + +// Detect if the bytes to be printed is Binary or not. +func binaryFmt(p []byte) bool { + if len(p) > 0 && p[0] > 0x7F { + return true + } + return false +} + +func getReader(str string) *bufio.Reader { + return bufio.NewReader(strings.NewReader(str)) +} + +// DecodeIfBinaryToString converts a binary formatted log msg to a +// JSON formatted String Log message - suitable for printing to Console/Syslog. +func DecodeIfBinaryToString(in []byte) string { + if binaryFmt(in) { + var b bytes.Buffer + Cbor2JsonManyObjects(strings.NewReader(string(in)), &b) + return b.String() + } + return string(in) +} + +// DecodeObjectToStr checks if the input is a binary format, if so, +// it will decode a single Object and return the decoded string. +func DecodeObjectToStr(in []byte) string { + if binaryFmt(in) { + var b bytes.Buffer + cbor2JsonOneObject(getReader(string(in)), &b) + return b.String() + } + return string(in) +} + +// DecodeIfBinaryToBytes checks if the input is a binary format, if so, +// it will decode all Objects and return the decoded string as byte array. 
+func DecodeIfBinaryToBytes(in []byte) []byte { + if binaryFmt(in) { + var b bytes.Buffer + Cbor2JsonManyObjects(bytes.NewReader(in), &b) + return b.Bytes() + } + return in +} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/string.go b/vendor/github.com/rs/zerolog/internal/cbor/string.go new file mode 100644 index 000000000..a33890a5d --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/cbor/string.go @@ -0,0 +1,95 @@ +package cbor + +import "fmt" + +// AppendStrings encodes and adds an array of strings to the dst byte array. +func (e Encoder) AppendStrings(dst []byte, vals []string) []byte { + major := majorTypeArray + l := len(vals) + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendString(dst, v) + } + return dst +} + +// AppendString encodes and adds a string to the dst byte array. +func (Encoder) AppendString(dst []byte, s string) []byte { + major := majorTypeUtf8String + + l := len(s) + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, majorTypeUtf8String, uint64(l)) + } + return append(dst, s...) +} + +// AppendStringers encodes and adds an array of Stringer values +// to the dst byte array. +func (e Encoder) AppendStringers(dst []byte, vals []fmt.Stringer) []byte { + if len(vals) == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + dst = e.AppendArrayStart(dst) + dst = e.AppendStringer(dst, vals[0]) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = e.AppendStringer(dst, val) + } + } + return e.AppendArrayEnd(dst) +} + +// AppendStringer encodes and adds the Stringer value to the dst +// byte array. 
+func (e Encoder) AppendStringer(dst []byte, val fmt.Stringer) []byte { + if val == nil { + return e.AppendNil(dst) + } + return e.AppendString(dst, val.String()) +} + +// AppendBytes encodes and adds an array of bytes to the dst byte array. +func (Encoder) AppendBytes(dst, s []byte) []byte { + major := majorTypeByteString + + l := len(s) + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + return append(dst, s...) +} + +// AppendEmbeddedJSON adds a tag and embeds input JSON as such. +func AppendEmbeddedJSON(dst, s []byte) []byte { + major := majorTypeTags + minor := additionalTypeEmbeddedJSON + + // Append the TAG to indicate this is Embedded JSON. + dst = append(dst, major|additionalTypeIntUint16) + dst = append(dst, byte(minor>>8)) + dst = append(dst, byte(minor&0xff)) + + // Append the JSON Object as Byte String. + major = majorTypeByteString + + l := len(s) + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + return append(dst, s...) 
+} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/time.go b/vendor/github.com/rs/zerolog/internal/cbor/time.go new file mode 100644 index 000000000..d81fb1257 --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/cbor/time.go @@ -0,0 +1,93 @@ +package cbor + +import ( + "time" +) + +func appendIntegerTimestamp(dst []byte, t time.Time) []byte { + major := majorTypeTags + minor := additionalTypeTimestamp + dst = append(dst, major|minor) + secs := t.Unix() + var val uint64 + if secs < 0 { + major = majorTypeNegativeInt + val = uint64(-secs - 1) + } else { + major = majorTypeUnsignedInt + val = uint64(secs) + } + dst = appendCborTypePrefix(dst, major, val) + return dst +} + +func (e Encoder) appendFloatTimestamp(dst []byte, t time.Time) []byte { + major := majorTypeTags + minor := additionalTypeTimestamp + dst = append(dst, major|minor) + secs := t.Unix() + nanos := t.Nanosecond() + var val float64 + val = float64(secs)*1.0 + float64(nanos)*1e-9 + return e.AppendFloat64(dst, val) +} + +// AppendTime encodes and adds a timestamp to the dst byte array. +func (e Encoder) AppendTime(dst []byte, t time.Time, unused string) []byte { + utc := t.UTC() + if utc.Nanosecond() == 0 { + return appendIntegerTimestamp(dst, utc) + } + return e.appendFloatTimestamp(dst, utc) +} + +// AppendTimes encodes and adds an array of timestamps to the dst byte array. +func (e Encoder) AppendTimes(dst []byte, vals []time.Time, unused string) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + + for _, t := range vals { + dst = e.AppendTime(dst, t, unused) + } + return dst +} + +// AppendDuration encodes and adds a duration to the dst byte array. +// useInt field indicates whether to store the duration as seconds (integer) or +// as seconds+nanoseconds (float). 
+func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte { + if useInt { + return e.AppendInt64(dst, int64(d/unit)) + } + return e.AppendFloat64(dst, float64(d)/float64(unit)) +} + +// AppendDurations encodes and adds an array of durations to the dst byte array. +// useInt field indicates whether to store the duration as seconds (integer) or +// as seconds+nanoseconds (float). +func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, d := range vals { + dst = e.AppendDuration(dst, d, unit, useInt) + } + return dst +} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/types.go b/vendor/github.com/rs/zerolog/internal/cbor/types.go new file mode 100644 index 000000000..49316aa5b --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/cbor/types.go @@ -0,0 +1,477 @@ +package cbor + +import ( + "fmt" + "math" + "net" +) + +// AppendNil inserts a 'Nil' object into the dst byte array. +func (Encoder) AppendNil(dst []byte) []byte { + return append(dst, majorTypeSimpleAndFloat|additionalTypeNull) +} + +// AppendBeginMarker inserts a map start into the dst byte array. +func (Encoder) AppendBeginMarker(dst []byte) []byte { + return append(dst, majorTypeMap|additionalTypeInfiniteCount) +} + +// AppendEndMarker inserts a map end into the dst byte array. +func (Encoder) AppendEndMarker(dst []byte) []byte { + return append(dst, majorTypeSimpleAndFloat|additionalTypeBreak) +} + +// AppendObjectData takes an object in form of a byte array and appends to dst. +func (Encoder) AppendObjectData(dst []byte, o []byte) []byte { + // BeginMarker is present in the dst, which + // should not be copied when appending to existing data. 
+ return append(dst, o[1:]...) +} + +// AppendArrayStart adds markers to indicate the start of an array. +func (Encoder) AppendArrayStart(dst []byte) []byte { + return append(dst, majorTypeArray|additionalTypeInfiniteCount) +} + +// AppendArrayEnd adds markers to indicate the end of an array. +func (Encoder) AppendArrayEnd(dst []byte) []byte { + return append(dst, majorTypeSimpleAndFloat|additionalTypeBreak) +} + +// AppendArrayDelim adds markers to indicate end of a particular array element. +func (Encoder) AppendArrayDelim(dst []byte) []byte { + //No delimiters needed in cbor + return dst +} + +// AppendLineBreak is a noop that keep API compat with json encoder. +func (Encoder) AppendLineBreak(dst []byte) []byte { + // No line breaks needed in binary format. + return dst +} + +// AppendBool encodes and inserts a boolean value into the dst byte array. +func (Encoder) AppendBool(dst []byte, val bool) []byte { + b := additionalTypeBoolFalse + if val { + b = additionalTypeBoolTrue + } + return append(dst, majorTypeSimpleAndFloat|b) +} + +// AppendBools encodes and inserts an array of boolean values into the dst byte array. +func (e Encoder) AppendBools(dst []byte, vals []bool) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendBool(dst, v) + } + return dst +} + +// AppendInt encodes and inserts an integer value into the dst byte array. 
+func (Encoder) AppendInt(dst []byte, val int) []byte { + major := majorTypeUnsignedInt + contentVal := val + if val < 0 { + major = majorTypeNegativeInt + contentVal = -val - 1 + } + if contentVal <= additionalMax { + lb := byte(contentVal) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(contentVal)) + } + return dst +} + +// AppendInts encodes and inserts an array of integer values into the dst byte array. +func (e Encoder) AppendInts(dst []byte, vals []int) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt(dst, v) + } + return dst +} + +// AppendInt8 encodes and inserts an int8 value into the dst byte array. +func (e Encoder) AppendInt8(dst []byte, val int8) []byte { + return e.AppendInt(dst, int(val)) +} + +// AppendInts8 encodes and inserts an array of integer values into the dst byte array. +func (e Encoder) AppendInts8(dst []byte, vals []int8) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt(dst, int(v)) + } + return dst +} + +// AppendInt16 encodes and inserts a int16 value into the dst byte array. +func (e Encoder) AppendInt16(dst []byte, val int16) []byte { + return e.AppendInt(dst, int(val)) +} + +// AppendInts16 encodes and inserts an array of int16 values into the dst byte array. 
+func (e Encoder) AppendInts16(dst []byte, vals []int16) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt(dst, int(v)) + } + return dst +} + +// AppendInt32 encodes and inserts a int32 value into the dst byte array. +func (e Encoder) AppendInt32(dst []byte, val int32) []byte { + return e.AppendInt(dst, int(val)) +} + +// AppendInts32 encodes and inserts an array of int32 values into the dst byte array. +func (e Encoder) AppendInts32(dst []byte, vals []int32) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt(dst, int(v)) + } + return dst +} + +// AppendInt64 encodes and inserts a int64 value into the dst byte array. +func (Encoder) AppendInt64(dst []byte, val int64) []byte { + major := majorTypeUnsignedInt + contentVal := val + if val < 0 { + major = majorTypeNegativeInt + contentVal = -val - 1 + } + if contentVal <= additionalMax { + lb := byte(contentVal) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(contentVal)) + } + return dst +} + +// AppendInts64 encodes and inserts an array of int64 values into the dst byte array. 
+func (e Encoder) AppendInts64(dst []byte, vals []int64) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt64(dst, v) + } + return dst +} + +// AppendUint encodes and inserts an unsigned integer value into the dst byte array. +func (e Encoder) AppendUint(dst []byte, val uint) []byte { + return e.AppendInt64(dst, int64(val)) +} + +// AppendUints encodes and inserts an array of unsigned integer values into the dst byte array. +func (e Encoder) AppendUints(dst []byte, vals []uint) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint(dst, v) + } + return dst +} + +// AppendUint8 encodes and inserts a unsigned int8 value into the dst byte array. +func (e Encoder) AppendUint8(dst []byte, val uint8) []byte { + return e.AppendUint(dst, uint(val)) +} + +// AppendUints8 encodes and inserts an array of uint8 values into the dst byte array. +func (e Encoder) AppendUints8(dst []byte, vals []uint8) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint8(dst, v) + } + return dst +} + +// AppendUint16 encodes and inserts a uint16 value into the dst byte array. 
+func (e Encoder) AppendUint16(dst []byte, val uint16) []byte { + return e.AppendUint(dst, uint(val)) +} + +// AppendUints16 encodes and inserts an array of uint16 values into the dst byte array. +func (e Encoder) AppendUints16(dst []byte, vals []uint16) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint16(dst, v) + } + return dst +} + +// AppendUint32 encodes and inserts a uint32 value into the dst byte array. +func (e Encoder) AppendUint32(dst []byte, val uint32) []byte { + return e.AppendUint(dst, uint(val)) +} + +// AppendUints32 encodes and inserts an array of uint32 values into the dst byte array. +func (e Encoder) AppendUints32(dst []byte, vals []uint32) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint32(dst, v) + } + return dst +} + +// AppendUint64 encodes and inserts a uint64 value into the dst byte array. +func (Encoder) AppendUint64(dst []byte, val uint64) []byte { + major := majorTypeUnsignedInt + contentVal := val + if contentVal <= additionalMax { + lb := byte(contentVal) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, contentVal) + } + return dst +} + +// AppendUints64 encodes and inserts an array of uint64 values into the dst byte array. 
+func (e Encoder) AppendUints64(dst []byte, vals []uint64) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint64(dst, v) + } + return dst +} + +// AppendFloat32 encodes and inserts a single precision float value into the dst byte array. +func (Encoder) AppendFloat32(dst []byte, val float32) []byte { + switch { + case math.IsNaN(float64(val)): + return append(dst, "\xfa\x7f\xc0\x00\x00"...) + case math.IsInf(float64(val), 1): + return append(dst, "\xfa\x7f\x80\x00\x00"...) + case math.IsInf(float64(val), -1): + return append(dst, "\xfa\xff\x80\x00\x00"...) + } + major := majorTypeSimpleAndFloat + subType := additionalTypeFloat32 + n := math.Float32bits(val) + var buf [4]byte + for i := uint(0); i < 4; i++ { + buf[i] = byte(n >> ((3 - i) * 8)) + } + return append(append(dst, major|subType), buf[0], buf[1], buf[2], buf[3]) +} + +// AppendFloats32 encodes and inserts an array of single precision float value into the dst byte array. +func (e Encoder) AppendFloats32(dst []byte, vals []float32) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendFloat32(dst, v) + } + return dst +} + +// AppendFloat64 encodes and inserts a double precision float value into the dst byte array. +func (Encoder) AppendFloat64(dst []byte, val float64) []byte { + switch { + case math.IsNaN(val): + return append(dst, "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00"...) + case math.IsInf(val, 1): + return append(dst, "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00"...) 
+ case math.IsInf(val, -1): + return append(dst, "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00"...) + } + major := majorTypeSimpleAndFloat + subType := additionalTypeFloat64 + n := math.Float64bits(val) + dst = append(dst, major|subType) + for i := uint(1); i <= 8; i++ { + b := byte(n >> ((8 - i) * 8)) + dst = append(dst, b) + } + return dst +} + +// AppendFloats64 encodes and inserts an array of double precision float values into the dst byte array. +func (e Encoder) AppendFloats64(dst []byte, vals []float64) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendFloat64(dst, v) + } + return dst +} + +// AppendInterface takes an arbitrary object and converts it to JSON and embeds it dst. +func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte { + marshaled, err := JSONMarshalFunc(i) + if err != nil { + return e.AppendString(dst, fmt.Sprintf("marshaling error: %v", err)) + } + return AppendEmbeddedJSON(dst, marshaled) +} + +// AppendIPAddr encodes and inserts an IP Address (IPv4 or IPv6). +func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte { + dst = append(dst, majorTypeTags|additionalTypeIntUint16) + dst = append(dst, byte(additionalTypeTagNetworkAddr>>8)) + dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff)) + return e.AppendBytes(dst, ip) +} + +// AppendIPPrefix encodes and inserts an IP Address Prefix (Address + Mask Length). +func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte { + dst = append(dst, majorTypeTags|additionalTypeIntUint16) + dst = append(dst, byte(additionalTypeTagNetworkPrefix>>8)) + dst = append(dst, byte(additionalTypeTagNetworkPrefix&0xff)) + + // Prefix is a tuple (aka MAP of 1 pair of elements) - + // first element is prefix, second is mask length. 
+ dst = append(dst, majorTypeMap|0x1) + dst = e.AppendBytes(dst, pfx.IP) + maskLen, _ := pfx.Mask.Size() + return e.AppendUint8(dst, uint8(maskLen)) +} + +// AppendMACAddr encodes and inserts a Hardware (MAC) address. +func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte { + dst = append(dst, majorTypeTags|additionalTypeIntUint16) + dst = append(dst, byte(additionalTypeTagNetworkAddr>>8)) + dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff)) + return e.AppendBytes(dst, ha) +} + +// AppendHex adds a TAG and inserts a hex bytes as a string. +func (e Encoder) AppendHex(dst []byte, val []byte) []byte { + dst = append(dst, majorTypeTags|additionalTypeIntUint16) + dst = append(dst, byte(additionalTypeTagHexString>>8)) + dst = append(dst, byte(additionalTypeTagHexString&0xff)) + return e.AppendBytes(dst, val) +} diff --git a/vendor/github.com/rs/zerolog/internal/json/base.go b/vendor/github.com/rs/zerolog/internal/json/base.go new file mode 100644 index 000000000..09ec59f4e --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/json/base.go @@ -0,0 +1,19 @@ +package json + +// JSONMarshalFunc is used to marshal interface to JSON encoded byte slice. +// Making it package level instead of embedded in Encoder brings +// some extra efforts at importing, but avoids value copy when the functions +// of Encoder being invoked. +// DO REMEMBER to set this variable at importing, or +// you might get a nil pointer dereference panic at runtime. +var JSONMarshalFunc func(v interface{}) ([]byte, error) + +type Encoder struct{} + +// AppendKey appends a new key to the output JSON. 
+func (e Encoder) AppendKey(dst []byte, key string) []byte { + if dst[len(dst)-1] != '{' { + dst = append(dst, ',') + } + return append(e.AppendString(dst, key), ':') +} diff --git a/vendor/github.com/rs/zerolog/internal/json/bytes.go b/vendor/github.com/rs/zerolog/internal/json/bytes.go new file mode 100644 index 000000000..de64120d1 --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/json/bytes.go @@ -0,0 +1,85 @@ +package json + +import "unicode/utf8" + +// AppendBytes is a mirror of appendString with []byte arg +func (Encoder) AppendBytes(dst, s []byte) []byte { + dst = append(dst, '"') + for i := 0; i < len(s); i++ { + if !noEscapeTable[s[i]] { + dst = appendBytesComplex(dst, s, i) + return append(dst, '"') + } + } + dst = append(dst, s...) + return append(dst, '"') +} + +// AppendHex encodes the input bytes to a hex string and appends +// the encoded string to the input byte slice. +// +// The operation loops though each byte and encodes it as hex using +// the hex lookup table. +func (Encoder) AppendHex(dst, s []byte) []byte { + dst = append(dst, '"') + for _, v := range s { + dst = append(dst, hex[v>>4], hex[v&0x0f]) + } + return append(dst, '"') +} + +// appendBytesComplex is a mirror of the appendStringComplex +// with []byte arg +func appendBytesComplex(dst, s []byte, i int) []byte { + start := 0 + for i < len(s) { + b := s[i] + if b >= utf8.RuneSelf { + r, size := utf8.DecodeRune(s[i:]) + if r == utf8.RuneError && size == 1 { + if start < i { + dst = append(dst, s[start:i]...) + } + dst = append(dst, `\ufffd`...) + i += size + start = i + continue + } + i += size + continue + } + if noEscapeTable[b] { + i++ + continue + } + // We encountered a character that needs to be encoded. + // Let's append the previous simple characters to the byte slice + // and switch our operation to read and encode the remainder + // characters byte-by-byte. + if start < i { + dst = append(dst, s[start:i]...) 
+ } + switch b { + case '"', '\\': + dst = append(dst, '\\', b) + case '\b': + dst = append(dst, '\\', 'b') + case '\f': + dst = append(dst, '\\', 'f') + case '\n': + dst = append(dst, '\\', 'n') + case '\r': + dst = append(dst, '\\', 'r') + case '\t': + dst = append(dst, '\\', 't') + default: + dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF]) + } + i++ + start = i + } + if start < len(s) { + dst = append(dst, s[start:]...) + } + return dst +} diff --git a/vendor/github.com/rs/zerolog/internal/json/string.go b/vendor/github.com/rs/zerolog/internal/json/string.go new file mode 100644 index 000000000..fd7770f2f --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/json/string.go @@ -0,0 +1,149 @@ +package json + +import ( + "fmt" + "unicode/utf8" +) + +const hex = "0123456789abcdef" + +var noEscapeTable = [256]bool{} + +func init() { + for i := 0; i <= 0x7e; i++ { + noEscapeTable[i] = i >= 0x20 && i != '\\' && i != '"' + } +} + +// AppendStrings encodes the input strings to json and +// appends the encoded string list to the input byte slice. +func (e Encoder) AppendStrings(dst []byte, vals []string) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = e.AppendString(dst, vals[0]) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = e.AppendString(append(dst, ','), val) + } + } + dst = append(dst, ']') + return dst +} + +// AppendString encodes the input string to json and appends +// the encoded string to the input byte slice. +// +// The operation loops though each byte in the string looking +// for characters that need json or utf8 encoding. If the string +// does not need encoding, then the string is appended in its +// entirety to the byte slice. +// If we encounter a byte that does need encoding, switch up +// the operation and perform a byte-by-byte read-encode-append. +func (Encoder) AppendString(dst []byte, s string) []byte { + // Start with a double quote. 
+ dst = append(dst, '"') + // Loop through each character in the string. + for i := 0; i < len(s); i++ { + // Check if the character needs encoding. Control characters, slashes, + // and the double quote need json encoding. Bytes above the ascii + // boundary needs utf8 encoding. + if !noEscapeTable[s[i]] { + // We encountered a character that needs to be encoded. Switch + // to complex version of the algorithm. + dst = appendStringComplex(dst, s, i) + return append(dst, '"') + } + } + // The string has no need for encoding and therefore is directly + // appended to the byte slice. + dst = append(dst, s...) + // End with a double quote + return append(dst, '"') +} + +// AppendStringers encodes the provided Stringer list to json and +// appends the encoded Stringer list to the input byte slice. +func (e Encoder) AppendStringers(dst []byte, vals []fmt.Stringer) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = e.AppendStringer(dst, vals[0]) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = e.AppendStringer(append(dst, ','), val) + } + } + return append(dst, ']') +} + +// AppendStringer encodes the input Stringer to json and appends the +// encoded Stringer value to the input byte slice. +func (e Encoder) AppendStringer(dst []byte, val fmt.Stringer) []byte { + if val == nil { + return e.AppendInterface(dst, nil) + } + return e.AppendString(dst, val.String()) +} + +//// appendStringComplex is used by appendString to take over an in +// progress JSON string encoding that encountered a character that needs +// to be encoded. +func appendStringComplex(dst []byte, s string, i int) []byte { + start := 0 + for i < len(s) { + b := s[i] + if b >= utf8.RuneSelf { + r, size := utf8.DecodeRuneInString(s[i:]) + if r == utf8.RuneError && size == 1 { + // In case of error, first append previous simple characters to + // the byte slice if any and append a replacement character code + // in place of the invalid sequence. 
+ if start < i { + dst = append(dst, s[start:i]...) + } + dst = append(dst, `\ufffd`...) + i += size + start = i + continue + } + i += size + continue + } + if noEscapeTable[b] { + i++ + continue + } + // We encountered a character that needs to be encoded. + // Let's append the previous simple characters to the byte slice + // and switch our operation to read and encode the remainder + // characters byte-by-byte. + if start < i { + dst = append(dst, s[start:i]...) + } + switch b { + case '"', '\\': + dst = append(dst, '\\', b) + case '\b': + dst = append(dst, '\\', 'b') + case '\f': + dst = append(dst, '\\', 'f') + case '\n': + dst = append(dst, '\\', 'n') + case '\r': + dst = append(dst, '\\', 'r') + case '\t': + dst = append(dst, '\\', 't') + default: + dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF]) + } + i++ + start = i + } + if start < len(s) { + dst = append(dst, s[start:]...) + } + return dst +} diff --git a/vendor/github.com/rs/zerolog/internal/json/time.go b/vendor/github.com/rs/zerolog/internal/json/time.go new file mode 100644 index 000000000..6a8dc912d --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/json/time.go @@ -0,0 +1,113 @@ +package json + +import ( + "strconv" + "time" +) + +const ( + // Import from zerolog/global.go + timeFormatUnix = "" + timeFormatUnixMs = "UNIXMS" + timeFormatUnixMicro = "UNIXMICRO" + timeFormatUnixNano = "UNIXNANO" +) + +// AppendTime formats the input time with the given format +// and appends the encoded string to the input byte slice. 
+func (e Encoder) AppendTime(dst []byte, t time.Time, format string) []byte { + switch format { + case timeFormatUnix: + return e.AppendInt64(dst, t.Unix()) + case timeFormatUnixMs: + return e.AppendInt64(dst, t.UnixNano()/1000000) + case timeFormatUnixMicro: + return e.AppendInt64(dst, t.UnixNano()/1000) + case timeFormatUnixNano: + return e.AppendInt64(dst, t.UnixNano()) + } + return append(t.AppendFormat(append(dst, '"'), format), '"') +} + +// AppendTimes converts the input times with the given format +// and appends the encoded string list to the input byte slice. +func (Encoder) AppendTimes(dst []byte, vals []time.Time, format string) []byte { + switch format { + case timeFormatUnix: + return appendUnixTimes(dst, vals) + case timeFormatUnixMs: + return appendUnixNanoTimes(dst, vals, 1000000) + case timeFormatUnixMicro: + return appendUnixNanoTimes(dst, vals, 1000) + case timeFormatUnixNano: + return appendUnixNanoTimes(dst, vals, 1) + } + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = append(vals[0].AppendFormat(append(dst, '"'), format), '"') + if len(vals) > 1 { + for _, t := range vals[1:] { + dst = append(t.AppendFormat(append(dst, ',', '"'), format), '"') + } + } + dst = append(dst, ']') + return dst +} + +func appendUnixTimes(dst []byte, vals []time.Time) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, vals[0].Unix(), 10) + if len(vals) > 1 { + for _, t := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), t.Unix(), 10) + } + } + dst = append(dst, ']') + return dst +} + +func appendUnixNanoTimes(dst []byte, vals []time.Time, div int64) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, vals[0].UnixNano()/div, 10) + if len(vals) > 1 { + for _, t := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), t.UnixNano()/div, 10) + } + } + dst = append(dst, ']') 
+ return dst +} + +// AppendDuration formats the input duration with the given unit & format +// and appends the encoded string to the input byte slice. +func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte { + if useInt { + return strconv.AppendInt(dst, int64(d/unit), 10) + } + return e.AppendFloat64(dst, float64(d)/float64(unit)) +} + +// AppendDurations formats the input durations with the given unit & format +// and appends the encoded string list to the input byte slice. +func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = e.AppendDuration(dst, vals[0], unit, useInt) + if len(vals) > 1 { + for _, d := range vals[1:] { + dst = e.AppendDuration(append(dst, ','), d, unit, useInt) + } + } + dst = append(dst, ']') + return dst +} diff --git a/vendor/github.com/rs/zerolog/internal/json/types.go b/vendor/github.com/rs/zerolog/internal/json/types.go new file mode 100644 index 000000000..ad7f7a88f --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/json/types.go @@ -0,0 +1,405 @@ +package json + +import ( + "fmt" + "math" + "net" + "strconv" +) + +// AppendNil inserts a 'Nil' object into the dst byte array. +func (Encoder) AppendNil(dst []byte) []byte { + return append(dst, "null"...) +} + +// AppendBeginMarker inserts a map start into the dst byte array. +func (Encoder) AppendBeginMarker(dst []byte) []byte { + return append(dst, '{') +} + +// AppendEndMarker inserts a map end into the dst byte array. +func (Encoder) AppendEndMarker(dst []byte) []byte { + return append(dst, '}') +} + +// AppendLineBreak appends a line break. +func (Encoder) AppendLineBreak(dst []byte) []byte { + return append(dst, '\n') +} + +// AppendArrayStart adds markers to indicate the start of an array. 
+func (Encoder) AppendArrayStart(dst []byte) []byte { + return append(dst, '[') +} + +// AppendArrayEnd adds markers to indicate the end of an array. +func (Encoder) AppendArrayEnd(dst []byte) []byte { + return append(dst, ']') +} + +// AppendArrayDelim adds markers to indicate end of a particular array element. +func (Encoder) AppendArrayDelim(dst []byte) []byte { + if len(dst) > 0 { + return append(dst, ',') + } + return dst +} + +// AppendBool converts the input bool to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendBool(dst []byte, val bool) []byte { + return strconv.AppendBool(dst, val) +} + +// AppendBools encodes the input bools to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendBools(dst []byte, vals []bool) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendBool(dst, vals[0]) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendBool(append(dst, ','), val) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInt converts the input int to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt(dst []byte, val int) []byte { + return strconv.AppendInt(dst, int64(val), 10) +} + +// AppendInts encodes the input ints to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendInts(dst []byte, vals []int) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, int64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), int64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInt8 converts the input []int8 to a string and +// appends the encoded string to the input byte slice. 
+func (Encoder) AppendInt8(dst []byte, val int8) []byte { + return strconv.AppendInt(dst, int64(val), 10) +} + +// AppendInts8 encodes the input int8s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendInts8(dst []byte, vals []int8) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, int64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), int64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInt16 converts the input int16 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt16(dst []byte, val int16) []byte { + return strconv.AppendInt(dst, int64(val), 10) +} + +// AppendInts16 encodes the input int16s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendInts16(dst []byte, vals []int16) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, int64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), int64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInt32 converts the input int32 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt32(dst []byte, val int32) []byte { + return strconv.AppendInt(dst, int64(val), 10) +} + +// AppendInts32 encodes the input int32s to json and +// appends the encoded string list to the input byte slice. 
+func (Encoder) AppendInts32(dst []byte, vals []int32) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, int64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), int64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInt64 converts the input int64 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt64(dst []byte, val int64) []byte { + return strconv.AppendInt(dst, val, 10) +} + +// AppendInts64 encodes the input int64s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendInts64(dst []byte, vals []int64) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, vals[0], 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), val, 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendUint converts the input uint to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint(dst []byte, val uint) []byte { + return strconv.AppendUint(dst, uint64(val), 10) +} + +// AppendUints encodes the input uints to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendUints(dst []byte, vals []uint) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendUint(dst, uint64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendUint8 converts the input uint8 to a string and +// appends the encoded string to the input byte slice. 
+func (Encoder) AppendUint8(dst []byte, val uint8) []byte { + return strconv.AppendUint(dst, uint64(val), 10) +} + +// AppendUints8 encodes the input uint8s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendUints8(dst []byte, vals []uint8) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendUint(dst, uint64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendUint16 converts the input uint16 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint16(dst []byte, val uint16) []byte { + return strconv.AppendUint(dst, uint64(val), 10) +} + +// AppendUints16 encodes the input uint16s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendUints16(dst []byte, vals []uint16) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendUint(dst, uint64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendUint32 converts the input uint32 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint32(dst []byte, val uint32) []byte { + return strconv.AppendUint(dst, uint64(val), 10) +} + +// AppendUints32 encodes the input uint32s to json and +// appends the encoded string list to the input byte slice. 
+func (Encoder) AppendUints32(dst []byte, vals []uint32) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendUint(dst, uint64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendUint64 converts the input uint64 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint64(dst []byte, val uint64) []byte { + return strconv.AppendUint(dst, val, 10) +} + +// AppendUints64 encodes the input uint64s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendUints64(dst []byte, vals []uint64) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendUint(dst, vals[0], 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendUint(append(dst, ','), val, 10) + } + } + dst = append(dst, ']') + return dst +} + +func appendFloat(dst []byte, val float64, bitSize int) []byte { + // JSON does not permit NaN or Infinity. A typical JSON encoder would fail + // with an error, but a logging library wants the data to get through so we + // make a tradeoff and store those types as string. + switch { + case math.IsNaN(val): + return append(dst, `"NaN"`...) + case math.IsInf(val, 1): + return append(dst, `"+Inf"`...) + case math.IsInf(val, -1): + return append(dst, `"-Inf"`...) + } + return strconv.AppendFloat(dst, val, 'f', -1, bitSize) +} + +// AppendFloat32 converts the input float32 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendFloat32(dst []byte, val float32) []byte { + return appendFloat(dst, float64(val), 32) +} + +// AppendFloats32 encodes the input float32s to json and +// appends the encoded string list to the input byte slice. 
+func (Encoder) AppendFloats32(dst []byte, vals []float32) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = appendFloat(dst, float64(vals[0]), 32) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = appendFloat(append(dst, ','), float64(val), 32) + } + } + dst = append(dst, ']') + return dst +} + +// AppendFloat64 converts the input float64 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendFloat64(dst []byte, val float64) []byte { + return appendFloat(dst, val, 64) +} + +// AppendFloats64 encodes the input float64s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendFloats64(dst []byte, vals []float64) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = appendFloat(dst, vals[0], 64) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = appendFloat(append(dst, ','), val, 64) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInterface marshals the input interface to a string and +// appends the encoded string to the input byte slice. +func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte { + marshaled, err := JSONMarshalFunc(i) + if err != nil { + return e.AppendString(dst, fmt.Sprintf("marshaling error: %v", err)) + } + return append(dst, marshaled...) +} + +// AppendObjectData takes in an object that is already in a byte array +// and adds it to the dst. +func (Encoder) AppendObjectData(dst []byte, o []byte) []byte { + // Three conditions apply here: + // 1. new content starts with '{' - which should be dropped OR + // 2. new content starts with '{' - which should be replaced with ',' + // to separate with existing content OR + // 3. existing content has already other fields + if o[0] == '{' { + if len(dst) > 1 { + dst = append(dst, ',') + } + o = o[1:] + } else if len(dst) > 1 { + dst = append(dst, ',') + } + return append(dst, o...) 
+} + +// AppendIPAddr adds IPv4 or IPv6 address to dst. +func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte { + return e.AppendString(dst, ip.String()) +} + +// AppendIPPrefix adds IPv4 or IPv6 Prefix (address & mask) to dst. +func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte { + return e.AppendString(dst, pfx.String()) + +} + +// AppendMACAddr adds MAC address to dst. +func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte { + return e.AppendString(dst, ha.String()) +} diff --git a/vendor/github.com/rs/zerolog/log.go b/vendor/github.com/rs/zerolog/log.go new file mode 100644 index 000000000..efd2a330f --- /dev/null +++ b/vendor/github.com/rs/zerolog/log.go @@ -0,0 +1,476 @@ +// Package zerolog provides a lightweight logging library dedicated to JSON logging. +// +// A global Logger can be use for simple logging: +// +// import "github.com/rs/zerolog/log" +// +// log.Info().Msg("hello world") +// // Output: {"time":1494567715,"level":"info","message":"hello world"} +// +// NOTE: To import the global logger, import the "log" subpackage "github.com/rs/zerolog/log". +// +// Fields can be added to log messages: +// +// log.Info().Str("foo", "bar").Msg("hello world") +// // Output: {"time":1494567715,"level":"info","message":"hello world","foo":"bar"} +// +// Create logger instance to manage different outputs: +// +// logger := zerolog.New(os.Stderr).With().Timestamp().Logger() +// logger.Info(). +// Str("foo", "bar"). 
+// Msg("hello world") +// // Output: {"time":1494567715,"level":"info","message":"hello world","foo":"bar"} +// +// Sub-loggers let you chain loggers with additional context: +// +// sublogger := log.With().Str("component": "foo").Logger() +// sublogger.Info().Msg("hello world") +// // Output: {"time":1494567715,"level":"info","message":"hello world","component":"foo"} +// +// Level logging +// +// zerolog.SetGlobalLevel(zerolog.InfoLevel) +// +// log.Debug().Msg("filtered out message") +// log.Info().Msg("routed message") +// +// if e := log.Debug(); e.Enabled() { +// // Compute log output only if enabled. +// value := compute() +// e.Str("foo": value).Msg("some debug message") +// } +// // Output: {"level":"info","time":1494567715,"routed message"} +// +// Customize automatic field names: +// +// log.TimestampFieldName = "t" +// log.LevelFieldName = "p" +// log.MessageFieldName = "m" +// +// log.Info().Msg("hello world") +// // Output: {"t":1494567715,"p":"info","m":"hello world"} +// +// Log with no level and message: +// +// log.Log().Str("foo","bar").Msg("") +// // Output: {"time":1494567715,"foo":"bar"} +// +// Add contextual fields to global Logger: +// +// log.Logger = log.With().Str("foo", "bar").Logger() +// +// Sample logs: +// +// sampled := log.Sample(&zerolog.BasicSampler{N: 10}) +// sampled.Info().Msg("will be logged every 10 messages") +// +// Log with contextual hooks: +// +// // Create the hook: +// type SeverityHook struct{} +// +// func (h SeverityHook) Run(e *zerolog.Event, level zerolog.Level, msg string) { +// if level != zerolog.NoLevel { +// e.Str("severity", level.String()) +// } +// } +// +// // And use it: +// var h SeverityHook +// log := zerolog.New(os.Stdout).Hook(h) +// log.Warn().Msg("") +// // Output: {"level":"warn","severity":"warn"} +// +// +// Caveats +// +// There is no fields deduplication out-of-the-box. +// Using the same key multiple times creates new key in final JSON each time. 
+// +// logger := zerolog.New(os.Stderr).With().Timestamp().Logger() +// logger.Info(). +// Timestamp(). +// Msg("dup") +// // Output: {"level":"info","time":1494567715,"time":1494567715,"message":"dup"} +// +// In this case, many consumers will take the last value, +// but this is not guaranteed; check yours if in doubt. +package zerolog + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strconv" +) + +// Level defines log levels. +type Level int8 + +const ( + // DebugLevel defines debug log level. + DebugLevel Level = iota + // InfoLevel defines info log level. + InfoLevel + // WarnLevel defines warn log level. + WarnLevel + // ErrorLevel defines error log level. + ErrorLevel + // FatalLevel defines fatal log level. + FatalLevel + // PanicLevel defines panic log level. + PanicLevel + // NoLevel defines an absent log level. + NoLevel + // Disabled disables the logger. + Disabled + + // TraceLevel defines trace log level. + TraceLevel Level = -1 + // Values less than TraceLevel are handled as numbers. +) + +func (l Level) String() string { + switch l { + case TraceLevel: + return LevelTraceValue + case DebugLevel: + return LevelDebugValue + case InfoLevel: + return LevelInfoValue + case WarnLevel: + return LevelWarnValue + case ErrorLevel: + return LevelErrorValue + case FatalLevel: + return LevelFatalValue + case PanicLevel: + return LevelPanicValue + case Disabled: + return "disabled" + case NoLevel: + return "" + } + return strconv.Itoa(int(l)) +} + +// ParseLevel converts a level string into a zerolog Level value. +// returns an error if the input string does not match known values. 
+func ParseLevel(levelStr string) (Level, error) { + switch levelStr { + case LevelFieldMarshalFunc(TraceLevel): + return TraceLevel, nil + case LevelFieldMarshalFunc(DebugLevel): + return DebugLevel, nil + case LevelFieldMarshalFunc(InfoLevel): + return InfoLevel, nil + case LevelFieldMarshalFunc(WarnLevel): + return WarnLevel, nil + case LevelFieldMarshalFunc(ErrorLevel): + return ErrorLevel, nil + case LevelFieldMarshalFunc(FatalLevel): + return FatalLevel, nil + case LevelFieldMarshalFunc(PanicLevel): + return PanicLevel, nil + case LevelFieldMarshalFunc(Disabled): + return Disabled, nil + case LevelFieldMarshalFunc(NoLevel): + return NoLevel, nil + } + i, err := strconv.Atoi(levelStr) + if err != nil { + return NoLevel, fmt.Errorf("Unknown Level String: '%s', defaulting to NoLevel", levelStr) + } + if i > 127 || i < -128 { + return NoLevel, fmt.Errorf("Out-Of-Bounds Level: '%d', defaulting to NoLevel", i) + } + return Level(i), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler to allow for easy reading from toml/yaml/json formats +func (l *Level) UnmarshalText(text []byte) error { + if l == nil { + return errors.New("can't unmarshal a nil *Level") + } + var err error + *l, err = ParseLevel(string(text)) + return err +} + +// MarshalText implements encoding.TextMarshaler to allow for easy writing into toml/yaml/json formats +func (l Level) MarshalText() ([]byte, error) { + return []byte(LevelFieldMarshalFunc(l)), nil +} + +// A Logger represents an active logging object that generates lines +// of JSON output to an io.Writer. Each logging operation makes a single +// call to the Writer's Write method. There is no guarantee on access +// serialization to the Writer. If your Writer is not thread safe, +// you may consider a sync wrapper. +type Logger struct { + w LevelWriter + level Level + sampler Sampler + context []byte + hooks []Hook + stack bool +} + +// New creates a root logger with given output writer. 
If the output writer implements +// the LevelWriter interface, the WriteLevel method will be called instead of the Write +// one. +// +// Each logging operation makes a single call to the Writer's Write method. There is no +// guarantee on access serialization to the Writer. If your Writer is not thread safe, +// you may consider using sync wrapper. +func New(w io.Writer) Logger { + if w == nil { + w = ioutil.Discard + } + lw, ok := w.(LevelWriter) + if !ok { + lw = levelWriterAdapter{w} + } + return Logger{w: lw, level: TraceLevel} +} + +// Nop returns a disabled logger for which all operation are no-op. +func Nop() Logger { + return New(nil).Level(Disabled) +} + +// Output duplicates the current logger and sets w as its output. +func (l Logger) Output(w io.Writer) Logger { + l2 := New(w) + l2.level = l.level + l2.sampler = l.sampler + l2.stack = l.stack + if len(l.hooks) > 0 { + l2.hooks = append(l2.hooks, l.hooks...) + } + if l.context != nil { + l2.context = make([]byte, len(l.context), cap(l.context)) + copy(l2.context, l.context) + } + return l2 +} + +// With creates a child logger with the field added to its context. +func (l Logger) With() Context { + context := l.context + l.context = make([]byte, 0, 500) + if context != nil { + l.context = append(l.context, context...) + } else { + // This is needed for AppendKey to not check len of input + // thus making it inlinable + l.context = enc.AppendBeginMarker(l.context) + } + return Context{l} +} + +// UpdateContext updates the internal logger's context. +// +// Use this method with caution. If unsure, prefer the With method. +func (l *Logger) UpdateContext(update func(c Context) Context) { + if l == disabledLogger { + return + } + if cap(l.context) == 0 { + l.context = make([]byte, 0, 500) + } + if len(l.context) == 0 { + l.context = enc.AppendBeginMarker(l.context) + } + c := update(Context{*l}) + l.context = c.l.context +} + +// Level creates a child logger with the minimum accepted level set to level. 
+func (l Logger) Level(lvl Level) Logger { + l.level = lvl + return l +} + +// GetLevel returns the current Level of l. +func (l Logger) GetLevel() Level { + return l.level +} + +// Sample returns a logger with the s sampler. +func (l Logger) Sample(s Sampler) Logger { + l.sampler = s + return l +} + +// Hook returns a logger with the h Hook. +func (l Logger) Hook(h Hook) Logger { + l.hooks = append(l.hooks, h) + return l +} + +// Trace starts a new message with trace level. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Trace() *Event { + return l.newEvent(TraceLevel, nil) +} + +// Debug starts a new message with debug level. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Debug() *Event { + return l.newEvent(DebugLevel, nil) +} + +// Info starts a new message with info level. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Info() *Event { + return l.newEvent(InfoLevel, nil) +} + +// Warn starts a new message with warn level. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Warn() *Event { + return l.newEvent(WarnLevel, nil) +} + +// Error starts a new message with error level. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Error() *Event { + return l.newEvent(ErrorLevel, nil) +} + +// Err starts a new message with error level with err as a field if not nil or +// with info level if err is nil. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Err(err error) *Event { + if err != nil { + return l.Error().Err(err) + } + + return l.Info() +} + +// Fatal starts a new message with fatal level. The os.Exit(1) function +// is called by the Msg method, which terminates the program immediately. +// +// You must call Msg on the returned event in order to send the event. 
+func (l *Logger) Fatal() *Event { + return l.newEvent(FatalLevel, func(msg string) { os.Exit(1) }) +} + +// Panic starts a new message with panic level. The panic() function +// is called by the Msg method, which stops the ordinary flow of a goroutine. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Panic() *Event { + return l.newEvent(PanicLevel, func(msg string) { panic(msg) }) +} + +// WithLevel starts a new message with level. Unlike Fatal and Panic +// methods, WithLevel does not terminate the program or stop the ordinary +// flow of a goroutine when used with their respective levels. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) WithLevel(level Level) *Event { + switch level { + case TraceLevel: + return l.Trace() + case DebugLevel: + return l.Debug() + case InfoLevel: + return l.Info() + case WarnLevel: + return l.Warn() + case ErrorLevel: + return l.Error() + case FatalLevel: + return l.newEvent(FatalLevel, nil) + case PanicLevel: + return l.newEvent(PanicLevel, nil) + case NoLevel: + return l.Log() + case Disabled: + return nil + default: + return l.newEvent(level, nil) + } +} + +// Log starts a new message with no level. Setting GlobalLevel to Disabled +// will still disable events produced by this method. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Log() *Event { + return l.newEvent(NoLevel, nil) +} + +// Print sends a log event using debug level and no extra field. +// Arguments are handled in the manner of fmt.Print. +func (l *Logger) Print(v ...interface{}) { + if e := l.Debug(); e.Enabled() { + e.CallerSkipFrame(1).Msg(fmt.Sprint(v...)) + } +} + +// Printf sends a log event using debug level and no extra field. +// Arguments are handled in the manner of fmt.Printf. 
+func (l *Logger) Printf(format string, v ...interface{}) { + if e := l.Debug(); e.Enabled() { + e.CallerSkipFrame(1).Msg(fmt.Sprintf(format, v...)) + } +} + +// Write implements the io.Writer interface. This is useful to set as a writer +// for the standard library log. +func (l Logger) Write(p []byte) (n int, err error) { + n = len(p) + if n > 0 && p[n-1] == '\n' { + // Trim CR added by stdlog. + p = p[0 : n-1] + } + l.Log().CallerSkipFrame(1).Msg(string(p)) + return +} + +func (l *Logger) newEvent(level Level, done func(string)) *Event { + enabled := l.should(level) + if !enabled { + if done != nil { + done("") + } + return nil + } + e := newEvent(l.w, level) + e.done = done + e.ch = l.hooks + if level != NoLevel && LevelFieldName != "" { + e.Str(LevelFieldName, LevelFieldMarshalFunc(level)) + } + if l.context != nil && len(l.context) > 1 { + e.buf = enc.AppendObjectData(e.buf, l.context) + } + if l.stack { + e.Stack() + } + return e +} + +// should returns true if the log event should be logged. 
+func (l *Logger) should(lvl Level) bool { + if lvl < l.level || lvl < GlobalLevel() { + return false + } + if l.sampler != nil && !samplingDisabled() { + return l.sampler.Sample(lvl) + } + return true +} diff --git a/vendor/github.com/rs/zerolog/not_go112.go b/vendor/github.com/rs/zerolog/not_go112.go new file mode 100644 index 000000000..4c43c9e76 --- /dev/null +++ b/vendor/github.com/rs/zerolog/not_go112.go @@ -0,0 +1,5 @@ +// +build !go1.12 + +package zerolog + +const contextCallerSkipFrameCount = 3 diff --git a/vendor/github.com/rs/zerolog/pretty.png b/vendor/github.com/rs/zerolog/pretty.png new file mode 100644 index 0000000000000000000000000000000000000000..242033686debf04c5b1756c166d05d09767594e3 GIT binary patch literal 84064 zcmc$^Wl&vB69#w_Ah^2|+}$-;aCdiicM0z9?(Q1g-GVy=cXzm4miNnUZEfxU-9B}z z&Y3$k=S+9c%+pVIxV)?wJPZyD004lO5EoGd0KSz00AHY>zJ9h;J?*{%05EkH!ouy zB31z6QCag`#I8A6u@lXSbVw0j7$^2!8$Aqv1w4&vNKrD#Pfx*2=$6xu^c#1I&<;daPf9$p?mnOYS6}HEl+qT>TYR6w2rTGzcV;6 ze_*iaU?&cZE8nU!6biu|hJ&|NL3|uJ0PV&mlB13o6KTk#un1`MT+okFSU42NfcF-& zdy{)nud$9Pq`U@Ms|9%r4u^FCB*|V*rJ~VJ-x!k0qex7LV;k*ltC;!K&`>z0X?LEq z6S9H;Cc5-W*o}-d{1#cT?&0&p8cC>_FPjwN7MPNT$!um&!pVGx;h;Ip*z$W!v!PHL zFJqtDXc|e?rQV%yp_CUu@Z#7Xcx6!xqtOblPEG?!t_p%#jwO?$tG{F8k36Upe18pOJBiNI|`)^!tn)1Q^@B#c)q|qX##cDJ_7vLlc z#4-OxfHk&FVO0l-X%A_49f}m@ZypwkmIiM<8o=vuaHo$<8)`@y{7{EYZ9s7z@@@|m zBZS)2ukjm7ZHId!1d=G&*Zqs|HNj6L@b5&*K_SQAhr=KYLw^&&4PrtI7R0HOLP!WR zBl8mrVtl8GS1f`n7jga0Hj2Ct?ugMA81S3FUz-(Wd{y7tzG!i*ZV#HW46q6Jq zS(>9XTM5E^z)A1Mpg0o)Q*NC*Exgpono(E%&I&j!Zf2}bfKFib;N=Lr;f#@;p;x`| ziXA8QG{7wMuAj+a?6>p=uQpQ+UMuFZe}{iZ^w~()$leXNJ547fKVELY%rL`XZJ*u9 z^4|LO<}*K#@z;+(BrC|?P_y6|g3^@a)?|33S>HE^?os(sRKkWuFbyf0l1C|Oh*D4y zk>pVAkRZcINL>;j#v;*VcH=POmf|1dG{u+2-;0nNzO6=_3&&8HC8j3QjJoW@?uYKT zQd3ixQ0Gw3tGH87C{rsVRB9`psJw{G7Of}-P_j^=jBAebOX4079MC08HW$4rDU};%l`YY^_Q0(8Bl<^&)rCVli7$bvd5~kAhdXYw{zNO*z8P5yjB*#L7n5qq1rI z>={%z3UKMavg&;Kw*Ar2gw7PFuV;=fQqE^Cc4xjzg=fLX2gf#ltmeFmVi#K%b87_T 
z{6awx@2RNGAwx~O+d)GBJNH=JktQx;eN-jrOsF$Hi{?9w>ZP~FgF8oJiDeskWk?jqMObDK4otIStE zl4D4yucxnPwSAVihjdWapFg&fT-at*vG;8RVZ=_>R<=#nD~o{Bid&4c*51~^chB&w z-?iMa?Lg86-bLqh?fliVVbl0>A3yP`eRiXyz1q>r?d|k(hu*o?iSVBBN&If*aCYF2 z;UMD@BfZBiFBo5{XN#xrt>Y7l7yiTR{Q4Hp$eaIDBbBPsZ>2}+lRKsdgO~H$_}4j~ zd2heR$IhZksDA6ZjA~9z?vOhGYrgX35PBm2?v@p2PRX)HX zWNMu=D==?kdNaw>SY*CP@I^S3=JuI6tnBqzak&=?+iF*u2pQ*l zT+v!#-~?sKWC_+Fwnp*9?IbFs+|bTbI2j%4r|7AAEJ^7i)h0Y~^YD6^J%(l-XZdh7 zY!q$0@*3)DZb5gG3xxXl*353aICVg{1{nTg-KOz#0>v>Z{#FYLGY!ue(!A{}RWhV| z5uDTdt0k{;?V!8cX>`YScz5JRU#C;&+4sZ1e1`$H9(EOW9W$L)v%AdAp?6nDDvs<# zYArQ`j!tu}ht_%(bg+NGHQ}eT(^OzR)R8PfqZI)cnS%cCj`S#OQ*T*c_uDM_sUlmz z&~N{JrFqTeVQFz&l9NnLGFx&&s;A1Jw(U0YP7YmQY{|6hLi4EGcEl6t33}zh1+t$r z%u~|Po~?7b810e2mL$krvCph}w|P6hOw;YtCD6@9ltmm?A8IYzy(^}xcyv(LRrqH1 zwJfglTOV2X)DQTHZORU07xM^wOuoHb*3{`lFQzOWblh~nx(aM2{x03a9A;qidU#5? zd~c8Kk!u%l{dmEN&By@z2D!9Y(44T=7s!IBD}d%*);8U3tJWp!b7H_r%LN3Auf_9- zk;JwT8sSy)*gAM?fmP=hAzNtw;ao^sB(;&Xxa3E6DCipMDBdX=81tznuU08-lB?#C z;n#82bS=4_w9m}DI9Q!*^to_aBg~TGcRK^#-|1iyGibMCdERJ?{TNA?;!o9N@Y3OT z2zDqwsd!r)mTjF5(cAR3Id;6V>^yX+erZ2?NV%liEVX;y)7<6 z*qPqUN#~pE$m*E6JlOK`>G=y94KzV?K$IdV7ohgDzW)8nk-2?Zd^xAo)z_6Rx0}Oz zZ1mLitaJaC3MwdFC_Mlh24kDi&S^#nLD2Q%-}Cr@?Td_09r7~=)?Xw)RPOn`eynmO z@h*I5K5IUjPHACY`R;# zBY}_dDHMs&>OxD$l>YHF8S!7PO$h;P5dP%3-$z-74-XUshfRSB0Uv`#EDvSB!4}3a zhs@CyzS*nSq)Y%7fM3vz6gma+YHB3kOT(D8b%qTU_LWF43#A{Iko^CAy;h-2jJ8f4 z>mKj=59Aj={+@oti*vMcr|*32;KhuC zb=$J_-oScoS6H6`arM0V@%0SMga?iCN>72*1qVo>>WkMd&lqBh|4=`+mWs0Qy*d3p zHC4AMbY`xlowXs-O)?Po0?xy6_aUv|SRJ(GKp43Ca`Cj(V#EI?$5cjQIO% zt%#_UM{XEO3Q+oY?)%@suqZ@WIgFeu)J4mv2DiwJF!Yzk+#;8`!EFlSV?{~dGm04j z8~C0WMbd7$qUNwJ`{NOR-DETyyZ7@v_@KglH@&QI(h1*fO&T&EM_01R2jMie zw|JZ$SN-$9fufErM6c{bsLgWS1Erz@qS0DGZJmf^9o}ij{#iwc&>rJAxc7da)16&U zq52ykx9#Js6z7fAJAOT`MX)%X;stGo8oZ7(9Guy`vTKkO7)B9G9YvCChw|wEoq$_+ zj$BWarj5H2C*ATdYAeddV_pfW$QMy7S)MPo>6+6weNYU$KnRi?BlvAz$frrrP@MpO z#+#0i2mIX|%kaaA;0dBD&Qqs1x61WSThW0pW5}j$WJ%0`Q7~ZUq`G@kwIi`*VTWa+R-Ig|X!0)D~`5tD4T#J4~V 
zrk&UzhB;^ydU4%;s!hC~oF1w1?k^&AjWG6dclJL7)NE0bH`pCRtoAa-oQ^#0`Eb+I z?8p+Hf~KSOQQ7*^lfY~gC`j;Vzq(UWy=oO*5OJ5s%c|NVN|{I^k)vumHi+Qq(}R0v{PZxzU09c9Hz~ki^E+^>f@4X8|l2ryu z^GPcjq+tIc@9jruY_)GXCUu}ltho~lpyw)H8tA8!3ST2WSl<8DJkU>=>AYgRP40Ot zl{Y-lBFME==g_y6+0gXYdV9W7wBqXI(h>KB`W|(BwzLg*YZHb_ z=h`pEND*#&@^z>Ox}6BzTaU{xfF ze{*%MGY)vRvUr%Nix*J(smKn@o|2XVoISs`qNpb~NQ|=-pF`5}h11Gb-bv1hB`nIh z6!#Al&*@(>TOb6oFx}_G{HY0SNq(LiRJ2mY-di^$Cfi~2QdAV@389tnmtZCzx*4-3 z`xCU=*Q~qlJad&({5x8jea$!1>89%mOEx-H-ONr> zt3Qj-1OC@3l<9-t80tDHTGO(se+Oz~Yp7Cn%RbZyevca$dbU@<^Y$7aX;F5PW$*Rb z+32as`f{|v>jXEI9tS-Ba_);t&qxM~hxE3m(@>{6;UAle=3_)}DODil zDv`fTo3UiXw5A-Do%MxA_x%AzEgblD+{9*j|Af-bAJBXvkE?45FFLLtCaD}EKT^<3 zG88~NiP;(Ic(jv~>8#q1l;$(;6Q{phkJ@)BBySQ!UzBv6L`#AZNrFMFzw86uc1;rvcj?g7tI4=S))_(BKAwDfNg zQz{evYm0ID^Dv{zoPsHQODg`}l)M;FfbwAE;7LRF6#(!+7IKh@c4yAGkvNe$O-G_8 zBOil{iKC6P5qot&@n`(i-Kmj7ic~Un?X{=PPn0{jV4eJ<3NOdH@x>~&yF|+I>31QL zCT?{P=k86Wru=#Qi;7;3HG{mDousKgZ@Gpqu{=V3znmAacw?;vqu8Ip?djp8fA~`! z#eAn6jiM+MU{)ruiy}ApYAt^Wf0X6Z3{$7JJlyw4Ji`nQrm(P?6iWAC8ezTCo3gOv zCAcpR_IEum2C&4Q>?$5S4?`w`w71BP^xFgA{G(6&E_ys6B;gMU6pMBWHNR#3ypr=BEW{l`k^|h_;D4zL`}PF z;HWrDXhmTe!kKQE)@{?xzQr?y-DchA2bg<*fTnzIeWf6wzbi)>5N*Q5A383uL@^?U zn`)?9x)4DE4^Wn$c3rXqwTEQQ%@|cAw2}To3cc`3oeE#)>&#*PhkQobu0@>NexZ>|G*||8A4kHz{ zFp*McKnevM9gCM*4|mcWqzQYA;7Oxg(qEJ&DDCc@82F1^@Zo|x&#%~8@;dK$Fk;%* z$v>W1bOQC^dQ|MXMHje6>83)=| z9BmrNgM-=j)t6w~B)(k1 z(ifu)B6TWQ-t=fQx5mr2mvv}IzXW^ix6+R@zeZR8iuVB1#l>0OO{4}t8dy8ZrnRq^ z0AL|wGad#H6F*^oflz+?#wM8htLN8J^!4ThIkO;#!zRMri-;KJG-`*t2)&3~(PSsD zm=n<$#MfVLKdkE9Y-(LR_ymP5sAniOLKpJEGXiP16mARID6-HZblv}WE0Ab~T(R|y zy67s}Uvk&x@5L@d*K%m~+IQ)(a{h67sxo}agc^N+IW36NyQ=IFxEbL|%=OXRUf#9^ zu6~?#J+;QSnq(Cdpmyo<;2R)adS=Y<@#;16>6!KDW(t=*U1ZG&RP%P%65{vpQ&NGe zgI+wz9q&E0F_+(L5GCJku8&(qyJJpyYJZ)(vgX|Y)k0i)D2N$vmT6v>C*N*rF--($o;zDw4X>%HRMT`$%K&0*u2c)>YgNqUeKfn#(w*s zUjBaXU;l!8dT*K;jM`Dn$3tI+I!>oqxM*6hZvEKO{xxTBH&LmmVeY3*<*aeNIxTyL z4Anu_&k++E8`qXx6Xq7nv*M1HI&H9VzM7Mb1wu=N7;7=@0pkvGQq&;#$-L+o)@R3@dJbPS`i(C>Cp}kxB*7%!_58} 
z0n1iJ4`VE&>%*C={zu!O9?{_MTDQ%VU`~mXsaGq1j4y-~&_t`}o>ls~|2~XyxxkzF zdLG6SzJyD;>CH^fza4g39qvr&QKJxhWE`Bt^5=M;9oCe7aCg^oTTi2Vel_aKbi%#q zo>x!93#%MI;qp1yX&mE9Ers;ucn6-hR#(Y<8{X=4@-%TZJw)y znYA>i2!7P{*zrCB?P=Ez6Ncml|8vbCSmdqJmK6FVL@jwt>zC$lr_$tfZwbuw9plrk zCcN|JmK9T?rS%pwrmf{>@#7*smb1xBJ&npnB?!k27JDgg&7DA2AFaJO&4U9yqt1?N zHJ`5dSJp()qCy07d%G|+yjoQcfo$#xEdS9+HjCle);lc38c5MfPB6;u_Io@5a%&Tu zdNpwg9Vfz$Ys^HoMz^c2F0ZEojL7%VA+As5o+pd%FdVy)0O_cC!#C2J%w}@uIcQMj)xt%g&+NzQR@p6kEC45$f|d z<=;wI55MMaUQFwdN4sdkTCi+R5o~PXXEWDoDoEJRW@U=8k~pc^UA1BFu-~Z_E>&1k zU~R&ZzWUYZIB5O4wCkNx_OV6NI9m~L#AD2s+oJ&c^ zrK{*G8!lz%0;V!FB6Zy}AvbE+`e%S{GpHvck~t)*o(MSP0pq9QUODb`&~)YYruOP| z(sJqoV(7h!PXv9)d^{*U@tbI*D^t_eE_$yz-CiCSsX><epRL<9upbw~G4n;?Np;A0gk_oQc<#adcgeM%Y zcZNjXLLdt2L68v~t+fekFN77H9d3X@y-Hwa(z0MZt)P)?bhFjzfD50fL8S*SN^y94 zH?d2s*^&g{*}O)S1-lqQMil6jlzhv^7M=Bc@)>-VpnDxQH8NvNNey4))lO5w+n(}C z%DgVcv6Nmx9dZDaUZ<9?V=aVM@Vo$WTjT_I$LeiQe*4~G=7ja+@GuPMK6^n6p$ug- z3GW68xa$%k(-_?iLi>FWwEz2IxWgdq_e*hOoP-b0dU{hWNS5Q;rTk4SL8@qt*G3gbZc36+ z$t-Y0gp6ubT$l#^UJS+$KHbA*a0xxbcb{D)oO2G~Ab6y2s+%f=b8Iyu~t|4b8 z{QC8tY2>jL&@LLxmqA*Dj>~Okzil0JZ3e6l;xng$J;;QyT9IvC0h68z`$ zdQw78n^WcW7*1PM6S_@|Hg={NeD$^1yCqt4>LrnG6l#C2XtS$E#;jB8l`Kg_fw|ag zDN3@(O>oaz<*klaq>8ZRqWn~b2`waT&b1in)5~8xGy)xTY>JPyo#PW%PP`Zr$PPRH z>^?^8t%oPM&CPA@cKjII(87+=^&NXWYFT}WrY7?01>d5l@hLC4oM@T>s0YC15-^5jEcVDh(Bm|%Pb)JM4 z){59=;x)yt?H|9?_tEftNI5-Oxt@#+96~>KX7~3XqaJTFWqmrBQuXRYmJw5UgE|ND2C`F7y+@zu4Yr3>5Q6f4T`lvCp1rX;K!gun@JmvWfQ`U6p zpM`ySc8bfXN`{nSuVv^%7(%58Q#X=FI6)?bI5k@{qTN(OcGK+oL?k@&&PCjQFxcUtF1L z>O(0P?w+o^(yKMS!sR~N^>wP8!UMf^A=XK0kil1z60~Y8sZ z&R9J^2WD+yyf&M!(Fsv*(mb)b zv1`PxKqCEVgb*4gAiD4x6!N{$>Abe_!lL;u+O}!~PTjczeoc*8jZyGy!I;02M$Ke^VjIESCIa_Y`ITRwtmXiK2(EWoXSB994}LIWx3OyJD!G2V0lw5_HL!)HSd( zmZ4A9^{FHX@yd28P`|Kt6(r7%SJc8!(;JPIdohfy!E(4Kkjgi#(XlhtAa%)-`}2r= z+-~Qfc-b*r&Jj=UxnezokHJHygz`H}r5mXIQ&QV2 zP+{F%Kv_XATM|LUP;faZm%CYj_Ip6nDMw`uqV0rkJ&9{+{Rj1& zeumGF>GI`%{kwB~Jh-5RC^#NVGq@hZ_isp9?>jiPE-6H+i@padr8}&UDL$M|Ul8y` 
zYrtql==D+98lI~5=A6F@pc;w0}mOIWj6`?TS}+)IftI?PQNSV9u7I}xgQ&GpAUM%WgO}p?v;Z#X^r79)bqKnKzlxXcFLehh2 zveH^XSFvEZJSDZ2@^8M@v*>&j5Kb-6D z^6|T1Xup0fWEskL2N}(i?m)hgw>K~R&Gz;`W_kbO6!n6B!bEy4uQun*Wz;IE+Pqdp zOt0*O9-!0$9IzDB|*I zMfh?R;fkVy#s1(Peaxrl(>W*VEX(V24&a-OM&CBj&mIA0bdodD2S5UQRl zUG~~=Q!UZ;mgY^-v*G&Xu4_1b(DM-T{zHW_8+uN>E( zB_JHUFm9j@+JN=y-eB(RQ$T)+xC|^*fL~Q=iAH9mo40JP#j&+cRjnwPA+Bvk?#l|L zhSI^w6=d)x={Q8!p?&`X$(^yU>+a-t^qz<#smkE4uj^{GJidDjrUU#|Owz~yK>i@C zpF0#TW=IIJ-F98EiGiin^yazfG1Y9^f5(_rdAN04uz;!m`sp}1OPjBiEa|cZf#~<} z&2Gi9Z{E4J-4Qj3A%WMSM7b(DHqLgtI?t87YP#N?BEAI+dwM4`#p@Nz5WA(Upo4C* zyNiXpj%9j09`j__37(&e`Uk5EbDzDv?BA? zW;PE7uLw#z1?;QAvDXyWwnXC77ixsSa4-~5=j~J~bfHP5gp5t3mw-b~jdPfn?(Y9= zsQyf%Q?IsT7ntjkNC|n#wi!g4xZ-JHL2vZNTb+#*Sy8wWOsaLWB)-%*pEyhmrC3m( z*I&=!r0ua-=$0hrbD=Wx{<@J{6HN4Hc6QdR)-1g4I5g*#Ie_T)?J{09S&TPfP=Odh zDt|>jXM%92me{Czq|_m(Kx`lPX5Q0Dd@AE*mxWqlYm6NDVu$+~Zpd}JO-=tN?b^z! zq07s1s}Nhn)M-g<)~FvQPutUX`;u542T<}fyR&jzuRCv*sLuvhX4&)evYx-s2)8Zt zWfA`!1hUWTMa)=>D#htknMOv-B4T2-;hVpW)Z$fpt{XbrXgAm4ytGWRB`Z}gx6a8N z&2&7`op#WacWT>cY3P~KNy*eyZS$bBADZm0kwDjWb;p)TQ>L2Ky>%#{xF*@oK{1=j z*MqOY0y%z{Ctrt9vA4&QTda5ck`JR6znb6(xOr7e@B~3>@tI0t$VyObSmpHU<=6Ia zZ#1PE%!4h?;Q_SIDePE6KIx_SgxSzHI` zHfb*0p?J`LRC8Dd3lKkUY-Rl3VAz?-G%M+UuM*89(TMHcmd!W!IxMA0bL%xN3*RMh zP-Z=$=xLd;~hvQF#&Uz4EQ)qzWo*}zn(Yvw~Un9HOeeCyeN^bAmR@pYh;kT=_as)5nX z*3YHaq?yga;{xMe{z)*wNAD3BQX9Ed2d$Xo%zW=tT`NHM2`(!0Z7&&aCLbRLvX^=L zml(9Xg?lWmmJ;HfYKbCN7`U%llLva=k z0;B+uNT8O#9Sj|`)HP7X4RDp~{PL#=&gL+K(URYZ$ZPMgv8^9$J$JYMPH6=W%#?1$ zzma`xY1J>iKi9Sm{?5N*0~%^m&Y;6Z(%JR59%VRntDnR^)I?2vNBfET#(qbfOoi2g zj9vTsaN0bP%AnWn{jzjz#Jq&^_7~5fgx;j^h6_1!CV{P==50p75YLMlK38*S-=$}d zPFlnwDY3QPK=a?yKoZ}Xd6#hk81{``FovK#`GkGP1f6?UGY2APjcRjHhXwZuCCA#M zf+$66eSaWRNk7B7#t4vippP?n$!_^F{+tIKd7dsqNUByceYhE))6EPQsCoAjZYP7E z!uTY&KNqvo@5BN{&-FkGuzn7bo<3m9`;DP$)4AN8%bMA_$h}9RsN}k_a9+26`vT*4 zb}07j#;gNj{Blx{onGv^3}}F&WV2@?tLqO_%_=4@Uan*k;O{?w^%Ew?R8N>%E_u#v zY;065p2C^N&dKRs{osW1i@zC$76PVo9py+=FL*~pwY-jWbs)cmS*+XHwWpq$Z2RJl 
zJ3)*YO5~?AOyx4}FwBfAAbq1K>?C7tF}UawO?FtW(j}XGk4TwQv`eId(4gUYTcBp{ zPNsHZb$usVr)>Oy$EfRP_~)iXz_$8oaT-#@7^E_34$1P;>n2`LL^;yn+x(9*skA?&PZ?XX(i?&nu*jc9H6PhsBT4 z5yLW<08>f{3wb*kbO8F}*88a6mOmIL>vue@jf773QtkKPin8kfVeI%sQK92G z=*5USGUoL)yd2Mji1Wh!dm&$XKf08&;N4vqKfwlgQ({rovg&5S61!*3%$$=|?^_^V zHHLK{$qGx{Y~KInJP=yXs6ovd1e`kztB5uFRQ5iK|;#S*_z!bNs)x<@@~EH>*M6!X!&#Z1Cc53sJ$+K9uWok zeC9FL-4Db5)(^$)-^Ak0Nqd%%gw}~3mF{5u^mII@butFzpyjkHvpV(%0YL))rj9J~ zJnya*kCW?qn!M^w^KlVO_bqX(j3L!_kDI)-VaSl#@yE3P1xDd#K%TB?c?(`eD)_AR zyP|!adNGdM&h8t*JLYh|-n0$FofkF#gcY`JAY(#*_nvw0`nVC-TRa7hY$3PMKKyam3@uuv5bAq0mCus zNmwe+Ot`3=6)<-y) zbyeCjbVH5{3xyH~wj5c}`a_<}g#qceHgr%(L>`^o$UfRPXnDA^X> zm*XAtnfiqZDkH;A{d#t^`S)lP{%S_(`R?_EfPn4fwcaD}+6w8Zb1-|t?+hJw=athK zO}1s9<5llFwcZc=lsHXz&f*{>BZkeJaH#K8gIpj`V3QmjX{)otm8Bi&BAzSH2jZNh zs)NgVuGG+KeaKXf@A-Y$EbL30h>+CbN=}O**Pc=VTJ5I#L}R3C-h*oMPRCsJC)P>d z&L?TD3MCpLWP_1+NRoz`J;Qnu|DG1ijL)%J%)PCtpvLcUh7=q7{z=GnabW+?@${il z``#=jW3si{_}7I~3WP6g#x|JH5+zcv$QU#FH$ z4Fq!y-E3}%Il%bkmXxw0l&ipgV8}f4cs^Xd*nJLFtQb4PFEHaWxl@U1yvyz@7!;P? 
zU&mw$F%SW^j9Ky8a8YtE#*OS9oMU`Elh6_C1x8OgyQEwb`L@YEA(bT1y;tpUPPpLV z6FG$B&VeMNs5-3Dl?p|4Q2UJt=rOCPx4DOaD7|P&1N8d`*CT>G5seR_JB%9WyZU8+ zyK!CpszgO?t9Y^{lAYo~iN~e3(LLz=WMm5JY9%J{39u3q8q!t7Vd}3_?Ye)=n0Z7h zoCF7;P1i}ZtgP2K$%RFg%`a~hCqmGf{w+(`3K~!8l^(+x|K!Ychl-m$F$Xl)MsM|~ zKw1nB_vtrv$~9e79H`ENh6rSZXl|7;)h+3Hpqz$`}8;ex){O% z+%`=OdH|85;}5fNVcwsrH@8&nNdGg&-)&*=tI|wO?pb;%FCP^P)A6O~`ufl4VRn{j z-Sxl2n?Thy0BwuyI*kYM#^ zt%sr4b1*J1g#B}_xWs0Tj$?NS`bjE=)Qi)}HDT|hht<~up&$Y9a9Tb!LWF+89E;09 zr`;zdF_nWCWp%OmhO@O%*e?z=us2Y5jedS+Hq$bt;`+4T!YJ8~?keH2l{0!40x9gK z46s7l4z;shCf?&^gY>?){tSuM{mB1Jr%=-TC&`y^LD!+5hE{ZQ|BTz)PEKxaEM7@^ zx(lHp1(86 zfLel8(B7T^9!^Tn^q&Ye#BgD0z{?avbZ`vUrnq>yJ~*9k&u3}qBz(r!3HaXxjm{Eq(T?OdyYLF z_hGLguq}ADHV%jP-Il=EPn=TTKTklc6E<_ua~)bxR^s?QNmo;%Yr*=Vg-N^aAz`uX zlL#n78o4jv=DEQ6YJkfXyFFvHvY|Jy9E;yXJ#nkVF*GtVVkT}ClMM`1rOax)r#q-} z54b#udHOV2?43YHlgs}Ud-JK==)-{lBbE)xrr)Zzd!wCa!G+nUCgt0DK0+;CU&ws^ z?T;c*ej+U-1R0f1T{%W_Z7IsTXvE|ux}WhwLci?QNv+L2zV0VLVluo}|6m*LIC10c zCu|6g!q(Q_seQ^pbs9NvY}@#7ys8R*;vqdEn9-+YY)w|+nP!9=*pii#lj9k4YV&PY z$;EPT3=Vvu$cE|>l2bJAb8GUt_b1pV))W^qXG*@D%zkf49Ua63|3o*6<M^IPsA1?PL+eXS&MB&dG7*HalU*quh}j zY{O7^7LH5HV5gbCtLPH-sFg|3>RP5*`5{3;8L?)AkvL}Duu|6JtdY@a!CFb9Rb4ey zu)_XPL$A_2Pd{wRoKB08ncXqGU!e;PC-?ly)w#k+4P7od-0Y%e_|jT>nG5= zX8+MfaZi6(^iVsd6(cH;D$t+XUeGU2+5g`gblis@BR02$TY@zhP?D9c9~de6Gq@`% zGQOJO-i|bVj}2Vrd&pW%Dc6_&5{_1$<|+G=9`7?`|4Q8u+XPRsS-?xY`p-OoU;W#b z9zK=pnxP@9d8>^o#}$qq&XdFH@z|+ds@r$&E;GN@n^AfJs%efBw}8Y#=vej_3O_qMUZq`r(Ix={-!}Pv3m<1JRXnx9db_< z6=&xzUoY4O?*78V0wK$@&vf9fCVgZ;jr+{C9@eiQCspnUChj4^w7IHe?3CXaDxQU3 zV$_29w}-<|vY`}x@bRFsY=!K<_FlF zy`(VY5enkHImdYj(fQ>xKhM{Q;zpp|>x%<(e+;_SmA}}>P+fXoqh9b>o5&2!X)$hL zLd3d_?G+~v-8YS4&tH0Ey`B!?@@uA!eqt7O3~M)I6wpK2zZ#=!a<+4#dK~MeES>z| z^@<>R8sQL(*^1JQl9_aMUT<(?4>_mrO@Wu)?bW}mntQhrThqwdSRTSKz@Yv?y=QV@ ze1wAG?IoWFbGkHrDh*mhK|mJiSgHLLz)yT!sY&aP9ezxAcQ**sO#)ecn++|7_P^j*c{P|U&ab8^f@bNBxV?=LA&y&gU4WW`LIm%_QP;+~#4%Iz$Cd^*(0 z!y~m&e>cOTCT#v>BDy@?KRi5ew|Brj^iAL9e6Oy@=e^a@)2khD(63cd$?0L8J`kv{ 
zM+rC9v9_)fAr6nhctuMQ8>vbZH#GVbGHr&46fwJv7AG~`U!R|zIttV9nHk32R|cJt zN_5=8L>%0rq|>_1)Leu+NcF#?94;&$asDr~{a%$_)aSojEiaYW3ql`vf(}IiQ5tYt zn7P(#>k`w9I?rrj{WudvBIW1mk|$4{Flp;(`R1fnQPinwwCUBmWB>W(TzH|J3laRv( z1b&GO07wxaf&TUO_e1>e!~W_Ce`c}l)GN37+a8oYYh&yK0f1+YI!VQ_%~mamg|R?z0}^ zc0qADt`0^pTfAuM9Rs=k4xeY+E^J zFHUlrLrtx2T3z)`8C$8SPQ6^~Z;&ms8PHlePqAz{Ng{3Rm-H^3vb0veO4TKQdEZE# zK7-U_0KQr73N2R?OrM|d% zuAeOid{;||csH^#jqJ9#vSp9XCl|VIN74U$$Bdk6i~h`zP_BJeGhH+2X9Ttx+fd`H~A9ke#K4x@>8Fcqq-M81P`&kor62uy;01PvfvhJczI!Y@qAsZc8kr&voceCLHt>9tM#cW^Zt37O|EgugqF|qw_vz+VRB=2GXwQA_w zGqWWcrJ9O)!t@w@s36CRb@|U4kEzew$xhH0KuVn`M2Fwd{^I#&$A48;WkZGnj>u0d zwfw$2avN8giLS>}BTkV2m1PYuPCgJN^>2R$Jqvl$E~#^@+n-_e@83S1(H9;rGX~zB zK91opEU_EzW9v{IZY?xfIap-C)qb&ORF@;CpIcK#U+rad){hT=J6`r2*l|9|>>Nom zP*$MJE>@3yIIwv=)ggWp`Sf4?F{B+A;sPeOe9AQorm{s#xbA4Q}Do$Ooe*^Ow{;0>&w{`9@n zH211(Bu>CCYP*{yEIM^F+J^8dx13S6@D!@%ow!k%h!lwV{jI~!#TfkYB||oL?nvEe z8Br)yn>K&~xD(N~&1V=s%KAx?IcO-?y`^@XH-AhiB7+9sYhtCI&)V*AK2vn7J8{r{ zCJtwC5G&#htCyDQPbT% zf9mAM>d%dJakZ!h8 z(nw27NrQB==?>}c?oDrCvpEa@&-2E8&%O7Iapqt!7@HXEwdS1b7oYEUk~?@DSeANs zrg=N;aG-miz`Q?y{yyhs2K91&em+%GuZirR`!YhMXk}=)N}9XoaVR{$=;hL)Ul|4? ztv&9^AH^g*1ioOw`QIW@fon1LquB5yNe7wan+Jb)+2)NE6qhj!FE_XzalU42Qmlt) zu`lhOj?KTeYKla5s@`MHCYaVDV+eCgP9bsm1kgoG*Vr5MHM^v#k?BwI;LC=*$Dif6 zE6em(puzDFKbtf3n4CcH*IK)XlSTf|F|(a_*JC)*ql%gQA)vR;W#*WJ)Ks@%5trp! 
zv*&J+pIy%ogN>bW-Tz`ahXKxSss&=d7_s6n28F)Ff`5q)iB^CfAZJ0`EkjX3-x3QT z*PPWrwP{I<;thI}=RP|5pI)z|!%Z}B9pitp(sAIV zUq6M5{HwmmVzP(ZL4HxX%8=%Yo|k`Jvb#{}z1fGnVzfg`w&Rg==b-NkJ#UMJ5J*u_ zPNtOWl!EDtraGE-Fh_cChb*_Bnw#Iep?q`>kqZWpC9Dp^-K6)#TdY8ok1;hirY+3c z87S`sjSiJn{T>sn$+R1d2Pfe?-~Z9%JD6I+5H?NtsgS=Bf%w$Db0~hzNyEBqR!A0V z8aWa^T%JOD=!W421}{p@;1+_L>|Lj%9Y?-wfRW8C>G91@Hwgn8ub&FL3u+Q0`Z}So z07CMD2;in@SK9>S$N|v^WZnc>(MHcDT1ST+ZL>z^M zTjTE7ut_jxK7&lULQjS-tT_KXGFs1wF*QIfnS_pIpmR&|?*i;987`h&m=t$O^_uZd zBWmoT{J;L4NN2!AqI}b_@+bSa?T9Ce{Y%jEng2uS$hm#?s!cCdqb*A_mZGY^VWaV`KDqsTpGKZT?0F-=@*8#cQV@ z3Z0ys+=Nyw%=p<~XS@`g^4skC6OhlIpKl1jThSt<#La#k6^5-`Ovt5O5Kog&=IK%n zZ%870YPWkj^mVbGh3*^ylpk&-s%$(43EA?HrR7x7pgAAu^lze@v)yNf@|x&wFp(JifO$w3Xz~chcVFf)trPM``5EpMPX%TBT-pu6=&@>ZQSg;?l~} z#^-lljRHifGKKdtCkd(78G@v#DZn5B2?}})RDee5u3l-xIG&Z0y##o5vxhE0ed+*RDhEKl_KVC65Hn4kwx$5s%M8dH=yF8! z)n$;-Ux={|jKcAq*4Q6WHUMvX%)NmTC315=$=iYO$LD;ub>8@WAeh@~N+j)WqaV6C zxVNXJrf!G^UBcq$UtP+5-`mj#Jxng$m*y<2?Yg~d^ll`3*_*W@Vkm)ODQNO?hk?M{vRYadu7J6OSlXt$EnKXwdNKM3qJLnx+{SbL4ox%gcDbRex!E{I{`2ifP9)aOs zxLEAoBQ7>8`w9TEJ~5K!=p?3z^*7%ahP6#Si$f{aO2N|(n3>hAuH|%mM zc$v6rC!qy6+{;4Nvb-)PdqwURKDeOViSj2D35*Y5^rXU93pu$1{Pz52 zlrkNaR${C3;9qCBSx3IXAD64vS2{bf~MSO`?DlE3({lK29yf_ z4-q!f^8bmjQBE(E0$A=;cfUIJCV;-%sLF-NgN~bgZ-PYfG?e;+^7jiw{4(+=_x zJ$;UEea3qi_=ltqgXY2INT+kEsKz}+r+!FYsKE(t3t(kvNe+U6_uw&&Zbw)h=fnYkWo8FfSPuvnz+c}AN&UV&~sVC z6+X`5+aawWw3y(X%=Ox0|KuM;etpQvJ9MC40jrpW~rqHH(oYvh-Xg@jyJdCGwdb#e*f>hdw}Q}gx3-0t78 z@gj#kUBUvye`^%qQ)MAuLOfi+`_x5Lt?!At=d)`;rPFvj!Rxr&5IJbhK)}#w6dp?Y z_qW1S=xy}a7RD15N;iAWw-~-v%TYk;HGxH=g1bdmQz--`qde?aJ)@y7zkDQ0pLjs<3&o5A3jvC?8R zUq!$a;>i14UyS4MkYvNz-9y9s!Ytbr;G>zt2Onoy^>ot=v^6dD3 zkr3Gy)ZI>cMU1o5ClXgrg&gd=^G7SyF&^fW|N4>aB~c@LZ+eEfyuEixe*H6GGCVpo zZ@#?tRkVXRyAXZRGja_sHj&HKe&+n^gf}1{TA}{Ahqk>=mN`+XFCDI8auK2s`CME> zl#c85QC#3F)gjI|VS4P}4IN%z_j$KSaqMKn5L-9J)3Bi5C|#cY~0oLgBEOUCqWg z%sxo+`6jI&Z*PIQc8Ec5{q?+b4{8232T+<;^5|OabBK&gf2a*0e$r%cy7f-__OivPXJ4{uXF# 
zu@F$5*+cpGS2+|Y(gK9Hpo4!h^Ip7o=^4Ixi#Z+z?#bz~r|QU@DQ!itiG7l@nSh71o{+yFW5#PX5#OYQlS|hVAM2YTKKbjM8Wu`4jJW<3)_Ojs((G!&>)8F1#x2+qI_9;J`3yB|gGjX}8xCA)+2Rp>l zi!M(A#(8>0F7fNeLPF;(@#|k2!VSfzN1buD;jgGQPw%@tuY>vO41FOjf!Xz%CVf$%SZ?oREHDA8gB(&hNiafpzPCJq< zfo3*YOs2^$dkgv{8G3$ha`?w#&Efc4F#9{dq=Vk=qp$U(@TiyS8io`>rA5+cy0L~7 z5Q6?7>4Thk@h z)PhRhS%)^3O(#2VjPS2tOIiaTcc~lxI<|@y$ zII06AO5*WUm3W{0WM|*|*`+Rh!VU(ic9hFf-vMGxUUY8{Q5v6|Zl{9oAuNQ|Dc$;X zc9o@JV2R^~7N_}I$W19-ap9o1F`@kQ#`~8u*4)4-QuDAfZnuzI!D7;47+qqY!mah9 zqAg3gD`p-Q5dIUZ+iiP(WXz(TE-*~#pU|fgitTiw#PRRe#gV-u_hf?OD!ZsB`hq= zAynS&6w%6s5PWWS3EQ8& zq+>r(L4t3!cFxMmA+oIn>5J#2n!p_USB1!?!Kq}c?l@6^ULaJ$FJP%$(PnQ&+jP9mc4MAmlB z_9rX^0fkG6^ld0kL?=(pMVepuVQH zhh0h?D6X*Hdc{}0(}t33=a%(%#B4n7QMDM(3Muv;XCR_rzVs8vhPZ}nVu1Ye03g+J zeE@Ad+ZwD3rT}N8=NGj?UbE$jBN;unjw^4ADQHFkrJIf1nB<9c7`~Ts#)kUtFlt;g z9>n1nOr1F_5YC2?oYbqzsgan!&+dD?r>h$T@WmBox?2$A<~EXe*j+LFI~vHXMKie_ zS<$Sl9IxxGeb#_9p!?|(c>xU+k&61;*`ugg%N$A5mLckDnMz`WA^kc$(Rzho=)WBi zZmQtA*%KHt989eUgRXQx?|rxzZ8J;KSLxOU&Hg9hLDor%lj2>&x(C{22R1J8mO1p8xcb_UYrhn^(8M}x2@zpH~p%yeTkWu|ecls!Mhk4KVIutqO;;)2%l zA<`@Rsj;z@LVO&uklQfXc`*T%5Fpzx3^L1md9_YnqT!4E`gXlRysX`RW_2wy%oHM` z(k)b9dpj@@2NJFj`3w~D^i9aGI#eN6f{}etO;`{Fb{Nt-LdOL8N`HzA$QQ+#y_h)4 z-*l=vyG>z=W}w_j zJd|NDb|3L14&`f_PLTJYAzshZ7+@*iF?e)P6x)ivezrO9bz`Vn_$UVC=HhE@>zaHE zhV(WrX3Ke82x+eCu=+zRfZ(Qr?asHdAX)~xVZc>IsE)t0(>-Ur8zf?Q68w-Ot{fy% z1UpVB7h=|v?m6)WxL%Kwek#HZ=@vgH`;AIvfkxdK5|hf<-Uav2dOK6 zW_C`|K!VvB@7I(rEXhAVTlSm}ryX&ZF|ax$CZHm7s@5rZ(NS^ogWrhdHv-4wo$!h8 zps0Cv#^@;A8;@*grJG7ZLWJKl4>^=cCkQAKEuFSbI2(EoXk$$A=ccuadFkoto2-JU ziuaj)gS0MNrZgyORDT0}%F1|7G|`%`#h3{B(r+xaKRpko1+dWd|KA}jI> zACC#MPIj#dqy7i=qDp+Ja zS{!ec5bk4*pV&cR@Sxu;8G2y2&cX!kSP}U&Pbcc=XOKn~oCn!)`7^;T7u(e? zhZn(~&VSOg*7EBMu9t7F(^CV)J3Eu2Q-7>r0qW4ooz2VCfX>cNke=`zZHyVKbRa|; zV^OelI@eV(W((|%$Wi2UgHfh68O{tt>Ou@@I==|b6xkCfgwy^ks-MxEeC|_5=_t%c z{&=H+mV)7bV@aJ|34v5dE02ojC_}A^(PcSEM8{`k#G*d<9W;+ym}#~n^LP`Hs@;X$ zA-XVc=?i-dCAE&{6rqoAl=a0oi_LKJnMrQrONQ>6^?>=$11cX0`pdb0Um_R1k(8d! 
zXR+>X9AavEzc#vFBOM}0XxBjNQ|$1>k5IpIt~Fmmi2$jR)F#JA8>4dFLc_2Em_#$N zANRkz+QJ>nO#{Re+HZPW&AHEu-ICesn}L|=k~qec*Q4(CcAN>AaTyz<#gG|u6%-;8 zD2uG#1FDRMArr2D+bfVk9$Pfics8tAMgZ@!NdgtLehe%KL^c>w?m9*tgg(UG-^Q|Q z)Azo|-SOK1PoHZX&4z9!*4H>^5z^~trkqEe48TA z%I*2!w06=}SpNCm-pnvVsl$+*5!RArcPQk0MVUK{fgJBTlFWlyuhpq$wfbfQf%R{vwBX?j5H+Sm z>y-d?dlgDqnNa^E9i_7*p z76($6X)pwdRF2Tigb9_V*CF1T{1eid^cmb44xjgz4@q*{JlO_eQMV^bYYM24>*kV$ z-Re^Zi8#7^40A+ho?M`7y*-PEB!N9OjDVto$CH&4 zLC1#&FN!by@}{Fdf7^BY>!?g^HrrkLl7*lU{oSq>J19!)zj>drxQFc5)w}e9Z9)@% zZbR>gg23#uMg2G}-WZOV(AO}s!-|a;fL&TZom=^749d z($|{^%SWut+znm7Io2*$yJ~JjL!6ud?ID%&(qTx!=UkevXeMG@fmo1Gy^UFJ$1-Bxv?rW_(;}dYWr0yE> zE(2|%@zpq4=Ci5PJ9d}~>imnHRk*!aH?+GxVCpC2J?2Yo<+FteVn0B=XRu3jvsxIx zUsh;yL1w(J6Qm5y2~(?}x6Z}s(DCS;LehyZfa7B!Z>*`#F%&$Zn7(Ze+I5AgVQye= zRY=Zf)vwUXLUBDG_oyz7<{Qj$VeZjnH}ToNCMDZd9~$B$-tC%?&(fbMfYCNmbov~y zl~-HNGYEw5HghTRbr>cbCz3*p z$}}evl$QCsCMHDQy||}Pd%P`_upsacGJ!fL-`a01uY%_nVjD~`UMFi@U_4`5shW|N z%@x+T)K_Aw*e7-s6Xst#xp&=odCe!C*BH!lFy>p%oGPAW3PkMj1tc=v5Qx%NMYU2v zHpLR?p)O2;4Lt%PjpXusr22E?h!9O(V0|9GfKP(VitwccyGv}|5)f%1Xy4%=F&NA6>KIo+L@@0HQ3jT~Ndi*dKFLhp`Wg+=~!`HF-ZXFv^(eJ|z zuIkL>DjADzX6JO?p@M`hUcph=UTJEDog&ty@|ayGI_W?9G;CZEPpJdfFtt9QhIX{l zvATcMl$RaY2DC#3P~@4zl5pOrfRIRY{cNpQ^{vVZQ@*E4<(cPD|AaB)%qlmKW2qP; z%s2Ku-p)}aH&@2l^L<)v?H-*6R$282ROCsfc{x0EeExxwNeNG&8tC`Qa-h}QHpY$N z*gX_@^etn6q+gtU8o9Z^VP5h1+A4QFeJ!l@@v2duS2$xS4;_mX;l~uIDtPa{_T5)G zRNL2xw?8V3Cf!G1LvREdq7I6AotE%1rG-9!Cq_X-fGGAK;-q58ie%4e=woBQ8e#to zV7)14{}t=i#5SI7eryBxeVA#-`Q{N(Lb$2sLiKkIHUq7?M8)dCaRWn*l;lcRQ(Ie? 
z$1zl^f1?wi@iS3+tJZ?GWbc6!UG)3^E7B{%zc%V>V+d|o|52DtSPGuE@p3#O)yr_k zaC>krikXYRfoI%n`3|X{id$S_rYH2uPvtl1Qe-|@XC0x8dXA?|ie>Bc8C@?JUiPHD zgPox0u9w~h_oqUo4JVk9!&i5(>4lRT+wRweM4f~C&=fU^+JF_oFB(ksuJ@!mi5<`5 zgM9hT+M+&ss({WW@eE}dehoCuz2#j)dG|1Syd6E;l4oaZwakBFyNegk=EW)i*)6DE zD+5T|P1Vq}`Kn9HLgq1(4b(02FO{RoMSszXmEO*mqDi8doZ%HI;z_DBVXQctS+9nf z{4_~#w0%BzOh#(TD40JF$TC%UwA31YSI?>`vVIlJ!>s_@sL9b=bH7a>I9!%#f&*>{ z0K!X-5ii>qIKP0;XHUAn5jTO)%~Vx4nMZ{S-_?KK`5R2cL~3Ux5pVKY7%V$sqKcW( z%>B)nXgkHZ!p!-aA0gUw z)yPabCzp^(}2 zL*>O29C}!2%in5H?iB2BWkoyHd4drK+O6RAwCpJbGYbtuSl-35_l?uV)tGOO>fQ*T z7woI|;J%$8(r)(fyW$Yf>Gy>Bk#}yxNm~65e`;taW;QYc*}Sm0j`QIr#oN(U?Jo%t z;avtI#JdLqp1}bRsLMiAoup>bEjZp#w`UZgc8gQl+KJc8N8ckbjZBD zpQ1NwB2*qH1g1PR2^plWhfY5{)nG6x$_mEN2@0W_EQZv{s?P2SX;(zH;_l~Q!)G7J z;O7gn%R&u44;hzJCq}vthlTnfv@RHKqf575$J5weoxBz)=EYs*#a7Jx@CG~;op^0k zN;-!J=pB;6dW*A{m)AVsLALCxN_$J3DcQ4)bd;k0W*ax)Nqp8yT) zM0tAEq@nOT>Df;$z*KzAROlfZRJR~c7l$-c_a~~Th%Sn}LP7dK|1T%}T@KB=`jl<}|fX#Biv|h~&)+S4l~MWb{chPHkH>Lyh!Kz1^@oebRz0g0O5i(q;QK zdHqDXDA%QZRSkiC$MgOs_S{b@eSso@ntD?UbnQK0OQC}!fZ42$JESQyw40)rN*hu} z+mC%dnn4vO&X=E`Z$aRdbfDD}{uG#dEHJ7*0?rAFiGn|Da)U1u7Y(vmWhHP7v{m$vM=qJ1l&oN&^l7mWb(cc{GgCw$fdfaf{4^4~L;38H z(1nv->>|_8`QU;vy=UvkFa-S_WNf>`ThVgrjXnYP9g>F6l(;24I{0X(0=XY_xZrG?#eXJvEHzIF?iPP>czpUoSVZ-UyJ#w z`?uWIh+~K6=P(--&OvgUlCQ9#NNH!5;-DEo#@k8uhw4|tc3UCg$h9qV+&{dr;MJMT z%XGM@$t~GJ^~9ymu2$52EHE`J>|(2jH_PG{7Q)?p#O&0~u0nu4dEbRBBip|)ey zndrD|O;;>kfW-$~HT67e-nnK}T}j}YW)B@4}Q z1@WH1+;|u4SwocqYg?vh7Gep z=?&muc1~R1Kl@xB?KUcenJ}pq7gKdr$EM+!>kbJ6MU{U;({hGT8Yne~CxBA` zPaKjCwZnB95OHL*YcqKd`f_`^m%<{%&7063b$U`4IP-+22|vT=Ktg-A-n9otgT3@Z${l-(k?3 zwGZ2+yz$ID{PVCvtBH4hV;|_@f=nzaJKP!|?E_PZJ=2MFJ$d@Ab8Kd2=kqI0D`p-x zT>|o!3?L?Z*OZ$+_xRtIP^ziJr?R5__M`Fe(Q-v(oN;e_yYXNW1cC8`7cBo*@AkN;PKnY{_(pH0v~dn3=6PuKE8({ zJ~7$@cCN>7Jz10g`!~LTxYSh;!lW%&MfnLIfov9`$DacRWm@PX!uHmt3+v72eaVyl z*L(kum;Nt*sN-uwg|;1C+?bhFVZZJobBvkh4;H$(ZuR6itcVjO`O#v4jt>4cKYtcE 
znAo0Uct(H}$Xh(9?iB}^M%5hY)KR0~OPP2X*p>e32Z%{lrKM`8&?VnRiv52f)2olj@wt2%NS5@*onszNNGX$UFVDmi9LBsZipHipg zIGIbj+~-Srf-m8vJ2|zlXO=7r$O<-Yv_NVN973_vyH#i5HLi%RHHa7k#pcD@tqJeG zjA8X3sZ6ev?1<=h9`&H>>%FJ29~rLTJxNo{cLrP?4LmndcfZTU{^K5L;NA6~ur%v2 zlEa7{V73Do-gRt#@vGP@US`IfIC;aX$1de?QoL~FxSLXaA9wO8-QY)h@-2V@E9|?h<}r%1M)C}?nPKUIJFS9*fA@i>XkqGwte|c*>U&Aq5YdDn=QbwpW9$e zCd=zwb?fT6C8Qq>c+jm(K1&Z*G@Zy8w#S{OP6)l7%w{(AvncG5C;MYQ(ZhNz2fh10 za-#e})aRMMe#bX&cAzeY>e81dt>{V$&>Jnny5^nPRDr@K^*;xb)u6#ZQ{6dp`Rn(c&ige>1Hmt9XjVbbJijE;My_CJfD9w>148x-wB0Jl+*ksjevL=xXwi8`30+Y}^kU zKTOx1VH01kJO~7IZjX(SQia&sg%C>V)K+S1rga$pvNtWrDBs-Qd;vIq(j71R@CO*r z0OJ&8e3^U=q3 zkB{8^()8TzGqT)|w%f?QdOdS(x&!G)`1$iH@$q+lf-)?pg&&`4r0WT&)^2tgx@vSh zcCOoRK4uaR3;rx&p%`M6Z{Ahi`X+!oYJaa@!xeTj#+#IZ<*=uxWFw+YeE83G%6c>b z4ulZi2B}GTq5+wQxT}jxqEa|7Rh$FdG&>g0EH^6w^_C_G96Z#_ziq#AMh7snmqFS^ zQDsK*`LViapN-27C3vn?0?V{SlyhELs$&#xJq()(eUXMPko_5M7A*O&5Pd+RL-CIW=wa zf%;(ZW!Q$t`^lWD=TrwtnisQet9^e-=1C~D%A1$nXU>2)LbE%i%&>gflT%n2uz%)* zA70gDF7T-dtL4r|rxAHUF19W%iVF)*Pc3KyW|ZDt4Z2GqWhE#JE6FGIn@M^SBm0{T z+F(4IHx|dPHA41nJxiC3-fGM57r_`>4bt?r^T&w+%6H82Px1Z$Cr{ODdqeVc%7p$f zQ~>n-ABK>e+F#{0l^xM(04|uguy~7#PRpQ_i%0>$oXt2% zTx5eLZFpgV995NyIlM%XQl`4jrfX%a$o^d~T%N8tf9sK9{yrx8<*1_ErQ)=`}?}#9gPgit&KXdT&mO z-;EOKjc!MijUR?h1QLunKqdf!xVY;*KlRLL4_P<^%zT%9RSQ0YWMfH3`a!_?bd5dC z=U5WIX7l~8;&P_tjusn@)q))k9P|G@;r;b8o4PR+nVl&+KhUWa`?X*8>*f=ifO+P} zCb$tJ3aEIHn@r+X$TZ-0EMc+DJiiKXst?P1tkkT}1SfH7uI)4xRrjOzAnLhiRI&6*92V<)ezUs6+5^*6G0_Pf8mZdTKGrpfFH0Suy{7p^@#^ubO~-9d&Ull5>fyx`HR75>Y-sT03vC3e`_brqB!g zS;cL`l^p%V(<}~JcDk7R%}VtQg5e#SZrhn<`Zi)GbFkmynoT(Bm6Xr?I+@k25WaRi z!}q*x_lzFK6Q_s8XbC&lQ`5bTi^PcxVls>Qq2JCE>uLt77|&FRHnFjnd{I}Yy8Zgu zfS05TuA_mvgc8b@4~-1$eQQ}$hs-%)+do_38Y8TqHulI9iL*h^K>d525QpL_itq=9 zV?0f(Bp$0ajfUEI{6VahIvb?7>zJkN@)ua}X?ERU!z=|uQI=7iQiiai@VG+iKA@|} zy~tu1O4%(kQxH5^qa~*qoC%R;6)Uj4-XuUtoT8DmIl-#2OiT81+Jemi-jIZBD;}Ki z#QZNmLo{v1`FmH?@`wzUTYa*?4?`!s(hQf7sqX=H7!TYv(71SJ2L}u~0@gX%dpr?Qdl3Zsu(m zat_tMy};dPqz0_vECjFBg=xeOD}em28ONYpy-0X+X<}w(X2MrSBZD2Fo0h{Y#rC3s 
zRPCDVkZ*Zq92yR=DmDR=%`x?LNag$Zlpd_mQ42A`U+pmKE@@;(*HXMH^jZR zY4rQQA>1kgYknC*cpoA^r-*)qApc+>Ak?g|Swh`+V{L>GhEm4vvpgCZ<;pVYuHX!cv=%WyxW3 zbC#Ognj!5MIuj#=00O2to!!CcA4g6mTSg9YR}m!3lVUx$+**y7QR%eSXLo|SW15zK ziQc=`C;~p2YfJ&Tq8go=H`OVPs<3NSQQg+*!d(38dcq8B(8W&BJjPu0S*zpQ2|q**?cEG3g2ezXd6Q+IdwSBbv)nq6_7x*@ho6xmE^((MXu*Hq0jqOo8VXS0Me6 zLUa5>m0vJ9GyR>j%$5QLHZVIlMFg6CyDK+%;U+wXRRk{pTPtFPbOvh&mccNm)DyeH zX=`WR@-xunV3|FQAraRgv}=>mdXTvMCxS@}HP*4D&;+!dmBW=@PV$vb9h{@=c?Q4sLMZ(Kg@k>%6!6WX~vV(TySqmIfwYyfyQOpFNKZR z;E^&wV}*%x+RH~)L#vmI2ieR!l)zjYBX}gcOUPW)>_tfI00>@ic^mJG2k(MnRAfXR zR{+mOw&PI@ictavfn34YUe$h zrWjI(tJS|4)555%(mg_>=YL!dA_0Wt!{P+etvG%wRfkY~DC!5IiIOYzZ-x4%fEn6n z^DGnUB79fkG}_>P{eE_N-RvI+$(}PCXEJM~T-`M|Ko30bGNFR+)AjJ7Es9X#%*k~2 za7@Wc1oI`se8$ZlDjX?zhFtX0B{hpKtZB69FN4}z&{^=hA$?X;N6vc+EV}gdq0Fte zukw+WO~vd7D2N4?j^Ds>AJCIh!4Ae2{CGy2C5C)mMg{1GX$WtUvSlEcjr73UnzYrm zbZ~uvlPJ2tx^#KVwVwq2ap8JuM#J*bB+)>vcNk+C!@zH8MiKN@Aq~6)T!FLBNske= zuhT5RnGJ4>EHDBxI@;Q&BiGN)+AI3o(Ey_w{JlLCz@{d2&yzLL^g4s}UDjRx1-vfX zDWlb0vFXtjzGF-ggST)+-wQCTIZ~5S|Jp%u<3ByvC3;vOeH>j;N5^JZ8)7g?zcxBk ze)$24*6u^|nLHJsD(J=hFYZqg9@i$8eFw#LvmX^E$4V`WA*BhT(6Zfg4d_Nz1Amfx zMxfT26b+325cq|Lh5mba(ZIeXKuP?av+S~4^?K+jyttZT-L4wN+wGywC%aRcp#uxFUCIw;0UEtdlMuvh#~ z3=aU<6ydf{EHTYHMxyU6Sqeh?eG2La`yGf_DkQ?`;?Hwy>~V9Gm0k5*P9!ER!i25J ztiw&gs}k?kL|Qwl-pqr`BbyofN=F;WcFeC5NkVwNptC0~+j=tKg4utHLCWQJ6hp8X zbJ(Atv0 znbh{Ua)RDv?F1IDP5f-xrzGM5QnGnUZ2FtGwe`}#8o(71q&Z@vYztYyr0wO6T6Y2n$HGu? 
z08efq&cptI7noAP-};Lv0laV{er00$NfTYBP_q+!a;;^0ABLX61we@;YtgcnQ|4UU zHbPeR_V&}(uFtvlZ|GJGlY?vWol!s%(HrO?KO4=s6reO61NGy97V&k@te=juwr~WA z)K!A3Ux5NTa`ZZ7p)^VDEaBC;`m_qqxkkMS$sH)n!cmc9)&CO!QzSvY*)oqmXL>+Rppa&~#LGyOK3NN$hp0hw94>((^;|~UF1H3T zw^H_a`I~%Bpw#vsrkms%zRt%u2VV&j{gpPXBzo%BzoML1(U3QF7OY~!BR;(f*Pzfr zSv2g&hsEaH_??l-jpQU3-gz^>D^<}e#zF?!d-k@p zAxslx@xf8e+t_fJ_LFY~MGn5?53y6Gp&#`eewIeDN+Dw8SfL<4O8cP#5ri|Cs4nk- z24L$ghKLYpLc&x&#t$lpEqpc$a1=BHGKi}Q%+u5x;1GkG$e78W`n{yBSng##8OGS- zRMN#kUX-uyq6!gvBtqD;F}~cycSm}WssffiES?pmJ->u(8|;Axx|}!^tr-YbCCKI1+8EBLt-Z5fZCoEA_$v{o*z(a96!Qtrrv ze3Y#99N!=A@9($QF`fmyqADBLodT?7^gVe9MK#$QAv>)T-_T~FhZnSIo1aopzwD|8 zEFzG>%?+VVEm6YcfDF!V>q7}z>3`ukpZ*8OnVBFH^NTU2-&2_ZPmfrzQwrL7U>|PS zcEVOVvzHQ-Wuuz3k@~bS@(7R)>D`hQ7oB0Ffb`1EJ|>muo`rfZm|HRT&30H;FEj(r zh4uM|?V~h`K2L9GP3cX6Z6OmDoSC7J1|dWkrJH{#P6tqw*&e}6eh=?4>V@-qF+xb! zEG4H9jptO3*0rJGnU9{+=vES&TKq#L$tc)L!dD){DckhB z(*nc-R>@a+h%qo7@t;M`s@xY)=IVRFcT5HU+W6>U zcFd|90fh$rIZ{*{3uwCNba5(%Y)d_^@{0ckCK-qR1x!-H0JCx|Gw~(61mMhzI$p$R zxUS%ul2g#E+gtbJ@1;vs+d55qZR}cwd>ZKDfCv_F#2k0Q9p5woawu(&bH+D1_F0DJ)&`C?Qa^Fv(EAfrxy*UJbz{eJH6D zloh~2Fu;PkUjo5x-aHj*&_2UevhS<*jIHtg6?!wh+UaWtaRsg=6q zKd$Cjn70gWJtj@j%KA8&bAEu9f6B(m3Kfj1K?uKy%Ju$zq>~W`4V-#I03b3QyyIaU z{Wve*E9AeimG{sWb1e01yiQrP2S`N`SYKokwHQ%X4eGAMD@NMH%171^+OT^@@|%*4 zhEy9bL}(8)A!K5moK9~l_@up(!0yv8@0DyCUQ49cT3W3)oS=}2UN#uuDgF-wh*JOr z`gZR4Dve7WoH-M^bNJ$B+pTd{by6c!1c53?+ zAC9m$*ELU-GrEH|0LPXEw?}_}g#h4*5x;xl{L-wmlnVoW)bkb2If71_cb_Vj(y~Ha;ukxKK0+&4os!=E13- z!J|`WDd!7AgyE(P&I@UPVF5gzWASi;uHz5`=&FtjqFrevjO(I`PDI}fJug#e=dlbOeDustWhCs)H4sLGWtcY7S$nLW+J zyTxrc<^M6Y0GwWvrYpVVnE}|7*D`?Ky?^&AY4w?#`al&S|Fzh6V5e8zQzq`nBmz@S zR{?aO*d}nt9vV{6UG*}L zXb$k-WB{sGfbElBcI=@dZ{om)oFM~GDii6JsnAfpDewN65Dwj!A(vs;)JWbc*h1#m zJEX1Ox!y+%asj901pYIT-vf2Q|37cf@}$4so(KP5-ku`VYYtO{2UVDluAWsp?-i$) z53@J$fjii&S6j?wgL$elZE=u$0oCjW7MNiY{ycDhR@5F>zl0vyj;q)>Gdu<7{M*xvpW?( z0P1X+&+j4MUf13WL7}x!7<57r!PS&lubK6~n0xE6DA&bNTg4!yq@+>0RHREv8l)Sf zkq!aLQMx1~C8fJdkWf0LTO@|=jsXTZ&!B7Xwbowych0%K^T+r8;dP1Q0Q1hg@jUl) 
z-?$fMj5on}C8NFKDtRkyHlw++$#g}6@^_us9*W6&Lqn$4_-CC@@d`yVWMd^0YIl&T z**{nZebhh^;;0O-BtQr-<%fdTHl9N8=p^ZAwk`6q^7c>2qeyA8mM6mCr$OVPoZ^^o zH3MJ{^{~Rm%V8pxP_>7mEVaLlnuHp_sEKcWsNNVspZ_Ai!D+n~56xL3bqx_yncSI* z%DP>I9E+tM#g&6)h&j{R2hCT?@ILi+MNu%kSj8(OVG)7hu5tpR)UHV{fG@#}*XT%L zjrv_t(L*7sKxrAo7G9wBurXpEcFyP5bI$I5J}#7B#w6&dpv(_Zh!> z&nNw3&L1q?&$mR%tqg5fdKOo!m$3~5&Nh16P=jol&sChay8wWBrm!ktf%rwe!KceQ=?q zJR&z)Y{T|Z1`q-)71O^3yOxzUtj)=1g(`#=E(u;oW-Ity>)UH9p-=^!8SW4^0dbcEmM7`;gSyXpWY6TO^BxriDA*uTqo&iDS3?_m{8&OnWo6D; zUW~5u3L}OZ%X0QHQk!`2lG?+*2p&}bn(!J&h2$?D7Fy3UU-5GJ{jUYP(+xS=g1!%J zRIYt&XH>fT{eYXiav}d%1y+uXd%<*p3Q^Fi;&?-Fu{J;M(|G#Mz6$FTYZRw435EzI zd@+F)>%=cPWf{|-(m??%Y>i2&*)Fo@pVuxebKM=WI3~Uhf&f1Jcp0 zUOg6Y#@kNhIQqx~;4r%fQwm6a6i1?e)psMhwvdI+XUH?(Ou;CGb?K+o1HL50{@?vV>@Cd=4hwk;;Jt` zV!tqYZVzuu=MH77Cu;msY;7^C$y{>0z?|g)j38;P*KVuA*v=4Z@DEj&qsQ11aXw;+ z>vU>j(YytF+N%z+r^oj(gqmjf0p8f&zpD2O-`K&skZ7g*N3iYd9OeHB#l~qzTP-A? zm^n5uWt>Bhb|b(7&vjJtgVQqJz3xToCmLJHI-vB`w6^zmqRgws7q)O9{85`Fc5&;+ z{SW8dxy<+nUx0VCp+UEu^4ln}(<5)L4^Aco;CaftWUh0^HT(sKt0@(@KV0A3vL2L< zYA4Y1`crlz!d$`p^C5hfxWSdf&gU!QR#@iG{!l(vCXq$Xhl`c>*Qb$y0japS56WdU zsA{Q=A2py~Ei9Ro1D+(?P0#USiJ~&iQ^$K6UYK5uaAmlq1jPi;&yybhr+{=Zw;S7b z9fSIYTbE*@4Vclsnl}D)rSJqaGV0`3A%xl9-E6xM z5uR*v!(}D{DL!3uHh+W;9)z5H=t(0ffsdZfgK_onOU2(0r&KSyac!LP8qBbMkA|25 z_yg|#7OLO8GWt_Il~%oWjrS&KEphTU)}_i(VBnQK{oHx=EEE)14pJ(0YWfz7I57DH zwZ8V!|Jt?DtMKu&H#onsT??y}Yn9&Q zgSrh*p=$96LwA=VXW>oav(Fn?$Tq66xd$|0n%aD_(prqrH32;8ho)>~s-%)u!$Ga! zRE?EhDf)N2&f_R*!)hv5YU|D0-a^HN-l-(u*TND9Xb8vfY7Yk*t&^6UllF@{ssH!g z<3|Y#V^vUQODAde$(uSrO-kJ`1S&uUdP`$6PtB$6r{4Shc*m0rIpmBc9(wTVy#QmQ zx)$dPHQxSDb(&T2i&}w5gcuP&$;8fkTrY;$e_DSB1BD0BrJp2j`@O5Lc}z{SwxY$0 z-rlWwsBxv|T&ff=2Oe?P({`ef{(4%`&dvD{> zQ?R^`|HRtKY4q^mG$QN=k`ZHLo#*~?L3DR)jN#;;63Y5>R#*@J&7zqer(795M`@5+ zo_+5Fno;sC%mL}<<8oXxsA)rTX!=N#GTLVSUeihMd3%lSdqL12zyEyl(Qewe6?*79 ziG<^NH7aB;<-hqrZ;m3cZeW%B1J! 
zau6hek(q?4+uQ#i$uy!21evD1^mj6C*5HavE0Nv}LgIX^yOg!HUHDRPpp+O_yD*D9 z`Dcfd^jHRwIA~`j^&NgP!=o%yBO_{YH`*~{9@jVHa+;^MrU0%p0bR_Uk<8`KBg{I` zK74^DJsI6&5}Qj8aQQJWbj7I+9_uKL{q&IQ_3%{l?(tB9ahJ0Onfod^TI@lzd#3p} zysvZ0)R{eUm5Td`ggwfbr$!}?eA(AoU#{~KvBLYUEmhs!@%s)0L1|zev1Z}u6SL9L z?Ay$*1wjE!5_kS{jQ$ow@{~zA8o>q6>-)x}Vj{ucM9&x;SeWBVB}U!>|4{WLKwc(? z1P=)_hJNf0MK-0RU_-0*?=WOoXG{cy%?D~Q>^gw&C{bGlZ)a; z10NJ1OINT6Ovr9fh*^}*WY;^qIP!z9>k+(IDqA3{VFIUPJweB9rI={R%)vd)o>+d` z?NB*aiAPri+snA#dIZ8Y9iHloNEi5thyZG11c$y1|qM#v+{=- zl$sV!39R5fo_$_kl<@k5u-x2Y2tH|s8&hyxK98X>HC+ii~I`fa8`%M6F z&iynffV2VNY`D{YtG{8kf~7qLfhu9FhmJ2p6wghpllk*<3&92qqJqgaJSWx{%*YV^ zc`@$+r6#1uh+T-2jN4oSNj^WZxOC3R)KSv1_~~W|>dx_;=T++OCWuZ&$b|;QHfV#}vcD6r*NBB1$>R z`}zjcUtM^I8Z?cAx8gNPQ6c3DIjoe=o*Y}Bsx?7%^5^nVt;N{=Q*khKm-FV?U7b(! zr#*T(0fMRs{7rAqJcoG0wv^(=;v>Ysr{EgzjKlJxBHl$)|D-6w|M^5iFi1(!)6kDM zBh;7C=O;&&MtzXV&D{Zo0E^0u~Z%&p6+bpxbRm`FCQyN zqCOeym%4Qx(WED0qD$&fZQ~M&CpK`}#H1pw_K4Pv)21<_ArLhTd)gy{>?r_0Q{@SO)#woZ=6+wc;0dPO!uN} z8{C2u3+}Ny9dE`G`gzz+d!wRL?r^}tqOYF$VD$(DSXY0Y-V62z9d|)vRAA+BFHSbj z4ZJv9>ln7h8IJzkaU6(O;s$t={h5y|h>&Kz;^RhK|HQ|k{@?O(HF3ZBxX(wsD+NLw zQW<#s9FMquQE==67aP+(XnIv@x6@Thv)0G*os&btI$+o&w94jn)!iy~fX zjgJM-Xc*}j!aI!yVeH*5H)FwV^S=|x!y|iGk&MNoL?Ksn91#Xjrtn%Nv1f9ehDP=P zF(@pT7S@#7j^W=FniHXe4{$8kwmKyd+U;#Dg313@wfSw@-XR}Ks`K!aZ{?uAXS)yl zqg9fsRLEuY)YZC^K?%yg63TV+!haBRr)!mN-lNOoBdCun~|yP48INRaTI` z3#mQLn`8d@MfFx_G}*kGs!(oLO%Eb4{oGqkacgV>EVSeW3RDRk3%q;q;w04Uaf=APJQV$p>Qw+Nn#FV z%>s>$&3pzqC)^9mAC=wVL8^@P%okQ1B6wacX5XP$yV(JnDgBQ!R55bPnF9gpmE;++ zejg^_nON=`^0$eYwO-<5m$*Qp=sQ0`Zl=i$b#OkOKk#COq_BaQRjP(+% zUEa*W+VI_SX`ug>nf};DL`s7;{hf+l_ekL=4W1(J!PF{U=lEAgt&9=o@3)^#iy&l!9r8K(^TE;IBc@<%JCx=vI8G~a1err`xaPF(k z+1$q?FXG$y6jnALJ;NFxAntaioYJv~$EU%aLh3&>3-V16d+aHI zIawAVM+0r~U;({*kQIN@vLDf(KHN&(v#jQEiPaHlRBzA3gkT9H>0@( zv4Z%VZVg`Dvb@8;SitE#s=ER18d(`SCX)$icW$OUyyuLXUm;C=VW(2WY~1>!<@mkY z_4hE5bu?UlpT@)P7^oUJnaz@`RB0;Q18D;#v2X%4n8jy}1{|RvNRa|Yu0?T70lFYG zOjGWK%X(+e-OD!%Oz9%|j&FPZL$`_g;ZrcqqnTzgI?%t)zPg2tt1QpCu7wB|SE)LB zsHmu 
zrEX{6wGExQtOU2)m*4A6l`9E1s1hb%Yw4pIMBDrx9N=Wi!*dGzwjxcdd*5Jbb;VoZ zeqS=iW421)Hu6(_|Ia!^w#|dY%mo4t*!38V#{86U?-wT_=-=DPk}7wCDC!k+PhU98 z|4}Syc(f||hA|B#Q?N*qamu-5`3(e8oUWxt5QRQ0%FkKBZz?YEIA*-ZRmY)R&Xgkg zOLoZOx_TfkeS~6j=|sPxPHK2J9-a!jERP7SzkWcK?#!1!iCkXI*<`XD@l;MtYzd5^)bCXD@ia^H3g!fo{JoG9d2JLUp3nhR+uz(H4e5@^?pOnr|bGB8em;b(6fM9|N2<|6!S2 z+8l-sO>i!4y55!O5*Z=7yrs};-J4SjcYRPJuDha0a;2p7pu`4w=z{ATR-MTT%?X+v z>WxcM)zhWrqOzVTri?!{yS3})9b9L|2B_|=Ug(=jR;uRX`>86aNuBF@PKJhX%JsM{ z9c4w4hCU;%b`j)fqtL*<0o2E{6E;Vdn$%62!Vln3t+ujP>(vO!q~CmIvVHou7b@YM z6>0J~_8rtI?-VTgMV;t=aEyn~&^B?1e>N|9vGmR+RqWO665ZXye3zmdclX3ISllly z3C_Kbe5)TY<<6ZSD7@hVW5S=pNg%1a{+{EhXE0%99H4jd9h|hyLt8RTFYd70Joav~ z#$mr#HcWE-<`wkYZ--^P?~W>3>GwYcsO@Y4jy*6On0C}wRzO#+F&ATA$l<)taM|94 zkcYDGJK>M$C}{P*vBSe!$9re{`|UC(eu-nce|;kwS-AUf7tqL!%2$IyFbX)7(o&o7 zeo&o9b4-kHZsCMSa*OaECQOV9A^eND>(=><3Evm;in$}Sr(B}g9ypyzQ4{ZYG@LTj z%H0PIH7|En$I5~FKxb(2p0>;GF3w4yVi+V-FxEO>mmnuaBnu;rkyNxWtXf{KtTH^@ z_twb~`y;37z*o^W4KnyDFKci#2haDnFdVEKPiN0qr0oht(IBJlWk^NR#U;3BW`q3q zo_d@MKaB)6YCjW1je4%a$zWB1wDpVoTG^|5>-}tI6U$bu2i!6GfX%g=D?2F)w_IGs zu@rK>8WOTVlCA0L=p4 z?UArBnY#>n3P6O;z$d8L;nNrtQc0Gg$Ud>lhk0W{H%&}K)jPFZWT5JssMA}zT=;UB zW|1NAQL%=6xA3Lg#Q^No)MfATgha~ssOnEDjy8;S()?_Ka#8KXxABE5aeTza zpH!E0A7O`wN=~T;7$-k0k4{{Q^qej?`TBIXO0`145c_aUrjh=M#BDcuvi81rLrS9aEb~EXKcj;1Dr{4?TGRd6t zN5umbT+u-kE@shF(}KIW&|;9jhFCRM7ddLUYu%p5Rw*3$Qpcj^<-K0o`$e5< zJFbabT$v(*V(@|X!A<(dmtCrlMAP8ddj7Rw9iN*t#N!?{9HoCxcmWg1*dG92;jY9# zcEDoZoPs9nBRM%FGmt+dlE+rU3t4D z^bV0sZ<|tBod^kU#;|1i2o~KrU{k<3J&dhrVm12I6>!XZ3;x(L}6ZWn*S3(n90VsymbW-nmPf1P*oXv*ik}@NybM=L`IYE^(JdNOvzK; zEp|QL{b;A_M9KeHGk>4$O)p)MiWy)Z*i~YCh6@feYHDgU@<)ii=PdKOKyDF?Cihaq zvH3&UD&DPq=mCdMWUP59Sdzjt4g<9B&>be_=7JMj_P{8gwb5l;m9BAv+vV&Vp)6A3 zdafQpRTFs)x`{>x^(MosCvh&*qdOtkLRHOgDQZLmYFJ^FIK@rmY_T`W`ECrQZP_j0 z9}J0~JcJ`IxUP^w%0mvmKaj%awVK4Y>h$l)J{0Vfs~EX6s#se98AlZvl%3NHOwy@V zun&IuzSgcz6fIi(*3@QS zk6d#g($vM5NFweRO?VOeSDKKh1<-`}|Dp-s)fL6hAu()~SRam zL0%0{$16PGJ~NXeeS}XZBpPu2QA*k8QhO08yD#sl6RJkWEBu*78^DZ_d 
zaCi(;F-q7im`+Sg+{nW7O9Bqld`1>dup%ki)B)S*5a1kJZcbdypFAz6dl{e@Ff3k* ztdaN%5!hmn?9WZdgWdj_85_f!=1}RZKQV?&PyTR40>;qk9~i@+KQV^oDu*h_TQo+l zQsnuku0t0*yUH4Tc!q!&w5#vX*D15^q)nEH%`BuaL`xsA-cLh?fK$NT$qQ{(@c!lE z!9r*tk7P}$Jfe!hK^MSMyyzc6$Rl`;Xck6(hh63@N|}?w+9w?kZ}xkdlp9PW6v>MP ze!{O>^e0oLd2~;DvdX6d=T2k@)s}lW@F*t`3^I;oU>kD!^&H&pxPzGHqkuCP?a(4y zl=A_PDM;$_>C?d}CnwQVz{UFuo$sf81Uj&xBzKC3alP|Bri&2?(`6y$6lu|8c$Hxi z!z-A59WQQ;fh4$Bfv|S|CuE4gt;sz{66q2jLB94BHE_~POS}7t!N!W7~gW& z7+-gDM}H@4)^#rJd{%*E;@)X0zbQ6nfyV$1GmO-%k0_OVFEukA(W)w9qGJ-R^J#~G zg!iEFHY0TX0~!iHl2%L9z_#yuW#_j`c1lpYVB#cchNO33}7Wx9=6 zC2YFK4eui7H3^?FDi8rh5Z`J-@Q4K&zj;Jqhd+44beAh0Q9$BcdZT|eQ|);g<{(R}9jaLVgTT3Kq1LzNQpgkYnW-o>I;A8+LJH+gWLB#RX+~F|Qg2f6L9}k)a>5cRVSPFt++>pGEl` z4|m_Dc7pcBs&YTLF)AjYco>eJxw?nnJ@1_VOfDB2*cXXJD)=Kwvs*1Xh3%TiH6BM= zh((NAo2m-R3{AR7Q7MXD<6pP@adYhYQ)2bJ)6=NbJ-8Cp@apk-4HK!P*+6wXLkGNlYW4#)Nr3;C{=$_2=^$FL%JMcHy3?C|9vRR5y6JE1|rRowslK$Lp zbEvnCWSdXycH7sA5q~%+?|jiJe|!CH+?_dxe(h^*Qn$4S^cuvH zF%7A!cj)mbV}6J2SWU_VY8hg1_8BtxiS007-8j}4J=D^!HQdtJk$T|(Kj-xkd{~_= z%$`w%7<^_jX#w_x<$RYA&Kyx;7L6$%6ixS`xE)mQInDFe+w3({Z$v(M!mwkmCB6jg zV$^+lYW&P!?4tYLeOC&W>iYA@H`iIvo@)%G`5oV2(PVG zbFmJqJ`{3#q_6kVDyUU$YWk7>%~(`OpTzsmDSV^jpva@sskcRDhelH+Aos=?UN_A!NKz_1>C0I3>?B7d#T{0KEse@ulA?lf zwFM+8q^O)G;)&EgzjNhQh$)RZoQ%`TRFtlWYSa{khJCPkv{C6lD5O-ukJJ*XTkm!O z>WbSL9|Ax+opr&>n+cu@F0lzm2cO62NTmvj^JRH!?2}$87>D=gT)6L9@*emj`6Sq* z$h;IU7CMMag_AX$CXVe#ghrulHh>7)5w4cQ+xKB-YVpFzhSq2=quR4N^{5=2UHbkK+!lNbt zi_j8(j?J0e{#Ri<+3xXP`}T4Yh|jE5EoRa7$E=JjZ=ZYHEn!@}8OW`Q!u}QuPjHG1 zs#z~1GWz0d8Iq}vOZP1^Ejr-i>XTPW1fH{1&j)26J3OT9$igoRGSJe&K78e>kd(Ak z3|iI><;@d~jpeG)sZ{m-9=aO2m#7XvqG3pQ^o@J>EH^!G&pq!B9{rc1_jR)j$>r)z zGxTI%ThH;%;rsA0)aL z?w4n9>k~S4v(+hUBN$1p|HeqF*Zl7p$xP(uiHAWO(MBO`N2i_7K!%Bp1b+PD0h2Le z`TwP_AVaTLl%ph)RoPro&jwUhFEzsL!gDcwi}jV{TqjmEO#+Cb^DT zebR3Qq4s2~JArmp_{R&-?_@^IV5PDYcMq0%GxsSdE0&JTi(@x}H%1;k2`F$&Qac!Y z2qN8V6qK3KUqV`|Up^=Z-~8Qd7n{9xM+K&&rc4;fp3w?ih6Ed(x}^z3_p4;Z_xHBK 
z#&t{Gg3~(JnBjako#<{4ttu23i+mmi!EDoxg%`bGHtM4Pb;*e@WCO>bljZlG3TaV2V%T9xd8OoZz&@oM^{G(W??vy)NSQ1f3Nx} zwuD9a#j`)ZgKMw1yXIdtbWBdn>>){KYXZ<$Tges0Yr}i2YW+!6A9PDOepXNP`|jok z=FZ&ea`h0;A;23@!gq|JQdTE0uAg(IR_XkKw(Tz23wAE($F*~}b+2npJ(s(0;%(BW znmY0@794Uy@IKm2Se!ozi%lVkTP^1_-&Nht-!a9efos6=F4C$jt#(-8BkiRqkV7OU z#He0sUFw_}Au5y=2xWhmbdZk=n@kwW&NB--%ny(lu7R0pkNpkKpPe2p6@w&PhfODB zhOgN7-@v3yliDxtccu`Sj%4&?6WM`E*#W21wfa~~r(pf@-@$jdS4~jcZBg6YSnCie2J7mY zdabnzpekJFz8%5rSbMi`G3!r`Lwioc=CmKF#aP?i?hJ#%C=et)X|Uu@zP4CwhWFB` z!t~5cVSGPzNZUmX37W1*x@^tX;6o4h1v}i(U*@;;Pe^_JjZfR;>qGdF5j;9rhbr#O z0!GC~dE4XDL&lu%$aGI0aC?H+u>Iq7$jp>lHM?yg{gAL0F=i#W}i?Bl)|+kNjSN9Y3o__g^#X zZ(Btav)RelwpQ2I*SDs#^BIwK^U+_u-<*xu23kJSP$aiPaCp6#e=u)Pe9)rdN;;+E z=SsQm0#-Bh@kkDC)p;4z)QQq`PnBD^8EC8POm~g@B;NQ4+zNE}sGq$0AQ~=%m6CX+ z4lMhrqm|w?JgcO|bf*D)yLAVW@yQ0$$YW|%#pJXpFVA&E+5GVMn3UTrdRqoL%yfaM zC8C1ui#iKy1fCIU$F~EqxuYvglK^kQYWe&7!+}pU(x1^uRFG{z&rlitRVCSZQE~Cu zfYqQX@|oY@`4s>8)m9%WGIBDYznCs;%ObcL-nq9D=ZGU|3AO^CWp>k?y7w9X6j$G? z`+HoSaczu!rRNW6;c>1R5ud<>-3k-*8NF{;R}0u%PLb-5>+`RVU=Pho?q=Lo#Cs|M^j?CyN-4 zH(uq0XM(g2`2KIHfBX9=o)&;KA-fDn1=@19U`LL{hC*Q0D0b(obcaYxh5U<|MWp$sXoLd zt7VT|S^PBCvZA&97Zds#Udzc3y!oY@w?(@Cw)g)K>13M~Kt-14z1fs7V$4tv@~Izx zI2CGP`%Tk;fKZj{kdfKoa13kxmb|kyn4m#&(B9)W?i?=+&<)i3^6h26#rjLSJJ)7s zB>#?ZlCIzv4w#p#GjX_S>0Ix#+wG)R)OeL%HDXq#&V-*Zl@(<#cYVGrnuB+;?+KAs z<>8=?R-7yt#@@l__>viKWl!H(P=Yh_s{B6jC@U^^@csV#7p6~pDo2+KwYN6M4;w8o zE-g5FR@qN0BNi_2o1&9lqT^~Fe)Q5=WsGrv1lqN|j33I6N-0?2`8txy8&|^fER6lI zV^vU>XU1@=&Y__WvY%F;+dse3^`M#1n-c&$q=Y*ZS|^K#34*3B+q~pN>yZv?_q*y` zqoYe*KwnZ}_~%2dpJ>yGlK}|k^Sy891w@O|)uoqx-Dt>rAIdO)|V zrg`;r1*|ax!sco@SeCTQ)f!lC@uGtdfS!Cq0w{m+*L&XN(}Iy=gFNal1rJ**gd7VNG#WUp;tJ^*Gc&UE#QzWi@}g|klZHI#tc#| zwAHKoONHI;Zu28_w_8o*v%PAu$@53$EivufBUa#LKQf{83jR2z=ak|C*JdGxi5KCqCOfql$EB~EwuWw0d(7UDeOU^ z9c}dt)CRT$XqN>V^S>=M(8Cia2*fIza%QvDC4C)_PyhiBaEI{*Ql zG||DaqWhDER?7W~l6{0_LpEeyQ}}cK8>fUG7H!yi|Enm+dl<~OaX0h7*HpZ0qf)s8 zz?CU$aexoYUk)3-m9%J=mj{Dy4=x&VtsnR_a{@0iX&%z;LBzu>f=VS6y 
zXHmw+z-eBmx^LXq`6WG4H{z`G*hL2@*Tb}W71Htng~@+iQYK5y_qoAl~mr?-oPi?~e=s*ZuI;s8y;Lf(I-JkRf4 zXa+s~eB1efDcWU3#0Ph6Sb}xSagqY6&e=Ohg{^ie&sY5sYsnXHeFU;<(IEH^HhbAy zR!H>lRXKrA23A~4KS{@IF6OXzmm6Zh(VP5hDW7MJC@@}PU375Q4$r6GTjVq}D(KnR zFF!DgzoaO{ZJ{kndC&VZF@Da?K4o$ph`esBrvvrPT5Tw#Ge6b3q`B1#IjUJ(ki+Iq zUzHC^L3LnAw@Qq0ndiOi40s#5vji!c8gh-5Ot~vwKJr|8I@TEK<0P`y;tK@i8EMR; zrW%8}bdN^&^(s}HYwKYb`IZddmqVqIUOJO}8y5a=li$B2k9yvqzXS-m8rE;d^ML=) zj3>2EI6cMxhVhI|@pgq4-QUuw-V!Qahi-AqtkhnnQ`V^Eqb>~p30SbcxO1^AQ{ z9jj~Jc>$9~Bu+0*dn%}3EKgWGipD5^2Fm9_2j)9(bxdI2zDy0PN`h*ZPc<% zFrw~?T%h?xWP=dOq7 zWzne@>uGJ5+5|tF_+6)&E5GMIGn2D6 znu&e5L&i@u6m)NUQB3Sh0>>k0GiJkg@5l}7wFUjI`vb|Gnz=r`7x$tZ0)}k z-WAolMP!QVG)*7H%OGy;pRPoq=BlY3xfqxjCq1`$bpL%@)lyrNjuzI z{7FZHmd^56S`i4o-uuQBE_qzP8?))*6ljpoE*(aVjaem654$?G=eCLU!%qvb`vMrs5qfLeF+iQzxMUaTeFAEQ84EI2WL z3EOQM1{G`>Mg}4(C8TezsO182eigZy7DbB?c2_^q@i{u<4jvU*Jvw3gY5k5d<|t5kX2XHQAH+lHO1ny!-Boua zVa@GskSeAVhTqln+Ve2+tU}(;PO~`nR=FTq`?64k%4^z9j=@5m=(L-+O8Rj8eE@}o zQJr~sOqNeCtRFC!4 zPL9v*!@3<%^0H=mG!JT0!d^)?+y_^g)2dVba;!IpwX1@4y^+Nf(ly&S&StWnsB1{0 z5DlY-#a`w=^mS531_dU*nW)*pfw*eQ%!0^)uZRS^EFuBVbmuAoKU>R;sMjNln09w*1szBBjR?gD5&I0_}%3vuZzMbDf4wu7&Nf%+caaU zusr*u{!A>{({|dBt+gKovJl-zQWSvFb0b~UFcnqfp@~|_dmxF>D>yJM$QX~;8u+$Z z7{!h9u1ptuN;DBM>?6!Ajlu{lP`ZKpNU0r$Y7{-d4o?BEL4()_8oBAdlVOQP+%?OG z;J)#a#|h%~Rr!&Lq4SwxET+tWXzna4_3d+b>Y;BSS<}p^arX{m#VRB6N%{!5%Bto1 z$l$f1cX2l@sK_F<7Yy>{>aAZ^axO(Uh*NnLr-NCk>>bv*Vv(cU5;iYNmMM^4% zN9*285I)Gj;WTByQGWEuVc2zWm4vE@hJ$(&z9w)+}IzM9*T1QC`TtE;5 z2u}RS;*?95Rrm>7?Dp(>bD%%wbO{P0+&Mu!6x-Ps+(RhUE+p*wHzsk9j5+UZoJd1L zLwic~l6R+vOw+HnF!GRw*K;VtrpciKgqgb=6Id1k4^lqiVV8}nMMtaTuHYJZtJ24+ zRej{)SG*a{Bcb{wiQ!}_H7{3@jv)#|)?_b?^!~@Q-TzvqCwna9B3<6l3S5Zh7Aj+E zDk|*ehr^jwoJ)@(Q?&=T9NWyz@|5ZB1)&ytrYgFy8$x|3BviR8b<3)s(t*r_F@~Mi zkHfas09Z3vwoSJn(7`)RIbFH-LAs0JE?mbNGGlv0~%<>$b*d0dB0i{hp)*MqnFVl<$S8C z;$NK3$5#w`+;Iy%K0fBDb#xT!oRb}H5t(#wTEI07oGKV&=Ujh%d#=W>Lo8oU{OHb{ ztV@2W<9e$;x^{fM!qW8185gN5)W+qtUjzwnt|ZDoW3G!SI|PTpKLQx-KDSjfF&z4K 
zHqrHzV&#fUy%TnT>rhwSc0+}@Fvb>7GqhJ@rcCc@U;!#-lE+WK=v(>B#>B$@Z5cfu z!IKB}7?9tPr)CP;E06u2SeGilnklI##`4cpUu67Q=lDPtbWQqZhy`Xj5>f}l_2r9? zKl^89=(@`=2|*6wczb6VT=vKJJZ@1nvP(vvphbb+Dk^r=)3A5x=4}&*RFl1{9!g(Q z%gm{#?cLiCaPb$P#!J{eEQ~@HEF}uqgkDj!=2%x0?eARt-zeITrqBL~qKz2)gQCss z?Zsg~@klN-vtV}E4eeXKP!bO&$v7KNq8`yK)FjlD)uBIh1LR`IoB827Q)`>9*W5R( zelDFKe1Cg!mJ^NwfyIF>m%`g3>=D;8NOLcN8-g{QW1<98J*j3z3 zcoDK6?=<-0XA*tAs?XP;a=|#Vn1W3A_qD%Oi*SzOj*NpOWaQa(|KxPwXwzQxP=^e!`o+lD3;E!s{d-_K>^V)t_^VSj>4htMFEw*#`g0^O{>K47e?2il1rYEyt9pW9^ zZgfX#LO-3KDeE3{W=m%}6;VeA-;Fz1_HPvl--*~wS`g-Z1T0yMqTwX&NR=x(G0L0W zFJvh{mtlg4WgPa8GQD>;?7GP*@b}76@3xO_Hve5TMt?pPVhGmwJ4tlDn!;}Um zAB7l=b)#$}f-&Q9W76F%&9R{_EkW$H-t$J^c8A>6jwTZ%-_`%@D{|}}4pP?A)jd0< zP#H5burx9=GfEuT$wu-+Muv!fhjoR|hKw)KQkZ*aT;1iLyz$DfL6E1g| zcut+#aVgX3YhW@3-i{Dls7>Xxfkhrur;G+JK{Xc*jfvgQuJ;yE zO^+*=b1Qp|XRbzXWD0|u-e4I#n5HmK$_v_c_gZWZG!(g|vb8E9&ze(Jp3)@jk zmQ~(d8Bh64*j#eku{mmpeNY)L=CU3&beLg)=BK|OYk4uW6>ndgtY&3M|GIbid2_LA zGk-mF?Dn}x`f5_MSgxmR8@136i|`$BGV891tpFWDFmMtaWHT>)egyA5i3t(2M}dgb zanJG=Aq%MV=o|FTlGaK1Dj3bYIqzNGf6vd~$(6Wi54r5QknV+Ju3kQ52zab=Tf;~2 zqEhIqyKfK$;bOk%b387$Z%opshpf%+`PwFWoxq8eoGSS6n;Utd;p(E@L)E&CSI1tT zEyZ67Un|PUIS78O292kCS53-g-2=C6*iod+P)n`PoKz(3**-pa7i_9i{$g1F^@mXr zYpg=!x(dC4VIl3#kB0~Uf85O zj=_p;%+b&9gCEVNoZk6ZFG`*nw==f7G~76ft2M@x1ZDOP49Kg>D+re!l^CZiv)Y*& zm1GrPAe}7V6}|NVh0420tQnF;w>^o?L@}sBz&d+2Ar@VlXuTn?!}c%cUV4+NnK4&}VJ8Z`bPw=j_lvlU50x9g5Fc2zJZiZ+wCZ(O8XE zk{gQ}vU|Fi0@igZ7$)1-l9?%Sn5xQi>rBTy$=Y%5hJ36LZr|k`Oxx~KZ+EPC2@RXp zfIo}HM}dZY>Vi-7Ig^FiKYTR7$ zLGRYvvDbDO01;6X2sIy9TB~f4tB}i1X4MQ~knH@Dk1S-n`e-5e=>2Q;>ge{!VjBDA z$KLl=7Dcm@+`1K0r^y0IAUA#rlqZx2zTI)Yn9#ey8xY|UWza-E!&X}G5o?!Zq8on) z-qaORV6_I<4zl8oXP~m|8YqZRfPn+{qL9(*V0d4IP3MRHW^y*vMhZTqsAEf%8@|QtQ~iy~7!O`?+}hl$6DD);VFPAUig5Wo4J^a`58|1Ew!|DK;Ks2j zBhjAr{<_RD=j#B3u2TQm}o2ddF$an3iR_7Ry6Cxly8`uy`l19`CjT)8L1)pakZ- z?>KdD^i3d7rZVO9elXZV(=U9u;NWB5eZ5#7E1AB_jU%vKvF&d2y^DK?95+6@7Aa_< zkrU&4Pz+k6O1=6XZj{ubs<3PIx~9-H_Q>{qKzI#yxUZlyv^J;zNxqM%@oSv&pik}Q 
z*13gDJ(Drf8JhPM){{t^Qkcr`eSB7WQu5q59=gL7{X?ZL`7Su}H<4F0^wX)~qw`2h zzvtn??5K;gx>1UR<14tNIujKYh0z&EIb=B?Z^46teD;n(0hT~e4|Ps(=vu8!muxGM zh5XP_l8fY~L2bs_(n?b54L#2{_%N6s_}UZwaVTs1%$}j6kyqm`O(7xF2D)}bn1wH) z#;fQF2_-b_@#7J_>^zweem?rx9omu&)7XQwnIK$;yHvzeUsA=xZqu$rKdkiacl&JY zUC=a>6UWT)eOQ@N-vJU;g|wMU^cK!58IJCGdBs2EgW9U8L0qP5Ra#3WLt zzP*8UVe~Px<5ql6FiCW`V~SGwkiN!~*MM|{zxNSzJ!N@3d zY={fNfi$`>Un>~hM^kOCZ+;%L=fMZ0C+hYMzUa5X-k{3Z5r;tP#DevusqXI*d@h=n z%Sgpmdaob=-Ny8Xcwf_~h=_AAij&8&kN_)#1yZ)#b+`%r9FtvzEvHdV%pVIkkyC=j z>Wu?KQ;g5_^in3tZ)fK0`2R23-a0DEHtO595owVw0RfTj29<6>x<#UI~yF4w|g0dvin``r83zwPM%P&S|2W9gCr;>Lf( z&zjU<_v1G|ho1;R(%!(Q($N>0Q=8F zkhu}tsDieIW!RP@4vh&nqn)hD-6Z+b_OFO6HN&_C@>SuYwHUNIHx8oDA?4*R;_u;x z!k_c|t0>7C#+(ZikLW_gQ3b#I*`nOOJYE>VTYGnEX5(9cNA_nROC}%|8i#{?Bf>zC zXVYI&Jf6aIvH#E_PM|P7PG?HM5~wqp9x-xnDzW9*+Yw*wU0g}H8=ZIqOXf8~8lWha zEk^t35H$VQ)$y|M_LgEU;9#OYe+s?2rZ3o4g(k!Aras==I~w)Y>YDw3N5pt<5pOo8 z@vGA>5GI?dr}T?j?57d~;Tn_1K-P#vs>q>os@`zOz%hY+)P+s12&?B!;PB=trk*CB z%K@Ef#GQc$0*Jw^=zqcGwID;<2l`y!h!$3Cj-A2{S0=s669S z){DhoA&(Xm&fc_+a0s=5G#>VU9O@>bUykTGCQyd&I__+~}4-47@I4CQe4JpUWHq=)HDp@!nWF_qTP{hH<9zB_DAL#IdJ#Gov*KYf7) z;d$H5W#}ieJuH*|yiQ-cBI(`KX2qn0H#q zyQx|Xd?1wZ?TCPT7cJ#^f=$`XyQ7-l7*C=2jSD8spUXGnXx?e1Ig?;1+)cen88m^x z)Ha8}%ZIDMwe$s+eFk+CW=Zx9Wk%(+NXR~TUm&1=PV3Il$GC5s*nI4(l&jziG90EJ z1Q%dlwnvj|2EC+snxOp(7g-6xWaH_?FW3hQ(N}*lRCM5*-n{$S6U*}q;= z?P(92nJJ-xnoGiqyzRA!^{f>GDrMlxY<=;ff&E#fVrHtMWlyx98IY#I7GomzESp(k|UVWa`M@l@q1M z{U=J+++)CksRY$6p&&@aL`JLwB({e=!>tkSs58)IJzK?QS%<6v#WkO^}L567)+wSMeuCzCG$X;7bm5tI?ur&0Su}E)Q=mZ616@ji|-Wt@xy1s z$W^UUV@}2bLZcKuNbK z80-oTvtp9?AB#d6=7JVf%G$X0I`%E)C)60+91wmqh*5iP)7sZ4s_$9la6q9E%2M!l zJ4@mcJzU*KlZGY6du3X4I|oKfV!`5*`^>;HTig5e!qNQE@_dNZ@WyE~Yeh8oNb3e! 
zQ_m~E(R6x_?y4X@HQ)Wnb@~&rbzP;gx#uxowoaq@p17{^o68cqyQ;KhA^b4Q>}8#n zYqEzC>ov`fs8<%GL{1is%-b^1hh;E5gXz91;_rrSjg#u<%B?br(~el_xvE%{N7>rF zKL?H^Uy%>ywyXJ09vRj-eTB>E7e+`P_2K~;lRB6%qUndyJ0}fQ&xFtY6JWKp$V(|W zHPubvgJyaL$qS*Ca!}eEJzy1V9fonMjZl;y!MfN>g=vhxmy&spEMiB z&eQU8JU`Y0*M~FO@bWpQ)QAU!|@)EK#Dq90#qQ5LZ z-nTSnK*29N>XssRi~)hAxQP3Gp2)M*$6|!rW{YOj`Qq`jeL6aEtv@)02|`DPu-jGg z@;sdAdT+S4J#`u*T~Lo_{5Dlymf%(1$gI(yqZB`?G)iCS0gC={(->nMj`_{^Y+&-y zOXBL_(?-(Z2rMv_a_D?${X;(p4?Laz4M6B2Vb#MK63Y?swjM3Zuk(B=hIx43%VFM| z76C9-1b4_EwrO)T|S!Kuji-b0^mFvV7=W>xHo2S`Ngq8B6n4dCZM~QV4Lx%Az z2L#os^1=_R(qc>W7g(y*2>s1%Ti3%x5hr~PzETWsfT?7ry{5H6S3NtAe)(@IO}FW=xdy{rdXZ4fZRZcF0MQdCDQL)_s~2w$=R6UWb9ah8c@_1dhyspY#r*gpv+A7Oyl!nYAXl)ExYPRcJS?`vbW^JB$0Sq*uk)he=Ic_`R71dRKCQGwCZSj|%*=bs@tRfxY#IU7mNwcn9p$W!f?!Y8CbTx6Y*9 zb`dGR<;`vXgnU){i8-2SJRh`aDsEDa_D{>+S5ofrjpMo*)S^_>FG6 ztql>fv+aS-+;`Lc7&=}9{)B{>baD!yWlU|s_wbDs0K-x1t1G=$$+!CP+)Hs-9C=C+ z3X5RwY;$BuE{nek#p7+wdSKEWRB|S)+3NKkiKDGV7l)F09C8>{F8;td6vvV1;~Y{J z^D$`-%!v-RyTA}>nxAt&dx8bpw|>G0Oj~T^7U4rZtz!F+g-assmWqX2zbW?1w?AA;UY5g^~u zjnU1wH9dv8<$aT0InNQqCf(8hI%$G}JmOm8t95H}ZYBGbhwxHL7SM36zaB~)@RTpR zZqG{ZKSZDB%4~RlK=5agJ|`FWyT$FTf(7N4V*=cqoSf|JU3=2CN8N8NHd4;QTSy6A zh2j2ns1X!`tGH^3!3D#cDTE{>)Dfoxb7=}NNvpwZaXf40p^uUta|vw(I#bF?z?A7u zb8cKX$W<&KpJ|9<7*=vrj$+u#?y=k{EVW8UZKLqdONU&je{0uA+$@Z+?0LMegugZR zZOuQiC#m;$1gf|uETs#sr_5yO@5+bL8UJCrh_%%#v2^x(v8?_RQ|Si<_-Oa8o5`y7qN7UCZzFK{;4*xB10ZI%wl`nHN9U#wCIGN_Mavxcvd^DNKJ z*WF!B4on@cRW2Uk?B36i>LKNh7rRnZA>8k_F&HRaNfhh>$J4i&cL=7jDcDn2&u5q+ z`g8mh{GCmSzvJuzXTFP0ExrKTb^G}6on{h{pD(|FDA=`Sd~P^`r7CangQG=^yjTG* zhR8lZrF6Y73I|F;J$jJ_>gT}y08wFJ2srAVpzwpJz1elz?`E0z;9~!Eb#}Yj8j^|aj7+o7h_|6u>^-P?Cw zbQP{G2=Opd)52MIM8T%0ZM|Ik&Dpif%E@|xbH;VjIW^!@!@P)XNaxDia8#q`GghNZ z5IN_a3+2+DV!{>lveM66v~US|rNgWEI(8{!a;m0cxV*VY?q`Z;Y_1IqrN!-71C56{ z7lt+6{|<4infycgtJ{Oab}t*O*C7!nXb?&ZY-upOr)%L@VKSSZjkZ zp8X`9UIxsq8IGqZU{u9Q**No(HDa}NH7gXhJ%88U?mew%6Z8c!3O(HJaZy-^EY4(q zm)#S(GZN>%DkypF@TY62!Re&tq-u4GtJX6VbJ4KlK_3HxuMN<0Pmc?+@88i>2$B(> 
zQ2C{aEg`XsL80{bG`wUKpO_#U1V>5G=$6wyiE@<#jGrc#Hj3WLjpP_ih|`0E#l^*zmPJo@omu&}H*DkM4wF0b0CsIr!!PV5e8*mH zq&|%V1c8B;e$cGy##WV2$1yQ^AasLyc5&h8a=5rWmay)Z9zOjW>w-+gZlz;dS0JdG zo{Fki(4GZ`U=!ztVYXP-|OJn;=INohP(=DLO*D$ z1D5S4GP|mpbo}`5<2W1E*MXkj+*>f8|7g72j~oQnS>8KU9iXMk{86n?UX)N(v;4aM z`@W><+}zwk^Rx{kh>wTr(z2?|U-%OUw1g~6Q+2bXnh0)bcviJ~dXNru?yauCxp7j} zA5A_Mll5zP&H5{1sG4?Z?^1(#+IwBh8K}CnSa@ubpzMbly>$tosNByzLrg~i`FdHa z{OCdETb)K2vi6gBe78*iUVj%p^1rl;O&A<9?*)UGu^xQsyl<@cX8 zFh0N^3q1J=Zm(Q~6u*oXZ!cE3Pey$(K0vw!ctPL8AAi$qGJ5(Q`0YLH2fG;k;F>`j z9ZvEXeE$1fy+ow}Z)V^tpk4ToyGE>DFTTL|2!4&w*R(oGu?kb z&wsoIz9xG9fBZvVXn(x6k!@VCsS?mGaVWTyQ%PEPE?79IRyU#(Te-v?ci0V;2+6Cj zt+iU8um8Gh+ARK%BSkz9Tm{Ir&dpWD4`q0JQL_obYNtl5%(*Ax^UCyC`@tDU0VX*W z`PGi?9!CLHklX>$d#7T#5NSksK{ym@;?(tQDJxMu|azE4`YUuKnc(#zyF%M){CdzfH9IoI>?i z_f%D#aj>dMeF#Dx)<5+_XBI3Bwzgl0^PlZ^dIULFw^YlQWFx+<+Pj%;=y$3~AhBSM zWnO`+o}iWbYYchm-cEv-+qn?)iTYYJyMu!**L%Y-YM>eB`0(^+e<9}+&`eL&o&H2@ zFUAac$R*YoGJm>b)>e8Qt{^jsu^dg5aF{af^V0dJ{c}k)|l5jE7iIz zOW|R@U#uLD3YgF~^a<6zWsqF@BU&=-4u4-M%KprbrR2UGPq6Lo>S~H$*?%0sda5zH zx6@%q+O4|-$1(|B>RE&!5DRoH-OVkp7cqr|uG~mWWBS*qRZEw!XTBQj@#!56f5qw^ z?>odgPCsRl_)jaOtmdsWgv~-66vdwNwv^3d*7Ix@dQJ?#RRm5KLy=|-*LYUM=$vcKe{jv?D;xV_t6C>>9 zHJ@kf1+kl5gbCxg9=`HorD*>FtVTd^?JgUiQHH3c>7;M-^vYuuq}K6WG0|&?EiKVdh_6;0(>&;};ibTqBcJ~1)MS*^7fnqj}j`ke^voD7&Y z5kKmF<9Zko=L^xz0 zM}A(*q|W%;k@{`3iQZB9H6XP3s|`dmTD+n{w0ij%DbvEF393=)y0H{Ne&f>zJDX#n z_spPVC58~;RB6C(P^K;-wIUV6h3=3e1=p28!ndv|C&UR+AksxU6B5>G?tb^C<&dm- zVF?lvkfPp7$=$u4+flSVQ|P0b)aVj|AHTI={kOZ`jEL=J(?;>Ix^ zKqN(g6py4xW_Xt5?D=1OpMx>i82xa;$*gH(WjsIO&^jk+2A`mbTHm*-ry?FJZT+wq z+xPGkHa*Ul)W-gwT5jHbMyDV-4aPyLO^(=>HmhU0DXVwW&rd+k+ zy+&-8^Uzv_Y+OAmKT0syDw?=e)pfnPk9mu<+MVBfh~0r8W-#c1$*)H_k9}K#`>yz} z-{Vy}5@q~|{=;cZdo;+Q#C6oEp7r*d$GCc+!$%>>%))#(u^QOqHZu<}_3^38mjboo zK7UT`r4#zJ{4)nZ=|q}Jw_a=NFaBX!E7zAGKj*#WI%ML<`^ zKaq6;1tMc8Z@4?7DjS=2n^sd$k_Q@uj$c|*oVZ~tQ$z-C*)sq&JiK7CbYAAw1N{(R z8YK$aRrW}YPpXs|}Ql2hp)zNjRSAsN4->A}V5zqrEoO9FCpaW0^LzVWrHPVh_#_ 
zz9J%)AY2R+j`rrQC6sGBde(dc2NBQyl7k}V$j0gJRvV3HxAz3v04H!peHUz7+KD|I zkl*c4JT82i()d;WRa&Ml5dCPeAia#WdIKr|#K=G2jm~pwAK5I)bCY%-ho51og1gVq zATuWGKDzj=Am$&YvBhXayNd8#GAd&<_m$WVI@(oieDJVa%7o$Ugs8-M;I+A zrR-NcX zO(j!SZ7c2+Xmei0YfC17{o_jJKbLnO*IZ6-od@2wOb!qr14*ExYvt-pz#}`!C^T1u zv4(Ht7c=d~qk`eSOjn&;7|(D=&$(q&DvIP{nhlQUlZ=$uN9$Xjz|o6rBevRou~yOpR*hxQ?)U0f~`+^u|_iE3mT@8E95SM3=J6s2oI21Y_D zuLO*K7_3_}n`p00&&SK4hj8(leC)_z2D^Q1Kc~Gt&xa0mB+(8tUk!yA$2dA_)%uz4H3s!wG=@cJ5Qnv*L3tc+rL zu3HIqvs6w;E3Rf6`THH?)CSl69GER3E*+raX3EnXxaQ*xpG)S{vGhL&hlEt2hv!KX zykbA{F8=8g^6N8pB3Wv@IX|(3dTy+ZDc9E|%Pap$N$IC&n78lwgE`cWoI)Ck;}(8S z)Y@M)Wg$?TUF<^q$xqQ; z!jUKVG`L1z#~ZUiytY)jKfSAxAL{;Czy76Fk9d}Bx?9ipQ6+k&@{1ll(zdLU1LA1H zq_$Wk{X=7!f7h7`VU-(cdfO(YWZ#lidQ@<|%vL{qd85l$ zZAtRg7%KfZtFNYD zAS1-DRabT@P^78lHJcP%TiAu!&fms?uU^p7VVQSPC035$L%QB_`Le~#BXoxAS}jO( z(Z$E(?@7RgFa(=_PN>E%$nache99qBk24%oSP*h;1X>E>*enwyb*29F9y@DtrF#D{ zt|Ps_x=4n=__WJPcL~h={OJ7Lri=?wa0Z4Cdp81S!W7`QTidQ`6?Www5=jxW#A6P9 z#9MaVH#R0CX|Hj>&F2Yzwwc%C!76>^mkNT$WRV<;A#@S1ubT%xD8nfAWJT=h z^54l`IvZ1>_ggvGgIl*xNGO>?-r>Jd6VCa#=Xt5X|CFYQyr#k2b{hHjPV9be?6%@7 zat!UEtwz@ILX9@lZv%kL`v$;QfYVDOuV&H?ogq2xDfr&4&s|fUD2IR2g3_0Ai_+*&n}WG!eSCa)@|2BZMp5y(3r0a4{2jnoy5Ndwh|{#FpWlj%00aGU2V-XES?n6e|v>SO7xlzG$F^jnC@qQPvl+?^(y!G-Mp>GH(nk&0!d4){pUi7x@N5%-14Ib# zc2-ZFkPwUwNLw)H^zocZOs-GP6$+gg5F7!9Sc7&_f5dMLE&KI9=8Bl%#Ci)do!2!g zPeQ}xJLM-1pTZ1EM@Yp<5?H%if^hhH&ahH~OG3M7?FccPXgbcvK4>pb6&by{tmEUL zMkZdOU24>_PuZg3PB_P-iVa?iiHEV~CN zK%4>`0`A1JFfo*Y%PBRYFg{D+NaVzv)}%r+t4YFf(BR}`e1+bJUW4Z6ftT6Osl|J* z{6kk|Lyshh1o^_p&^;Hw?Vnb=Eis3H4@=1Jy(AC;K-E!ywSDXp;SRx6?KNFT_|F+U zl+I@;z}-Zf>DD#;$5A)tS8bKiz09d#rl5&r*23q+^>gY_tq*&rT8K-mwF^G3b$SY#t#ylQ&n56uNQ*kh%U0!Jq&DnlPB?pGvs z;ggy6_3%FWw`6?_e1x2lkdQ-8`B(%o)JO6!Sg8W9t$2r zbuIcmAboh0XGmUQfN}oFYH!=Th2dr&<1pVUWB77U3nmAZGG1G+DaX=tRq(L>KdIm| zmo4>_Ld9mh=QRsd%i^7vL4+*9tje*ewmn-FfT-76Bk8p@>8(M8#YYC8<+!=OVh+Fv zK&P~=G$YmKTnbq*!nV$P6}eHb*y00Z8Yx za)Ys|0lTX{eHvcYBEA|@%PU{~|)#++p!lDPJu}1-U)^su+6J5+1bjomCIGP;6 
zTR$0t6y+~(amk{O27bKNAj2=s?Lt##xaM?~MgQCw4f;JTQ8mpGvv{-Iv@LGw$BY<_ zIe%(56aI5>PX|?Kd-@OvwkxH!%Dp}Rm&$!ay%v}$*>F$gikO^x%i8=GmCFE7xjX+s zFzrE!_aA| zif4-}0i`)=eI+xOr16-@zJxFIrbf#w9Q|Gxs4Em3IUBjTM4ILX<#tOTf=Hd9^-B=S6om$5wTw<~TGS z6h&D+%(Lw|YMn>HQ-m@koI8sV5Qn1*09?(P7;JX$J$ICD3C2ry*fXqTq# zFOjbjZF@E=MA0B2;}PN$)>az4QO6;|0>NJ2%)J2Ma+}+Qu(!Bmz>um?EZ#eh)kO%s`c74p{{K_U+TBi z9k?ri`B(IV#;QtoQQD=@{3e3&X>P9iLd|+IFuZbP%fnF0C|G2$8`)5xainb&e5Hi# zzCt)4**y>dBV9eZF8mnZl#D6bvSlY9@J8FMX~cpmhZK;A)$?_nt>@v#+u65Y z-^s3Os)$Y7yZPmW^{6uU$^qdP0BLbOen&8UU4CG>$iCl3kzvYgNh>BOc~SXPu=NK! zwPgywNP>ZOcz*En6njofqgTkbinngqnhmeqN^Id?j~A-gu;>;O-mT6URftB}`$v0G ze_|YmxGb*#+~_?-yFYciTp``S9>>Qa5sC|zX&|P=NRH4gFk?H=yh%0-f*Lq9F22C` z&Eo3I-1cT{%Lr8Ah)xHec{9ozQ}v7X8#EvK92Jash`sDaDMD-uss(kV#BlRYB{6m)es~x7~x9)Y{iA2h@cXFGp4~%cLF^ybo_x zbbZqDq#czd>5H&ySLy=U_HUsvo}NUX%lXYheFTi--Ls zV%sod|Eub{@pDofXdHVRMcPR5xV0_Yv+=R9F$2zIModAte2YqwzeDaRkVhD}iV>I@ zV1y>agQ4)TcI4Sk6$G-mClVxljpn_0njJ*4`x zPi>`CnaM9pOXNyf8VBUjE?;BNr-x;&gdLU>q#S)98m1k+hg@S%N!soqR}wFZs>f?r zrO#Dms14OUg_pR}iMdv^#XDi6>&EH2v+Z0|&eG*3Lxv9y+MJZKjMY#To44MWFbPCV z&MRwcCc~H*q-!{{rvv~sNX-or96TgzDR2+Tgtn+Z4kN9-zE?P{`V4QMA|PJx6ZyE= zU8ID9rg&;liz;R<^$MA&?fF?*nfnc$znrSXaLm>j!(P$A01h%9--e3VrZhN>NY%5- zHgeaS07xNNB@*j|;drC&HT5_i1%8#_Nbj^Dgo+)Muc3MMa+7*o#ZzyNZvC+p=yQVt zo%{$;2wpt?boBJ|+c$`M0973D``lBb*5nb5o%T4;w!}6_>vCaV|KYM?-y@Bz;+FIJ zHrc668Uv+fzY2cmSw?iL3(ePvA`491LsmVVS?aN6Uj~HVF3%eV@oL&AP7aAzjwVL8wWk7{`mC|L8rhnI0Q%? 
zqhLzKS*{K*~uT2BPwz1{-8ve%8UHTfxeTgOCl)+JyQRquyPk z;{5>cq=(P6{!7e4}>_CD#=u`CH`kOD!q{U^)W>is~iahqW2-yaGJg z&MEbB&FO~%Gk8#i#xRru$mc}Bc{B%vjy}L~SpcgrxFO^GdRM+)fIV=lbsHC{ve|1b zxms}10R6=^U3#%_m*0l#rLT};hI(ihXe~eA2I7TQ+JNT4dC0zN6Ml*}ybk1)xX9uoeQ*8W z4AaIO(Y2b2)`;q1CBY=#>nTmxJ{L{sP9tqpg+Bx>to2FavO99=W^G-5O>gypZoSjX zs=dbSHz3#e{lS4<@LZ$Rnx>{T9O})*Z&a}uD4c*J5gdJrLDZc<2$}oP++gx{yJ$`g z@_vx-=IP1b>a9O-7!Yj1?uEGURtdFMnp}Ykg@8yg&hDipQFuGRLhY9GOoGbF>IKtq zAWFw`AjKE9?kY+wDvuX0#zROm4wK*m^kzWOI6gUR9zDuwzr)6p7nHsap>d_ScBQ^L zeP9`G=GeNe7eqh4#Ycs`d(n}07NF3vjgF36W<_`LrT)eAkMZVmE+vv`qu$B5Og8P1)N*uZZT+BGWA6@E$ zWmeB=o9}GFfZyjyghQ9&h}#b8`Nzo~#N9A+Bn=n^vcGJVsBCE4qw(r2<2jU2I?CqR zB%ywpMQB&%bN6B8vF!Zx)6v+r*v{l5S-N-hm6xNvq=gPbP8e z<2{oIY?}Wti7|m(+VTILNd$}}ZGIM|ib4zTUmqD}?^EHtBG-9WX_h7zHn7_674W_h z0rl5y7Q$Wlq38EElU=p0&d!eeRyN$np`}ArBghY8q3NP|t+;#T+v>((^7+g``p2vbz>E5@3)p~VJh~9#Za7y^-^UR!mM8; zVHlShfE87Gi>`tFxG?n+An~nQ4&M+eV8;{yaeyQi6{7kv4*(I==tn%&dpTaxD(uw0 zNphFX%Ioz6ipNBt00xmR%n*Nh#lojBHrPyEp50ucH=YK~DdpQA=FjxLRQNk1hQ?LysGS>L zGSez#Zr8Zh0YTgK>7Gl?w6N7Mj>nk@9LOSp$k{oWz_0GC6W>DM1OBB8eWE7u&MdQP zGtWulDqpxHSxz&h9UfAqdEL9!y5;xst9g#Z>)w7*G8>`n`p_bqILwT$jJZR1O(Ryb znlc?~U&pwNHER3Xjhk~>k#gAIDqxRIruEJL;0ta4@A<-4W*bbN{a;^SUODOA4T5=2 z5C!7_`&|vGXJ_{mBD`9n+Z2`_b{%%~1xgBK4LyGGb?hd4P9|b-!iRudd0($bb&No? 
zIKumELVKZs%tbR<_JpV02%ZfD6jZw~TP}&%HEnFaV&G7g zNyXHE2V_{>yjT4pFd{-n?@n#jxTkl}pmEs%#>ez-RGaALd;y4!Y&*JlOwDPkjBczjjX1*El!75|D8FwDL=fg571-%bwcVn0G`r2Oq?e9G_nB% zw+C1sf)~hx?YwWJ`<_GO*Vno2F}ADc_L*CaUHf>dT;j@j*bq*C22cKj34EjeGy4<3 z1Y)0bK1+CC6;gC89~u@GmA;Wwa1|m_?iuVC#8<>mFy+aAV*(NW7s9I*o$W_R z@M2SMS^;^m0#Cf>jdh^mf^d~~zS zp)PH7-703A)Oyh+ocl*;+&eh(omz07-N)&r6o2a$kU!HZU#en%*IMmP%Aey{?@e}rp z+;X|g@9bh3tm)t+4lU+C@26(jRZt8S6ak2fJmtm5`8dt{t*wO zS4lbG)ea`B)J%+AutH6g|2lR2sms>!;BI;OrzJ}S;qlhg<=zDTfF!x<2V)hnD5A+M zhRZIu@UzXI07ALDX1Q+KJ!)xnLrB)t9r!R-9q!;uji3}f$6t@1tV(?uhh0$gz3O#n z=v2sSkDz0~FKOJcNn(I=8M*EZE=%vv`PC&d3pv5nP`$qb9u^%*(n|#Oroh%MI01mH zWPJO@X;q90#mA+G;{nA(TfgUOS^*ZW#Im4>S;QX7{y!1DmoE0i18Yivi=Ch8+(6|) zwOU`5N?bJ`Ppec3)0CNgt+BO~4p)wX?FXHY&MguJ<3OOnAdFv{Mn)%4QGw+iqLWvc zYA~Zw;Ha4lqS)HMZ`+FuvtU!wz1QKn^4j+dpS)LoAgNscOP*I|)mqS6RuPxSwl<}g z{`u+J6T&QH$%<8ni%Zkf)8h*?i*It_VPAzvh=J=$kaLfTp4^^t+&gBVDbO6XDbu%R zy({6WbKbjG_yXSQV9}B%zC4vAs-WFO7sE3XAQAz#uH?qj;VBTxJv zIzN99RrSBKc0ZS5`9}U8`j2GQ9y}0u@_>G-#M*f^`#7)9(;NIx2$F#qvpd0 z0Ed^WVGGvN6it&N?&C85-+$z}N9McN7Kz+=G@`Z-z63mYFm=x)-QzZYX~75B!WFy9 z_f4&P(5K}Az|8&UL-+pwj>z3V$-nJiA}?|X2&hGXu$qa9>rYp0wlKb!WbWLoUrzh@ zh>Av#Cb1o!$F1=xv=9>!8y#7gJ?|oZfM5y|)>#m=vY@cB{iQA;>VgX?EQ`^!ahcs$a2I!OyK&k;Td4)Ma_+{e8sbudL`ueYlPUY$DGQ6~HI7Jh z(m23isHyQck*&(AWpq0Kt~qrBe8)*5XU43Qo@s#dr0Aip{NGHJ@@Bu9R;(dyX-zz zoe?|axwFeXyz_8~y(>QyY2~{X!P)XSQ^8UZc~CA2P{Z|p{L@pa>V0CtD$jxxNvG}~&uyWSWN_Vc}HxY+)c zlhrbRBP%*$NmH#BxOxEt*gA6BZoM?VFTs(Np7s|-KvJvQp_l^vX8azfD+dT$li>5Q zwk7xtGoo`t?eKSnVRtxL-T3y*H)==a#UolcDthi$1fVLIi@R}Cqr=BVFP|LT?&Ez0 z+0UP2qQpZggjGMjE@T~*p4|sh+=rQ-6}ytd=BV}HyR1YOJ7&aMOwL`3eD;LDLjA4* zg3B(V+h-NH?cTBWG>w&ZEYmM5+IN5^?;L!m7QzviKA<~TIhYrT=`3<+l}B|lC4469 zDtWhbU~`zTu*lL_fO&zG%#6Iu=8CX;3-sskNQx~j;2zC&fp3Nou;?|pZd=>|xMq^i z)p3M7mA>phnN%@`FaS+n6o`bn93E%bEHyaYOy$Lg&fvCG^IA`%Q~cYf58ZnkSSX)e z(~TiYiaqER9Lg3|%eeL

8fq~0=I!v4AnE;d}cbadUlY^=u7 z2yT6}jOD5rRDIq88bDM$yF`mG{l+U@$dnz8edJQt{~F;<5C-6X4*}1<+)ZJq77WD} z@!$c=jI`JXmA@d})o5|DPwY2rJ-wXM-1&=vA6t(VDptOen?o^~9uQKD+5jBWZH zp}1)7ynVKOXuB&;O@~nTSj)UXuV!oYqU71fW z^;}EPtJUHnXah0YhG(bWDwSF(6teVCBcKY>!}HY&h^I(MJlPOpkf+Y8>8)+S+}XE}8f9u|=UhBUcN4AaYD}bWF?^QrDByPFtw4 zG>!v%F*l;W{}%R_xBH9LH4#_F%kIz#i2y7OACP~*$$4h~XE~x9*-0QMKEHKvV6*ZP zzd+T~bEhESF$)L-qnX^pb>o|Emzw=qR^ei#L33y=WvGW1SdU+-4MaHY$J+7+OhUg# zW13!C=sq!a(KmrCt~m#*25}rydmkp`TM1O+?R@}VP^mxZ(#eWDC?fN5kLn&lC!nyxP(Ynru{7`7FhKfe>hjcv~ zDu}JXLI)n-#q6D8Vcq=whU)A+dFRbQWb8wH#Hq;(E2Rf ztDFUPD+|czEEv|3&jLwdw((}HrCev0B|x4;!w?#CXKfS6z6oboi^Xu)$Zr`@DvXqtGYo2+D%KzDfpy*g zfeZ=#mxVSn?q^q~Y4p3MO~kl&LOZqSt*NkKzM*#c?v{z1*wu!8W5WfY_j`D_6IqAM z-e}eha|;QoSzOn?EUH<&oZGE5vhv%qHh4L4Ph+;~)qML=2sR`!R#LrB5VO%pEJrn8 zs|=NKGoN3gC_4f&43Dq=2ebG@^QmvExUoMb`)xY(gHCSUyEt!j!a1q@F`GI$KIq zqK9%m=-u4hK%TGlH`x~o!SOeo-8{4H!Cp#rVk;Gi1|9vD3jz^K#yli-UPd$l(wU?o znKqZL0P&rVa~oya=%~`vayRqVG{$>Z(g^F5#p2~{h9GQtp=W=Ci*!3l*+Ll7l@@j$ zp*P1JULix{(R^J5w!V>eKADaxAo$(oKP6*85qyY&I@Ld;recR>4Ayg7W|`mpNC;9s zNbv168mln@W^X6beex5KYk4Xj=BH4d(OTpy)kh9DU4&}AkuBjufCbo>@1sqx}1zQZGVVsJ^Va*+1*0 z0!W+}KpEzFj7$}>#IIrroBa22UYK!5IJfpy${>I6U4fA@7h2nYmjh9!=((RwSo$gY z>f;1*rDDMqbWiAd)9H5Dy4Y#d^`juuo*#whwFW+k9I>tmaUT3{=@CUGS~H!5*7C_~ zP%c5shTl82nBF_F7&PXko$B5w1U?0?>kdBr?lyP=j|J9Is zf{A1(?N#`rx{daRc#!}DVD3EU@bMSJe`~%U`JIvQ+nkIW= z0sq6I!2AjKMgF<8ORNCO!DaJA_JM~C1?P|+(y@Ep?K3=RsVt>l@&;40+VZ0GRPPKN@8R9@$m|JB-g05$cs>)r}d1!*cE z0wO}_AV^nw6Qo0E(u?#SskN5tH$UAv|;QU3AQ<$6g@Gs9qTc4esb-=ydGEw?0t9tjLSEsd4gT-P`G9B|AP}km%1eey#Fwe!89M6-RfAEFGj=(AhHOdy zno4v}x|{AJ(l(@KKHrePyH_kR_Snb-$Z1A?Qo|kYtpyhN;zkOiLy{zNoUq21g%d?; zCap(9lFCgDx~V!EXFTPI4{zIL=B)u&viI(<&WTsQd*vnBFxJIss)R>vU0n=a9*i;E zY$rq>uuprU?LKH8lgejk(0Eqa^u*bq0`tjo;!xwN{ZcJ&xE}2FEKDsH>I?lt^Awcx zG5Ckw_8AkH_kk0CCfD>a)rRAJj>CuhOz}PMJ)!q#R=fmg`&dw^V5vm17cwC&BbC?d z8x7exw}FGRmHAJ0`TE>xf5lzIFg@8;1>iyA!jI~$-7RM}4c2v}kL>ipiq*wcKIXW@ zNm^)XIP}leW7*WKUPWQn%4?i(H%*bibmt`l4 
zI%=aNvwyT5z^T`eqT?nvAG4-Z7fu+hw#gpwybedD_fdPGx?sVQtt+Q7}2C5V^IH)`bB6$jG=Xjo6 zaEsmLfn4n@pm|mK(m<^lozlq6YEU78M~mp;REmfZ&QDUv(46*xgCI)mP^w}ru1C|74D~d3(Ru!i1BWfKzrtFY00yd?IjBm0m{`dVE5;N zAbT$Z%oCtbZ4%L^GmPZW)P9Rwet}4+cb8Z+a-})>AlbV#TthMrmX*O6WHwwJJ+X$W zZr59nssxv?r*b5yYLQ0f(z)FSOjQKcWv_pAQF*MZv|HwnJEWqpiY(R*n&DbQ`IB%fMD<;uWx zgqLxICy^-19Z7*pMnNpYB$H86^d@`@++!TVOTQndI*-01x+yK2iZyV0Dk0L!k{xKf zhRI))Sam;6vo{sYp~>1sw|w_kj+^18IldW_nD0yN% zj31*s(vSed>K8pRo|!K7WG}Y7*-NWt1c!g2(y!JwQgB9dP=G3TNr( zk}~%532A5P8pqgD#cAi8k|J z#-f|7+Zr=INr)wv?~^{wJDS#+^RSQE zODi0>`JMiHUe9&z65phnm*(0n7R{_t{+3_(7~5mqUe!ac3WBrOnBo?sbstsJ9=AjR z;aCKjpacg=7~@p2&gHfjPu4@$e~WYdYneH)EHx5_SK{2B;Z&K;Y8tJ%NWc}M1ABaY zngL1|DQW9kx)h%~$&qiu#T{Sdb$qaT!dzmhiO`37K7P)jjbFpF-TPO*Q)IBks{ z_wxio-i+ie2uqk`9ZHJUBw<(3&qCez$M;yE5UUUt@rpwqG^?y*XDi<$0wdM@NH)(7 zGFm6zFj*$%CPeA2AOsQuJ9a+cJo#T`iC-Hd;HL(!<^SLqjb2lp;{nmf#ZQr3nvZ*4tf))A)6IaTbPVG5CF09 zYRjI}?5i4zWmD%lZJe^x*QJXLAC$#x~36@&w1vJO? z8YsdsQI;hVBXWjA_m$(Smy@}re|V6{_7IwMxZ(*G^HsmWQw}agJaGPglCb*jn2$6y zii!=(HhZ8%7-%ySCeXq3+rj(uO%V}{SxHARW0%O>UXx8nVOC@Py;mQ}W;e@xa+?yv z%82?X-~Iu6Dk$jn&S_g6db-De?U`^}%iBlyG;2@2l)#(c@M}LGK?83x>kg3@`ZYV+ z3OXwhBkQETkOx5_KFC(-$tYc zIw&4jBN~=n;d%P%>!Q((Nqg}lnj3>5M;BIRGgbX`6fEcmt|d8&@d^Zqv$8hgk|b*Q zvU&i}m}9uya12uIvp=yLD%s+0VW6#}fK>aP5r@`0Busl?(`EQMUE#EcohY9roP#>;+~usuLpmz zel-DC>oa-?*ce~YW@^*)LC$Nd%<8_CA2lJi1SPUQT3E+PzhhAcc!rJ6B~z$KlCN2+ zj7ik2*xVQ;f%!XzZ#}CuqT-FkR>+oQ-3JtKHjECJwP_dDr1P<>8yf_BdwalM25f{o z-8T5XnSc){@Wp1|hnclchaKw;<4U?$z+;g<)4jw9cVxUt*WOx8UBroe@Q3Q4S0e8J zR!X-^s`t~JyzvbHQehV}Lh@gZz0^Dy?%+h7e~q>?Fzmr?t(Cm#0xxUNO-$Z3R=3iZ zOw|zpa%@+as-M%$qo^zf^kvZQjlg}P>o&?Rd%c8ga&HpJoH)GnW@Pa}B!-e)6{%dSZzF|eWkn=65}aa;#U=l|iw*eaGrfiv$YllI`&y%Cd zib4JJ&p5EL0=BmbD}M1CR&j*CsR(gJjM4rSCV70%+WX@WYoE%Ci0OlgUH3Qd0oi^* zs_7cS)u6aB-0*}m0suDEzs|~0uI7~2lr6T4vx;r&%E6e286q`m9-bqS`2PVp5^}sDtHz z0PhK%%+(}Wt}U*ylA`vs;5K+Hi%GK7Vj`oeyxi&f_K(JU2S65dnKp4*S`l0Ze;jFTyS6^NYl`U#?bFeEn~>sEqUIk2 z!rk5`PC+h5rS8fy^dQKniKK`E^9&dbC^ej(v0vTVT<-+t*&9dU>otkQs~7w{EuHl) 
z(VDLpuGOKUA#mcC4wWqRot?gSk=6M6u`M%BjlZ}(M{m6fB%Z1P5OjWZ~LQSRoM(&x)peQCi!PvBi_n%?!U{i~1vGZ@no z>=-r6=2bItJJJqfMm9m-P+wDHB_M znLv&sWu`oNYlSlKbu5DOp)VVYv;iG6Iqv>9fsn-|U0v+d0&9CdH$GO3fi6)OBmE-P zrtuLSIL6Ot78=66@|*?m6{s9D;(i9vV&A>xy&BKD?0ZVij-b>nl)E^E1E&q3vR)Fvl!;h=3TPu1)yP=z~=1^k6T^=G)B(LWUA zg^Mc+vQD4!+wBChh+@P8VPw49h$1v`w{SM0W_mfL>E#b6D`O}a%KBC>Xkdz}s-!;%f&DFx*I&V3A!OI|tw3A~+#5G31!kjWF=e)pZ!2&VhvuGhf0d zX0vlTJI+7R?-K;t^xosfPVsDRD|J-Jmdx*$ZXQ}i`}@B4Tq9*?P?7g&=7gHX>RiiG zm0#QYFC1rTy(t=XOn#&D_>?dIkah6Dt->607yC^AM3kyx56u=Ei(wmVM!qodlq)RMzWG!iP)v9v-8U)ve z{pyvNT%lB*2L59{0pM&>!k11jR@peBSEs;D)9&u3QE{9MuHLwR7|c2RD;cDFdc^5+ zSRIoG713-r9DnNAe;EM#*w>FCixn~z$~U9gv11Sqg~(7P7!F_eTbBatsH2UkX-X(l zCoxR&QFLizf6Ww)gO*a|=AiUydt(w{P_jb_S=+{KkmDRb7`$Hss)P8OQnrs+Li8SNFNNB?jcf zB(T|ahGM+4v55PjTEwb-d|45_d5a<@-4pTh`u(DoOo|PxK*M#2b(e4%PW19}OEqmc5n_-_yiS zl*>O@cs`c@@r-hBW|ItygDyFYyb;F9(7)47-{b!D+CqD2tU)Al{3fS|c;(zvTtMy- z)U1o4MwamiZ>D{R%)~-a4}($#i%I(!^CH#q@P9EPg=)y6AdPkjZM{+*gUC`N#5CUs zw_jPKtI@-DKbC_>5|w>DRV3V7BLHf8p;&vDctfLYh+W`cv9bcQa)K)jdm-rUE!c*R zkf`X*gDQM7ZYJ+A@L4^SJtW$j1N;T~PH$HAsmA;!_qWI09@R7Pu!7@WF*2wrD8tCK z(hevXvX}>I6ydm`?cYs-Ti19LKcWY~NXU;g0KBobGfW;8dHwn74=^Ci@E!`$i4EJ7 zndmY>WZYe$PqAqrKU=(}{V2_-JYM+Yqt~I?U&9Mr0^v;#-}1o7WaJHn@x0+8IDKZHh%K;e}8kcGxg zd<{(^PHg0dA@cZd3fFq~T-n$IL>aZ)Xiep42Az%UkN4VMoI(9gR>c=;n8w`%=5C++ zjrQoi&NKPu{PaOV{;QA{>6aiaXh zWa=I-+X%&m>178KU|gfm5#83+@nK2ViFxNCp5%rdj#XuG4`M4~vmmvNY|jOvUK@3r z_D)FUDPb3MlFo@h6$wN@x5FeAAH9!{2Q{I?4$T>tR-G-MkMOiNk39$^%K@Ld9;>%} zs&}#WByn*YwW^31de9(BEnh9Ms1jgTVVTVc9tB;xzkv>TnyHnR0e}|YZSy?Aa=eO- z>4p|j7dkIj^gRQu*LHS5;6={z?&^f(%_Q^_E`0(rj;f9u4sW+0$;g%$lOowKtL|xZ z>?PY9-XAmIFv8?h)p;>rw6^rIt-~E?nQNzgP0hrQ*4?!MjQdblfeq{l;gxH`93s|2 zS=p-ts)|lUD05}rC9&0#3%waGgJwV`QsFu-<}&kYUQg_XN9N&v9ga*xBE zWWUqI_XYgJl_%nk_k}9F{1aX4ut>N4oNWLthVlvtzat%rcv*vZ%QwVYvPdW{C861w zop$q#;NfqQ`*R+-vE2GQX+I+uYv^j+^9v&-Bdin;&O$ft;np8#l#nu`h|^4-E>{^A z@uk5{qSb(^G${#%_Tfk07HXqI1(s``ZM5eCq*iFgSs_hBX0zMQ ze~1E$Pe5QAhTb)feneFx-mqMO_f<~PPd}C!gjrX7;Pm=FB}z3Vd*n~1p{`g 
zOR0D}YUfQ24X~{_#jjF6>^4jF^ml$kl)G@nJ9V?)8XX6$(hxn+*_9#0)C4euZ1Dny zkYQqgeA~p1{`pat2d50JM_ll8O)tbF6z!4`N;UsrsJEK4*d5sGQcW_9Rkx{U2E^IP zi!LrVPurdasrfk-@U0TaA_0Uv9*sX7B#+S>P6cL&r-rtSUKyZp1&GpYnkx~~*u+xF z1JTX6YUeQc6l{J^wWz+M<_W*$;y_c=$E)`9NFO2>tz3{QZ;iLgp-h=d4!B4U%%)S5 zlK`;B=2Lzbj_7JY6RVzMGsBU6(m@!wbV!Q6aCb&qOXt*F9wT@f*VE)8lXk0;Z_Hnk zkS+mhdqoVe7=JN+g^TnrM)$1Z<}2l%+OUecr%6W^jWb~6vW?%aGXuB>W7?~nJi19p zra=|FkpmRZeP71Hn)UQms`>2{zjnwz#rOA@DXF(Y@WeJfY|&~ealo$F>on1D7hLoN zz*E3Vzr0zvTBL}M&KJEoAV4!+#~^$@m?N^yA$w$07ktZFL)|j_kN=$lWelW>DnKCow~hF_?+s5F)5PzfryDLO%f5{r3m z^&=xr$r{=^RF6s>zwyStLKP0cK^B&u9<$HUV)Aly)N`L?IYwI@d{1!^=yW96)_Y~? zMnV;UTGWwKX6-bi+w{0{0RfeoSx!Ic@{I((GSo<^z8c4*dgP-rF43=fzmq0BrhMd! zSI(md0*Cq=04iFK#_WnQ%43|t-zSRND|n*qbA2Kg7Wu-lc-NQK<2HTO6tx|yB~Y!X zq~gV^LU798sr?zu?yr?sf`7vL)W2g-zLCqnxmQ~=BdtXQaO+D*8(pl9<=polG&oD` zxXpB??;B{gzu0#GP@fuTQLf_?%U5mUA$`tb9yRF|3wu|NsZQrQCbki}n`F!0de_CFl!5Km+3d@gYC)pf*BOxO^vO1@zw#|<~>GAV{LYq24I+-m=ei6!F7B@*jhqO;Uo7Ps(-fC`!~;t`mP z`nz5EA*H68b zC8U>K<;If#fUN&mx`!JAjuX=>>&ksyA>J!)YR`+g7c+8wO6=UUtB z)mHxIA;`+p3g>3%0tR^}cd8Jh~&NfPi&(Ht&N^ci<185RPGro|F@jQPLZpO)^cmY;Lx$9!9#wWH7LnXGcrE z*SXd5r`gG6#MD;`W~dGAINDXOW)3#4p#iV;B5pL0bHJG!trV{fLWr1ZSkfWTl^sl6 z9Po8E`2Nt5T7%!KD2QwhX%u;u)=E6{UemT`YtoO#-77y$_VYy_3UmK;Kx6I6%AyGP zWh$qhw@Nt}0^}3Gn8V*chmCgS@ClpOGIlH+p9MZuG=Hh7J-ej~wVA08PPKIgj4hF# zIA#x#PmsgUvMfHJN0X;o=;)G`5}?Y!sT(Xb(?!t;rLMSoHCl2P@2;VN{l$)1SG}vK z;mWL|QR{gH@V<0}3Fl-&i)8o>H16G%3RCxd&FcYVB{h;itmj>FOPKNFfC6BJUJ{a^^+72*}# z-|DRFZSit2;7oqSY^Oa{cpiAG-e1(+$>e~VxLZX&vRDKScbLD`6TG7-DaqhN>&h1A z%rOTu!D+P{o@~?#Q4BSph{uyw##XlSu_9mgc+U zKFszg%UZ*ey=`l!8*zO`BT|vsX?LfBYT>bX*^u{N0>T&86Wl=qnLgrYSOQyr)!f0 zsr0!HJ2}dV4VzGklFdDa!VM&5raNrVFF%Cl={Vx*h2N-e{^D3Dml+*o#D+N>m?A3I zjbx|FE1rfGt&yyDog1h-NN*ZZbC(^JQ2;$I#Mhs|z93%bO2cR>xE_=I)S{Pks%RSt zhCqpQ*V?N@(yiC)8;%?``Vj|F;`GTY`Afmj=6U7df@f${G?CXIv8J!XY&es* zP~$Tl_C~QagFO^f{ADFsKg%%ceebMQ`D~42x=Pm)j{(M9g&C+y1aJ5RY@+ZEXL|>D z7XDH){0(-}9_~yF0+06ep%a_z>1S9n^U0tq^R$uAW}&1MM!^%ooDW=GKcM 
zr{L3Bm&1p?Q@kSH@oM4`K$*!J4qnJXgJG;(KJdFNVq_ZNnYbxUe2vn&wqoPoENb7Q zn2SXIT|Fwfe?Z)ep5*!mKjNtB*^4?==n5N)TFWLhxFt@kp)GUm)=NPMME1_kqNw3n zO-uc)~2|J%eDearT(ehX06-|gNZ(Ff3*sku5uS8eUj zE?U^1+b3Jm$EO0^&L%Ey1k>s59ZlP7DF70Evvq#4@03S^ZoGCf#v#gy&4caavJhC7 zGEb|uBPyyB;PGd5vNX?XjDoGj(dZ13*(M#9QIQEtEzqvb7kb0VzJ>S@1{qTpAs*Rm{dZ^s`#Anzs7tjBsm=}n}9GID< zm1?6qb6 zLjYF%S_Cl=nU9pxk0a;&=}L5Ls=_3%z2&9>t5A05c-=mFMAnf_lW{CCWYgt5TVLxywlOP)m)$@}5| z;)ueL4w=-HLN3n!9HBnVJnb2cc5gmr@jy)rRhm>r8(b5=du_kw1*`|4k;lqOJLrcJ zw(%*z_<=Y107`VlAeX$Aiv?(w9t&ds%c1|ChyU+9@_&uT)zkkw{v3PBKJ1=Y^YDc` za2OVMSv_|PQ+G=db5~2?=^7soA1@~lFDEar7LTw9uYd^8a}W=Y2oDeDP4Mggse^-) zg{>9j|J)&xL}U->@a*avY|YI@%$=MeuC~@T?$@OM{p#^6XJ>oM$BK^TTpW-2cz9oM wtK5DC^r8889}7!2b5~nucUvdNYcjTO&i1Ad8rqa5;3L-*WmRP=q}~Sq2RF(BlK=n! literal 0 HcmV?d00001 diff --git a/vendor/github.com/rs/zerolog/sampler.go b/vendor/github.com/rs/zerolog/sampler.go new file mode 100644 index 000000000..1be98c4f9 --- /dev/null +++ b/vendor/github.com/rs/zerolog/sampler.go @@ -0,0 +1,134 @@ +package zerolog + +import ( + "math/rand" + "sync/atomic" + "time" +) + +var ( + // Often samples log every ~ 10 events. + Often = RandomSampler(10) + // Sometimes samples log every ~ 100 events. + Sometimes = RandomSampler(100) + // Rarely samples log every ~ 1000 events. + Rarely = RandomSampler(1000) +) + +// Sampler defines an interface to a log sampler. +type Sampler interface { + // Sample returns true if the event should be part of the sample, false if + // the event should be dropped. + Sample(lvl Level) bool +} + +// RandomSampler use a PRNG to randomly sample an event out of N events, +// regardless of their level. +type RandomSampler uint32 + +// Sample implements the Sampler interface. +func (s RandomSampler) Sample(lvl Level) bool { + if s <= 0 { + return false + } + if rand.Intn(int(s)) != 0 { + return false + } + return true +} + +// BasicSampler is a sampler that will send every Nth events, regardless of +// their level. 
+type BasicSampler struct { + N uint32 + counter uint32 +} + +// Sample implements the Sampler interface. +func (s *BasicSampler) Sample(lvl Level) bool { + n := s.N + if n == 1 { + return true + } + c := atomic.AddUint32(&s.counter, 1) + return c%n == 1 +} + +// BurstSampler lets Burst events pass per Period then pass the decision to +// NextSampler. If Sampler is not set, all subsequent events are rejected. +type BurstSampler struct { + // Burst is the maximum number of event per period allowed before calling + // NextSampler. + Burst uint32 + // Period defines the burst period. If 0, NextSampler is always called. + Period time.Duration + // NextSampler is the sampler used after the burst is reached. If nil, + // events are always rejected after the burst. + NextSampler Sampler + + counter uint32 + resetAt int64 +} + +// Sample implements the Sampler interface. +func (s *BurstSampler) Sample(lvl Level) bool { + if s.Burst > 0 && s.Period > 0 { + if s.inc() <= s.Burst { + return true + } + } + if s.NextSampler == nil { + return false + } + return s.NextSampler.Sample(lvl) +} + +func (s *BurstSampler) inc() uint32 { + now := time.Now().UnixNano() + resetAt := atomic.LoadInt64(&s.resetAt) + var c uint32 + if now > resetAt { + c = 1 + atomic.StoreUint32(&s.counter, c) + newResetAt := now + s.Period.Nanoseconds() + reset := atomic.CompareAndSwapInt64(&s.resetAt, resetAt, newResetAt) + if !reset { + // Lost the race with another goroutine trying to reset. + c = atomic.AddUint32(&s.counter, 1) + } + } else { + c = atomic.AddUint32(&s.counter, 1) + } + return c +} + +// LevelSampler applies a different sampler for each level. 
+type LevelSampler struct { + TraceSampler, DebugSampler, InfoSampler, WarnSampler, ErrorSampler Sampler +} + +func (s LevelSampler) Sample(lvl Level) bool { + switch lvl { + case TraceLevel: + if s.TraceSampler != nil { + return s.TraceSampler.Sample(lvl) + } + case DebugLevel: + if s.DebugSampler != nil { + return s.DebugSampler.Sample(lvl) + } + case InfoLevel: + if s.InfoSampler != nil { + return s.InfoSampler.Sample(lvl) + } + case WarnLevel: + if s.WarnSampler != nil { + return s.WarnSampler.Sample(lvl) + } + case ErrorLevel: + if s.ErrorSampler != nil { + return s.ErrorSampler.Sample(lvl) + } + } + return true +} diff --git a/vendor/github.com/rs/zerolog/syslog.go b/vendor/github.com/rs/zerolog/syslog.go new file mode 100644 index 000000000..c40828307 --- /dev/null +++ b/vendor/github.com/rs/zerolog/syslog.go @@ -0,0 +1,80 @@ +// +build !windows +// +build !binary_log + +package zerolog + +import ( + "io" +) + +// See http://cee.mitre.org/language/1.0-beta1/clt.html#syslog +// or https://www.rsyslog.com/json-elasticsearch/ +const ceePrefix = "@cee:" + +// SyslogWriter is an interface matching a syslog.Writer struct. +type SyslogWriter interface { + io.Writer + Debug(m string) error + Info(m string) error + Warning(m string) error + Err(m string) error + Emerg(m string) error + Crit(m string) error +} + +type syslogWriter struct { + w SyslogWriter + prefix string +} + +// SyslogLevelWriter wraps a SyslogWriter and call the right syslog level +// method matching the zerolog level. +func SyslogLevelWriter(w SyslogWriter) LevelWriter { + return syslogWriter{w, ""} +} + +// SyslogCEEWriter wraps a SyslogWriter with a SyslogLevelWriter that adds a +// MITRE CEE prefix for JSON syslog entries, compatible with rsyslog +// and syslog-ng JSON logging support. 
+// See https://www.rsyslog.com/json-elasticsearch/ +func SyslogCEEWriter(w SyslogWriter) LevelWriter { + return syslogWriter{w, ceePrefix} +} + +func (sw syslogWriter) Write(p []byte) (n int, err error) { + var pn int + if sw.prefix != "" { + pn, err = sw.w.Write([]byte(sw.prefix)) + if err != nil { + return pn, err + } + } + n, err = sw.w.Write(p) + return pn + n, err +} + +// WriteLevel implements LevelWriter interface. +func (sw syslogWriter) WriteLevel(level Level, p []byte) (n int, err error) { + switch level { + case TraceLevel: + case DebugLevel: + err = sw.w.Debug(sw.prefix + string(p)) + case InfoLevel: + err = sw.w.Info(sw.prefix + string(p)) + case WarnLevel: + err = sw.w.Warning(sw.prefix + string(p)) + case ErrorLevel: + err = sw.w.Err(sw.prefix + string(p)) + case FatalLevel: + err = sw.w.Emerg(sw.prefix + string(p)) + case PanicLevel: + err = sw.w.Crit(sw.prefix + string(p)) + case NoLevel: + err = sw.w.Info(sw.prefix + string(p)) + default: + panic("invalid level") + } + // Any CEE prefix is not part of the message, so we don't include its length + n = len(p) + return +} diff --git a/vendor/github.com/rs/zerolog/writer.go b/vendor/github.com/rs/zerolog/writer.go new file mode 100644 index 000000000..26f5e6325 --- /dev/null +++ b/vendor/github.com/rs/zerolog/writer.go @@ -0,0 +1,154 @@ +package zerolog + +import ( + "bytes" + "io" + "path" + "runtime" + "strconv" + "strings" + "sync" +) + +// LevelWriter defines as interface a writer may implement in order +// to receive level information with payload. +type LevelWriter interface { + io.Writer + WriteLevel(level Level, p []byte) (n int, err error) +} + +type levelWriterAdapter struct { + io.Writer +} + +func (lw levelWriterAdapter) WriteLevel(l Level, p []byte) (n int, err error) { + return lw.Write(p) +} + +type syncWriter struct { + mu sync.Mutex + lw LevelWriter +} + +// SyncWriter wraps w so that each call to Write is synchronized with a mutex. 
+// This syncer can be used to wrap the call to writer's Write method if it is +// not thread safe. Note that you do not need this wrapper for os.File Write +// operations on POSIX and Windows systems as they are already thread-safe. +func SyncWriter(w io.Writer) io.Writer { + if lw, ok := w.(LevelWriter); ok { + return &syncWriter{lw: lw} + } + return &syncWriter{lw: levelWriterAdapter{w}} +} + +// Write implements the io.Writer interface. +func (s *syncWriter) Write(p []byte) (n int, err error) { + s.mu.Lock() + defer s.mu.Unlock() + return s.lw.Write(p) +} + +// WriteLevel implements the LevelWriter interface. +func (s *syncWriter) WriteLevel(l Level, p []byte) (n int, err error) { + s.mu.Lock() + defer s.mu.Unlock() + return s.lw.WriteLevel(l, p) +} + +type multiLevelWriter struct { + writers []LevelWriter +} + +func (t multiLevelWriter) Write(p []byte) (n int, err error) { + for _, w := range t.writers { + if _n, _err := w.Write(p); err == nil { + n = _n + if _err != nil { + err = _err + } else if _n != len(p) { + err = io.ErrShortWrite + } + } + } + return n, err +} + +func (t multiLevelWriter) WriteLevel(l Level, p []byte) (n int, err error) { + for _, w := range t.writers { + if _n, _err := w.WriteLevel(l, p); err == nil { + n = _n + if _err != nil { + err = _err + } else if _n != len(p) { + err = io.ErrShortWrite + } + } + } + return n, err +} + +// MultiLevelWriter creates a writer that duplicates its writes to all the +// provided writers, similar to the Unix tee(1) command. If some writers +// implement LevelWriter, their WriteLevel method will be used instead of Write. +func MultiLevelWriter(writers ...io.Writer) LevelWriter { + lwriters := make([]LevelWriter, 0, len(writers)) + for _, w := range writers { + if lw, ok := w.(LevelWriter); ok { + lwriters = append(lwriters, lw) + } else { + lwriters = append(lwriters, levelWriterAdapter{w}) + } + } + return multiLevelWriter{lwriters} +} + +// TestingLog is the logging interface of testing.TB. 
+type TestingLog interface { + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Helper() +} + +// TestWriter is a writer that writes to testing.TB. +type TestWriter struct { + T TestingLog + + // Frame skips caller frames to capture the original file and line numbers. + Frame int +} + +// NewTestWriter creates a writer that logs to the testing.TB. +func NewTestWriter(t TestingLog) TestWriter { + return TestWriter{T: t} +} + +// Write to testing.TB. +func (t TestWriter) Write(p []byte) (n int, err error) { + t.T.Helper() + + n = len(p) + + // Strip trailing newline because t.Log always adds one. + p = bytes.TrimRight(p, "\n") + + // Try to correct the log file and line number to the caller. + if t.Frame > 0 { + _, origFile, origLine, _ := runtime.Caller(1) + _, frameFile, frameLine, ok := runtime.Caller(1 + t.Frame) + if ok { + erase := strings.Repeat("\b", len(path.Base(origFile))+len(strconv.Itoa(origLine))+3) + t.T.Logf("%s%s:%d: %s", erase, path.Base(frameFile), frameLine, p) + return n, err + } + } + t.T.Log(string(p)) + + return n, err +} + +// ConsoleTestWriter creates an option that correctly sets the file frame depth for testing.TB log. +func ConsoleTestWriter(t TestingLog) func(w *ConsoleWriter) { + return func(w *ConsoleWriter) { + w.Out = TestWriter{T: t, Frame: 6} + } +} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 000000000..30f632c57 --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,136 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. 
+package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "context" + "sync" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. +func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. +// The callers can request access with a given weight. +type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking until resources +// are available or ctx is done. On success, returns nil. On failure, returns +// ctx.Err() and leaves the semaphore unchanged. +// +// If ctx is already done, Acquire may still succeed without blocking. +func (s *Weighted) Acquire(ctx context.Context, n int64) error { + s.mu.Lock() + if s.size-s.cur >= n && s.waiters.Len() == 0 { + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-ctx.Done() + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-ctx.Done(): + err := ctx.Err() + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. Rather than trying to + // fix up the queue, just pretend we didn't notice the cancelation. + err = nil + default: + isFront := s.waiters.Front() == elem + s.waiters.Remove(elem) + // If we're at the front and there're extra tokens left, notify other waiters. + if isFront && s.size > s.cur { + s.notifyWaiters() + } + } + s.mu.Unlock() + return err + + case <-ready: + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. 
On failure, returns false and leaves the semaphore unchanged. +func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. +func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: released more than held") + } + s.notifyWaiters() + s.mu.Unlock() +} + +func (s *Weighted) notifyWaiters() { + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. + // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. 
+ break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 5f47f8f34..615b562e7 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -32,6 +32,25 @@ github.com/cenkalti/backoff/v4 # github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 +# github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995 +## explicit; go 1.18 +github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 +# github.com/cloudevents/sdk-go/v2 v2.14.0 +## explicit; go 1.17 +github.com/cloudevents/sdk-go/v2 +github.com/cloudevents/sdk-go/v2/binding +github.com/cloudevents/sdk-go/v2/binding/format +github.com/cloudevents/sdk-go/v2/binding/spec +github.com/cloudevents/sdk-go/v2/client +github.com/cloudevents/sdk-go/v2/context +github.com/cloudevents/sdk-go/v2/event +github.com/cloudevents/sdk-go/v2/event/datacodec +github.com/cloudevents/sdk-go/v2/event/datacodec/json +github.com/cloudevents/sdk-go/v2/event/datacodec/text +github.com/cloudevents/sdk-go/v2/event/datacodec/xml +github.com/cloudevents/sdk-go/v2/protocol +github.com/cloudevents/sdk-go/v2/protocol/http +github.com/cloudevents/sdk-go/v2/types # github.com/coreos/go-semver v0.3.1 ## explicit; go 1.8 github.com/coreos/go-semver/semver @@ -45,6 +64,10 @@ github.com/cyphar/filepath-securejoin # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew +# github.com/eclipse/paho.golang v0.11.0 +## explicit; go 1.15 +github.com/eclipse/paho.golang/packets +github.com/eclipse/paho.golang/paho # github.com/emicklei/go-restful/v3 v3.9.0 ## explicit; go 1.13 github.com/emicklei/go-restful/v3 @@ -161,6 +184,9 @@ github.com/google/pprof/profile # github.com/google/uuid v1.3.0 ## explicit github.com/google/uuid +# github.com/gorilla/websocket v1.5.0 +## explicit; go 1.12 +github.com/gorilla/websocket # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 ## explicit 
github.com/grpc-ecosystem/go-grpc-prometheus @@ -189,6 +215,12 @@ github.com/json-iterator/go github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter +# github.com/mattn/go-colorable v0.1.12 +## explicit; go 1.13 +github.com/mattn/go-colorable +# github.com/mattn/go-isatty v0.0.14 +## explicit; go 1.12 +github.com/mattn/go-isatty # github.com/matttproud/golang_protobuf_extensions v1.0.4 ## explicit; go 1.9 github.com/matttproud/golang_protobuf_extensions/pbutil @@ -198,6 +230,14 @@ github.com/mitchellh/copystructure # github.com/mitchellh/reflectwalk v1.0.2 ## explicit github.com/mitchellh/reflectwalk +# github.com/mochi-mqtt/server/v2 v2.3.0 +## explicit; go 1.19 +github.com/mochi-mqtt/server/v2 +github.com/mochi-mqtt/server/v2/hooks/auth +github.com/mochi-mqtt/server/v2/hooks/storage +github.com/mochi-mqtt/server/v2/listeners +github.com/mochi-mqtt/server/v2/packets +github.com/mochi-mqtt/server/v2/system # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd ## explicit github.com/modern-go/concurrent @@ -385,6 +425,14 @@ github.com/prometheus/procfs/internal/util # github.com/robfig/cron v1.2.0 ## explicit github.com/robfig/cron +# github.com/rs/xid v1.4.0 +## explicit; go 1.12 +github.com/rs/xid +# github.com/rs/zerolog v1.28.0 +## explicit; go 1.15 +github.com/rs/zerolog +github.com/rs/zerolog/internal/cbor +github.com/rs/zerolog/internal/json # github.com/shopspring/decimal v1.2.0 ## explicit; go 1.13 github.com/shopspring/decimal @@ -554,6 +602,7 @@ golang.org/x/oauth2 golang.org/x/oauth2/internal # golang.org/x/sync v0.5.0 ## explicit; go 1.18 +golang.org/x/sync/semaphore golang.org/x/sync/singleflight # golang.org/x/sys v0.13.0 ## explicit; go 1.17 @@ -1443,7 +1492,7 @@ open-cluster-management.io/addon-framework/pkg/index open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration open-cluster-management.io/addon-framework/pkg/manager/controllers/addonowner 
open-cluster-management.io/addon-framework/pkg/utils -# open-cluster-management.io/api v0.12.1-0.20231124100313-881401342553 +# open-cluster-management.io/api v0.12.1-0.20231130134655-97a8a92a7f30 ## explicit; go 1.19 open-cluster-management.io/api/addon/v1alpha1 open-cluster-management.io/api/client/addon/clientset/versioned @@ -1502,6 +1551,19 @@ open-cluster-management.io/api/client/work/informers/externalversions/work/v1 open-cluster-management.io/api/client/work/informers/externalversions/work/v1alpha1 open-cluster-management.io/api/client/work/listers/work/v1 open-cluster-management.io/api/client/work/listers/work/v1alpha1 +open-cluster-management.io/api/cloudevents/generic +open-cluster-management.io/api/cloudevents/generic/options +open-cluster-management.io/api/cloudevents/generic/options/mqtt +open-cluster-management.io/api/cloudevents/generic/payload +open-cluster-management.io/api/cloudevents/generic/types +open-cluster-management.io/api/cloudevents/work +open-cluster-management.io/api/cloudevents/work/agent/client +open-cluster-management.io/api/cloudevents/work/agent/codec +open-cluster-management.io/api/cloudevents/work/agent/handler +open-cluster-management.io/api/cloudevents/work/internal +open-cluster-management.io/api/cloudevents/work/payload +open-cluster-management.io/api/cloudevents/work/utils +open-cluster-management.io/api/cloudevents/work/watcher open-cluster-management.io/api/cluster/v1 open-cluster-management.io/api/cluster/v1alpha1 open-cluster-management.io/api/cluster/v1beta1 @@ -1509,8 +1571,10 @@ open-cluster-management.io/api/cluster/v1beta2 open-cluster-management.io/api/crdsv1beta1 open-cluster-management.io/api/feature open-cluster-management.io/api/operator/v1 +open-cluster-management.io/api/utils/work/v1/utils open-cluster-management.io/api/utils/work/v1/workapplier open-cluster-management.io/api/utils/work/v1/workbuilder +open-cluster-management.io/api/utils/work/v1/workvalidator open-cluster-management.io/api/work/v1 
open-cluster-management.io/api/work/v1alpha1 # sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 diff --git a/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml b/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml index e08074e70..9f2cc65ec 100644 --- a/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml +++ b/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml @@ -152,7 +152,7 @@ spec: anyOf: - type: integer - type: string - default: "0" + default: 0 description: MaxFailures is a percentage or number of clusters in the current rollout that can fail before proceeding to the next rollout. MaxFailures @@ -249,7 +249,7 @@ spec: anyOf: - type: integer - type: string - default: "0" + default: 0 description: MaxFailures is a percentage or number of clusters in the current rollout that can fail before proceeding to the next rollout. MaxFailures @@ -335,7 +335,7 @@ spec: anyOf: - type: integer - type: string - default: "0" + default: 0 description: MaxFailures is a percentage or number of clusters in the current rollout that can fail before proceeding to the next rollout. 
MaxFailures diff --git a/vendor/open-cluster-management.io/api/cloudevents/generic/agentclient.go b/vendor/open-cluster-management.io/api/cloudevents/generic/agentclient.go new file mode 100644 index 000000000..95691d660 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/generic/agentclient.go @@ -0,0 +1,300 @@ +package generic + +import ( + "context" + "fmt" + "strconv" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + "k8s.io/klog/v2" + + "open-cluster-management.io/api/cloudevents/generic/options" + "open-cluster-management.io/api/cloudevents/generic/payload" + "open-cluster-management.io/api/cloudevents/generic/types" +) + +// CloudEventAgentClient is a client for an agent to resync/send/receive its resources with cloud events. +// +// An agent is a component that handles the deployment of requested resources on the managed cluster and status report +// to the source. +type CloudEventAgentClient[T ResourceObject] struct { + *baseClient + lister Lister[T] + codecs map[types.CloudEventsDataType]Codec[T] + statusHashGetter StatusHashGetter[T] + agentID string + clusterName string +} + +// NewCloudEventAgentClient returns an instance for CloudEventAgentClient. The following arguments are required to +// create a client. +// - agentOptions provides the clusterName and agentID and the cloudevents clients that are based on different event +// protocols for sending/receiving the cloudevents. +// - lister gets the resources from a cache/store of an agent. +// - statusHashGetter calculates the resource status hash. +// - codecs is list of codecs for encoding/decoding a resource objet/cloudevent to/from a cloudevent/resource objet. 
+func NewCloudEventAgentClient[T ResourceObject]( + ctx context.Context, + agentOptions *options.CloudEventsAgentOptions, + lister Lister[T], + statusHashGetter StatusHashGetter[T], + codecs ...Codec[T], +) (*CloudEventAgentClient[T], error) { + baseClient := &baseClient{ + cloudEventsOptions: agentOptions.CloudEventsOptions, + cloudEventsRateLimiter: NewRateLimiter(agentOptions.EventRateLimit), + } + + if err := baseClient.connect(ctx); err != nil { + return nil, err + } + + evtCodes := make(map[types.CloudEventsDataType]Codec[T]) + for _, codec := range codecs { + evtCodes[codec.EventDataType()] = codec + } + + return &CloudEventAgentClient[T]{ + baseClient: baseClient, + lister: lister, + codecs: evtCodes, + statusHashGetter: statusHashGetter, + agentID: agentOptions.AgentID, + clusterName: agentOptions.ClusterName, + }, nil +} + +// Resync the resources spec by sending a spec resync request from an agent to all sources. +func (c *CloudEventAgentClient[T]) Resync(ctx context.Context) error { + // list the resource objects that are maintained by the current agent from all sources + objs, err := c.lister.List(types.ListOptions{ClusterName: c.clusterName, Source: types.SourceAll}) + if err != nil { + return err + } + + resources := &payload.ResourceVersionList{Versions: make([]payload.ResourceVersion, len(objs))} + for i, obj := range objs { + resourceVersion, err := strconv.ParseInt(obj.GetResourceVersion(), 10, 64) + if err != nil { + return err + } + + resources.Versions[i] = payload.ResourceVersion{ + ResourceID: string(obj.GetUID()), + ResourceVersion: resourceVersion, + } + } + + // only resync the resources whose event data type is registered + for eventDataType := range c.codecs { + eventType := types.CloudEventsType{ + CloudEventsDataType: eventDataType, + SubResource: types.SubResourceSpec, + Action: types.ResyncRequestAction, + } + + evt := types.NewEventBuilder(c.agentID, eventType).WithClusterName(c.clusterName).NewEvent() + if err := 
evt.SetData(cloudevents.ApplicationJSON, resources); err != nil { + return fmt.Errorf("failed to set data to cloud event: %v", err) + } + + if err := c.publish(ctx, evt); err != nil { + return err + } + } + + return nil +} + +// Publish a resource status from an agent to a source. +func (c *CloudEventAgentClient[T]) Publish(ctx context.Context, eventType types.CloudEventsType, obj T) error { + codec, ok := c.codecs[eventType.CloudEventsDataType] + if !ok { + return fmt.Errorf("failed to find a codec for event %s", eventType.CloudEventsDataType) + } + + if eventType.SubResource != types.SubResourceStatus { + return fmt.Errorf("unsupported event eventType %s", eventType) + } + + evt, err := codec.Encode(c.agentID, eventType, obj) + if err != nil { + return err + } + + if err := c.publish(ctx, *evt); err != nil { + return err + } + + return nil +} + +// Subscribe the events that are from the source status resync request or source resource spec request. +// For status resync request, agent publish the current resources status back as response. +// For resource spec request, agent receives resource spec and handles the spec with resource handlers. +func (c *CloudEventAgentClient[T]) Subscribe(ctx context.Context, handlers ...ResourceHandler[T]) { + c.subscribe(ctx, func(ctx context.Context, evt cloudevents.Event) { + c.receive(ctx, evt, handlers...) 
+ }) +} + +func (c *CloudEventAgentClient[T]) receive(ctx context.Context, evt cloudevents.Event, handlers ...ResourceHandler[T]) { + klog.V(4).Infof("Received event:\n%s", evt) + + eventType, err := types.ParseCloudEventsType(evt.Type()) + if err != nil { + klog.Errorf("failed to parse cloud event type %s, %v", evt.Type(), err) + return + } + + if eventType.Action == types.ResyncRequestAction { + if eventType.SubResource != types.SubResourceStatus { + klog.Warningf("unsupported resync event type %s, ignore", eventType) + return + } + + if err := c.respondResyncStatusRequest(ctx, eventType.CloudEventsDataType, evt); err != nil { + klog.Errorf("failed to resync manifestsstatus, %v", err) + } + + return + } + + if eventType.SubResource != types.SubResourceSpec { + klog.Warningf("unsupported event type %s, ignore", eventType) + return + } + + codec, ok := c.codecs[eventType.CloudEventsDataType] + if !ok { + klog.Warningf("failed to find the codec for event %s, ignore", eventType.CloudEventsDataType) + return + } + + obj, err := codec.Decode(&evt) + if err != nil { + klog.Errorf("failed to decode spec, %v", err) + return + } + + action, err := c.specAction(evt.Source(), obj) + if err != nil { + klog.Errorf("failed to generate spec action %s, %v", evt, err) + return + } + + if len(action) == 0 { + // no action is required, ignore + return + } + + for _, handler := range handlers { + if err := handler(action, obj); err != nil { + klog.Errorf("failed to handle spec event %s, %v", evt, err) + } + } +} + +// Upon receiving the status resync event, the agent responds by sending resource status events to the broker as +// follows: +// - If the event payload is empty, the agent returns the status of all resources it maintains. +// - If the event payload is not empty, the agent retrieves the resource with the specified ID and compares the +// received resource status hash with the current resource status hash. 
If they are not equal, the agent sends the +// resource status message. +func (c *CloudEventAgentClient[T]) respondResyncStatusRequest( + ctx context.Context, eventDataType types.CloudEventsDataType, evt cloudevents.Event) error { + objs, err := c.lister.List(types.ListOptions{ClusterName: c.clusterName, Source: evt.Source()}) + if err != nil { + return err + } + + statusHashes, err := payload.DecodeStatusResyncRequest(evt) + if err != nil { + return err + } + + eventType := types.CloudEventsType{ + CloudEventsDataType: eventDataType, + SubResource: types.SubResourceStatus, + Action: types.ResyncResponseAction, + } + + if len(statusHashes.Hashes) == 0 { + // publish all resources status + for _, obj := range objs { + if err := c.Publish(ctx, eventType, obj); err != nil { + return err + } + } + + return nil + } + + for _, obj := range objs { + lastHash, ok := findStatusHash(string(obj.GetUID()), statusHashes.Hashes) + if !ok { + // ignore the resource that is not on the source, but exists on the agent, wait for the source deleting it + klog.Infof("The resource %s is not found from the source, ignore", obj.GetUID()) + continue + } + + currentHash, err := c.statusHashGetter(obj) + if err != nil { + continue + } + + if currentHash == lastHash { + // the status is not changed, do nothing + continue + } + + if err := c.Publish(ctx, eventType, obj); err != nil { + return err + } + } + + return nil +} + +func (c *CloudEventAgentClient[T]) specAction(source string, obj T) (evt types.ResourceAction, err error) { + objs, err := c.lister.List(types.ListOptions{ClusterName: c.clusterName, Source: source}) + if err != nil { + return evt, err + } + + lastObj, exists := getObj(string(obj.GetUID()), objs) + if !exists { + return types.Added, nil + } + + if !obj.GetDeletionTimestamp().IsZero() { + return types.Deleted, nil + } + + if obj.GetResourceVersion() == lastObj.GetResourceVersion() { + return evt, nil + } + + return types.Modified, nil +} + +func getObj[T 
ResourceObject](resourceID string, objs []T) (obj T, exists bool) { + for _, obj := range objs { + if string(obj.GetUID()) == resourceID { + return obj, true + } + } + + return obj, false +} + +func findStatusHash(id string, hashes []payload.ResourceStatusHash) (string, bool) { + for _, hash := range hashes { + if id == hash.ResourceID { + return hash.StatusHash, true + } + } + + return "", false +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/generic/baseclient.go b/vendor/open-cluster-management.io/api/cloudevents/generic/baseclient.go new file mode 100644 index 000000000..be9e6b7fd --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/generic/baseclient.go @@ -0,0 +1,209 @@ +package generic + +import ( + "context" + "fmt" + "sync" + "time" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog/v2" + "k8s.io/utils/clock" + + "open-cluster-management.io/api/cloudevents/generic/options" +) + +const ( + restartReceiverSignal = iota + stopReceiverSignal +) + +type receiveFn func(ctx context.Context, evt cloudevents.Event) + +type baseClient struct { + sync.RWMutex + cloudEventsOptions options.CloudEventsOptions + cloudEventsClient cloudevents.Client + cloudEventsRateLimiter flowcontrol.RateLimiter + receiverChan chan int +} + +func (c *baseClient) connect(ctx context.Context) error { + var err error + c.cloudEventsClient, err = c.cloudEventsOptions.Client(ctx) + if err != nil { + return err + } + + // start a go routine to handle cloudevents client connection errors + go func() { + var err error + + // the reconnect backoff will stop at [1,5) min interval. If we don't backoff for 10min, we reset the backoff. 
+ connBackoffManager := wait.NewExponentialBackoffManager(5*time.Second, 1*time.Minute, 10*time.Minute, 5.0, 1.0, &clock.RealClock{}) + cloudEventsClient := c.cloudEventsClient + + for { + if cloudEventsClient == nil { + klog.V(4).Infof("reconnecting the cloudevents client") + cloudEventsClient, err = c.cloudEventsOptions.Client(ctx) + // TODO enhance the cloudevents SKD to avoid wrapping the error type to distinguish the net connection + // errors + if err != nil { + // failed to reconnect, try agin + runtime.HandleError(fmt.Errorf("the cloudevents client reconnect failed, %v", err)) + <-connBackoffManager.Backoff().C() + continue + } + + // the cloudevents network connection is back, refresh the current cloudevents client and send the + // receiver restart signal + klog.V(4).Infof("the cloudevents client is reconnected") + c.resetClient(cloudEventsClient) + c.sendReceiverSignal(restartReceiverSignal) + } + + select { + case <-ctx.Done(): + return + case err, ok := <-c.cloudEventsOptions.ErrorChan(): + if !ok { + // error channel is closed, do nothing + return + } + + runtime.HandleError(fmt.Errorf("the cloudevents client is disconnected, %v", err)) + + // the cloudevents client network connection is closed, send the receiver stop signal, set the current + // client to nil and retry + c.sendReceiverSignal(stopReceiverSignal) + + cloudEventsClient = nil + c.resetClient(cloudEventsClient) + + <-connBackoffManager.Backoff().C() + } + } + }() + + return nil +} + +func (c *baseClient) publish(ctx context.Context, evt cloudevents.Event) error { + now := time.Now() + + if err := c.cloudEventsRateLimiter.Wait(ctx); err != nil { + return fmt.Errorf("client rate limiter Wait returned an error: %w", err) + } + + latency := time.Since(now) + if latency > longThrottleLatency { + klog.Warningf(fmt.Sprintf("Waited for %v due to client-side throttling, not priority and fairness, request: %s", + latency, evt)) + } + + sendingCtx, err := c.cloudEventsOptions.WithContext(ctx, 
evt.Context) + if err != nil { + return err + } + + klog.V(4).Infof("Sent event: %v\n%s", ctx, evt) + + // make sure the current client is the newest + c.RLock() + defer c.RUnlock() + + if c.cloudEventsClient == nil { + return fmt.Errorf("the cloudevents client is not ready") + } + + if result := c.cloudEventsClient.Send(sendingCtx, evt); cloudevents.IsUndelivered(result) { + return fmt.Errorf("failed to send event %s, %v", evt, result) + } + + return nil +} + +func (c *baseClient) subscribe(ctx context.Context, receive receiveFn) { + c.Lock() + defer c.Unlock() + + // make sure there is only one subscription go routine starting for one client. + if c.receiverChan != nil { + klog.Warningf("the subscription has already started") + return + } + + c.receiverChan = make(chan int) + + // start a go routine to handle cloudevents subscription + go func() { + receiverCtx, receiverCancel := context.WithCancel(context.TODO()) + cloudEventsClient := c.cloudEventsClient + + for { + if cloudEventsClient != nil { + // TODO send a resync request + + go func() { + if err := cloudEventsClient.StartReceiver(receiverCtx, func(evt cloudevents.Event) { + receive(receiverCtx, evt) + }); err != nil { + runtime.HandleError(fmt.Errorf("failed to receive cloudevents, %v", err)) + } + }() + } + + select { + case <-ctx.Done(): + receiverCancel() + close(c.receiverChan) + return + case signal, ok := <-c.receiverChan: + if !ok { + // receiver channel is closed, stop the receiver + receiverCancel() + return + } + + switch signal { + case restartReceiverSignal: + klog.V(4).Infof("restart the cloudevents receiver") + // make sure the current client is the newest + c.RLock() + cloudEventsClient = c.cloudEventsClient + c.RUnlock() + + // rebuild the receiver context + receiverCtx, receiverCancel = context.WithCancel(context.TODO()) + case stopReceiverSignal: + klog.V(4).Infof("stop the cloudevents receiver") + receiverCancel() + cloudEventsClient = nil + default: + 
runtime.HandleError(fmt.Errorf("unknown receiver signal %d", signal)) + } + } + } + }() +} + +func (c *baseClient) resetClient(client cloudevents.Client) { + c.Lock() + defer c.Unlock() + + c.cloudEventsClient = client +} + +func (c *baseClient) sendReceiverSignal(signal int) { + c.RLock() + defer c.RUnlock() + + if c.receiverChan != nil { + c.receiverChan <- signal + } +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/generic/interface.go b/vendor/open-cluster-management.io/api/cloudevents/generic/interface.go new file mode 100644 index 000000000..484b99255 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/generic/interface.go @@ -0,0 +1,65 @@ +package generic + +import ( + "context" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubetypes "k8s.io/apimachinery/pkg/types" + + "open-cluster-management.io/api/cloudevents/generic/types" +) + +// ResourceHandler handles the received resource object. +type ResourceHandler[T ResourceObject] func(action types.ResourceAction, obj T) error + +// StatusHashGetter gets the status hash of one resource object. +type StatusHashGetter[T ResourceObject] func(obj T) (string, error) + +type ResourceObject interface { + // GetUID returns the resource ID of this object. The resource ID represents the unique identifier for this object. + // The source should ensure its uniqueness and consistency. + GetUID() kubetypes.UID + + // GetResourceVersion returns the resource version of this object. The resource version is a required int64 sequence + // number property that must be incremented by the source whenever this resource changes. + // The source should guarantee its incremental nature. + GetResourceVersion() string + + // GetDeletionTimestamp returns the deletion timestamp of this object. 
The deletiontimestamp is an optional + // timestamp property representing the resource is deleting from the source, the agent needs to clean up the + // resource from its cluster. + GetDeletionTimestamp() *metav1.Time +} + +type Lister[T ResourceObject] interface { + // List returns the list of resource objects that are maintained by source/agent. + List(options types.ListOptions) ([]T, error) +} + +type Codec[T ResourceObject] interface { + // EventDataType indicates which type of the event data the codec is used for. + EventDataType() types.CloudEventsDataType + + // Encode a resource object to cloudevents event. + // Each event should have the following extensions: `resourceid`, `resourceversion` and `clustername`. + // The source set the `deletiontimestamp` extension to indicate one resource object is deleting from a source. + // The agent set the `originalsource` extension to indicate one resource belonged to which source. + Encode(source string, eventType types.CloudEventsType, obj T) (*cloudevents.Event, error) + + // Decode a cloudevents event to a resource object. + Decode(event *cloudevents.Event) (T, error) +} + +type CloudEventsClient[T ResourceObject] interface { + // Resync the resources of one source/agent by sending resync request. + Resync(ctx context.Context) error + + // Publish the resources spec/status event to the broker. + Publish(ctx context.Context, eventType types.CloudEventsType, obj T) error + + // Subscribe the resources status/spec event to the broker to receive the resources status/spec and use + // ResourceHandler to handle them. 
+ Subscribe(ctx context.Context, handlers ...ResourceHandler[T]) +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/generic/options/mqtt/agentoptions.go b/vendor/open-cluster-management.io/api/cloudevents/generic/options/mqtt/agentoptions.go new file mode 100644 index 000000000..3f9361870 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/generic/options/mqtt/agentoptions.go @@ -0,0 +1,87 @@ +package mqtt + +import ( + "context" + "fmt" + "strings" + + cloudeventsmqtt "github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2" + cloudevents "github.com/cloudevents/sdk-go/v2" + cloudeventscontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/eclipse/paho.golang/paho" + + "open-cluster-management.io/api/cloudevents/generic/options" + "open-cluster-management.io/api/cloudevents/generic/types" +) + +type mqttAgentOptions struct { + MQTTOptions + errorChan chan error + clusterName string + agentID string +} + +func NewAgentOptions(mqttOptions *MQTTOptions, clusterName, agentID string) *options.CloudEventsAgentOptions { + return &options.CloudEventsAgentOptions{ + CloudEventsOptions: &mqttAgentOptions{ + MQTTOptions: *mqttOptions, + errorChan: make(chan error), + clusterName: clusterName, + agentID: agentID, + }, + AgentID: agentID, + ClusterName: clusterName, + } +} + +func (o *mqttAgentOptions) WithContext(ctx context.Context, evtCtx cloudevents.EventContext) (context.Context, error) { + eventType, err := types.ParseCloudEventsType(evtCtx.GetType()) + if err != nil { + return nil, fmt.Errorf("unsupported event type %s, %v", eventType, err) + } + + if eventType.Action == types.ResyncRequestAction { + // agent publishes event to spec resync topic to request to get resources spec from all sources + topic := strings.Replace(SpecResyncTopic, "+", o.clusterName, -1) + return cloudeventscontext.WithTopic(ctx, topic), nil + } + + // agent publishes event to status topic to send the resource status from a specified cluster + 
originalSource, err := evtCtx.GetExtension(types.ExtensionOriginalSource) + if err != nil { + return nil, err + } + + statusTopic := strings.Replace(StatusTopic, "+", fmt.Sprintf("%s", originalSource), 1) + statusTopic = strings.Replace(statusTopic, "+", o.clusterName, -1) + return cloudeventscontext.WithTopic(ctx, statusTopic), nil +} + +func (o *mqttAgentOptions) Client(ctx context.Context) (cloudevents.Client, error) { + receiver, err := o.GetCloudEventsClient( + ctx, + fmt.Sprintf("%s-client", o.agentID), + func(err error) { + o.errorChan <- err + }, + cloudeventsmqtt.WithPublish(&paho.Publish{QoS: byte(o.PubQoS)}), + cloudeventsmqtt.WithSubscribe( + &paho.Subscribe{ + Subscriptions: map[string]paho.SubscribeOptions{ + // receiving the resources spec from sources with spec topic + replaceNth(SpecTopic, "+", o.clusterName, 2): {QoS: byte(o.SubQoS)}, + // receiving the resources status resync request from sources with status resync topic + StatusResyncTopic: {QoS: byte(o.SubQoS)}, + }, + }, + ), + ) + if err != nil { + return nil, err + } + return receiver, nil +} + +func (o *mqttAgentOptions) ErrorChan() <-chan error { + return o.errorChan +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/generic/options/mqtt/options.go b/vendor/open-cluster-management.io/api/cloudevents/generic/options/mqtt/options.go new file mode 100644 index 000000000..da9a56c61 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/generic/options/mqtt/options.go @@ -0,0 +1,234 @@ +package mqtt + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "os" + "strings" + + cloudeventsmqtt "github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2" + cloudevents "github.com/cloudevents/sdk-go/v2" + "github.com/eclipse/paho.golang/packets" + "github.com/eclipse/paho.golang/paho" + "gopkg.in/yaml.v2" +) + +const ( + // SpecTopic is a MQTT topic for resource spec. 
+ SpecTopic = "sources/+/clusters/+/spec" + + // StatusTopic is a MQTT topic for resource status. + StatusTopic = "sources/+/clusters/+/status" + + // SpecResyncTopic is a MQTT topic for resource spec resync. + SpecResyncTopic = "sources/clusters/+/specresync" + + // StatusResyncTopic is a MQTT topic for resource status resync. + StatusResyncTopic = "sources/+/clusters/statusresync" +) + +// MQTTOptions holds the options that are used to build MQTT client. +type MQTTOptions struct { + BrokerHost string + Username string + Password string + CAFile string + ClientCertFile string + ClientKeyFile string + KeepAlive uint16 + PubQoS int + SubQoS int +} + +// MQTTConfig holds the information needed to build connect to MQTT broker as a given user. +type MQTTConfig struct { + // BrokerHost is the host of the MQTT broker (hostname:port). + BrokerHost string `json:"brokerHost" yaml:"brokerHost"` + + // Username is the username for basic authentication to connect the MQTT broker. + Username string `json:"username,omitempty" yaml:"username,omitempty"` + // Password is the password for basic authentication to connect the MQTT broker. + Password string `json:"password,omitempty" yaml:"password,omitempty"` + + // CAFile is the file path to a cert file for the MQTT broker certificate authority. + CAFile string `json:"caFile,omitempty" yaml:"caFile,omitempty"` + // ClientCertFile is the file path to a client cert file for TLS. + ClientCertFile string `json:"clientCertFile,omitempty" yaml:"clientCertFile,omitempty"` + // ClientKeyFile is the file path to a client key file for TLS. 
+ ClientKeyFile string `json:"clientKeyFile,omitempty" yaml:"clientKeyFile,omitempty"` + + // KeepAlive is the keep alive time in seconds for MQTT clients, by default is 60s + KeepAlive *uint16 `json:"keepAlive,omitempty" yaml:"keepAlive,omitempty"` + // PubQoS is the QoS for publish, by default is 1 + PubQoS *int `json:"pubQoS,omitempty" yaml:"pubQoS,omitempty"` + // SubQoS is the Qos for subscribe, by default is 1 + SubQoS *int `json:"subQoS,omitempty" yaml:"subQoS,omitempty"` +} + +func NewMQTTOptions() *MQTTOptions { + return &MQTTOptions{ + KeepAlive: 60, + PubQoS: 1, + SubQoS: 1, + } +} + +// BuildMQTTOptionsFromFlags builds configs from a config filepath. +func BuildMQTTOptionsFromFlags(configPath string) (*MQTTOptions, error) { + configData, err := os.ReadFile(configPath) + if err != nil { + return nil, err + } + + config := &MQTTConfig{} + if err := yaml.Unmarshal(configData, config); err != nil { + return nil, err + } + + if config.BrokerHost == "" { + return nil, fmt.Errorf("brokerHost is required") + } + + if (config.ClientCertFile == "" && config.ClientKeyFile != "") || + (config.ClientCertFile != "" && config.ClientKeyFile == "") { + return nil, fmt.Errorf("either both or none of clientCertFile and clientKeyFile must be set") + } + if config.ClientCertFile != "" && config.ClientKeyFile != "" && config.CAFile == "" { + return nil, fmt.Errorf("setting clientCertFile and clientKeyFile requires caFile") + } + + options := &MQTTOptions{ + BrokerHost: config.BrokerHost, + Username: config.Username, + Password: config.Password, + CAFile: config.CAFile, + ClientCertFile: config.ClientCertFile, + ClientKeyFile: config.ClientKeyFile, + KeepAlive: 60, + PubQoS: 1, + SubQoS: 1, + } + + if config.KeepAlive != nil { + options.KeepAlive = *config.KeepAlive + } + + if config.PubQoS != nil { + options.PubQoS = *config.PubQoS + } + + if config.SubQoS != nil { + options.SubQoS = *config.SubQoS + } + + return options, nil +} + +func (o *MQTTOptions) GetNetConn() 
(net.Conn, error) { + if len(o.CAFile) != 0 { + certPool, err := x509.SystemCertPool() + if err != nil { + return nil, err + } + + caPEM, err := os.ReadFile(o.CAFile) + if err != nil { + return nil, err + } + + if ok := certPool.AppendCertsFromPEM(caPEM); !ok { + return nil, fmt.Errorf("invalid CA %s", o.CAFile) + } + + clientCerts, err := tls.LoadX509KeyPair(o.ClientCertFile, o.ClientKeyFile) + if err != nil { + return nil, err + } + + conn, err := tls.Dial("tcp", o.BrokerHost, &tls.Config{ + RootCAs: certPool, + Certificates: []tls.Certificate{clientCerts}, + }) + if err != nil { + return nil, fmt.Errorf("failed to connect to MQTT broker %s, %v", o.BrokerHost, err) + } + + // ensure parallel writes are thread-Safe + return packets.NewThreadSafeConn(conn), nil + } + + conn, err := net.Dial("tcp", o.BrokerHost) + if err != nil { + return nil, fmt.Errorf("failed to connect to MQTT broker %s, %v", o.BrokerHost, err) + } + + // ensure parallel writes are thread-Safe + return packets.NewThreadSafeConn(conn), nil +} + +func (o *MQTTOptions) GetMQTTConnectOption(clientID string) *paho.Connect { + connect := &paho.Connect{ + ClientID: clientID, + KeepAlive: o.KeepAlive, + CleanStart: true, + } + + if len(o.Username) != 0 { + connect.Username = o.Username + connect.UsernameFlag = true + } + + if len(o.Password) != 0 { + connect.Password = []byte(o.Password) + connect.PasswordFlag = true + } + + return connect +} + +func (o *MQTTOptions) GetCloudEventsClient( + ctx context.Context, + clientID string, + errorHandler func(error), + clientOpts ...cloudeventsmqtt.Option, +) (cloudevents.Client, error) { + netConn, err := o.GetNetConn() + if err != nil { + return nil, err + } + + config := &paho.ClientConfig{ + ClientID: clientID, + Conn: netConn, + OnClientError: errorHandler, + } + + opts := []cloudeventsmqtt.Option{cloudeventsmqtt.WithConnect(o.GetMQTTConnectOption(clientID))} + opts = append(opts, clientOpts...) + protocol, err := cloudeventsmqtt.New(ctx, config, opts...) 
+ if err != nil { + return nil, err + } + + return cloudevents.NewClient(protocol) +} + +// Replace the nth occurrence of old in str by new. +func replaceNth(str, old, new string, n int) string { + i := 0 + for m := 1; m <= n; m++ { + x := strings.Index(str[i:], old) + if x < 0 { + break + } + i += x + if m == n { + return str[:i] + new + str[i+len(old):] + } + i += len(old) + } + return str +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/generic/options/mqtt/sourceoptions.go b/vendor/open-cluster-management.io/api/cloudevents/generic/options/mqtt/sourceoptions.go new file mode 100644 index 000000000..5e4f20898 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/generic/options/mqtt/sourceoptions.go @@ -0,0 +1,83 @@ +package mqtt + +import ( + "context" + "fmt" + "strings" + + cloudeventsmqtt "github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2" + cloudevents "github.com/cloudevents/sdk-go/v2" + cloudeventscontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/eclipse/paho.golang/paho" + + "open-cluster-management.io/api/cloudevents/generic/options" + "open-cluster-management.io/api/cloudevents/generic/types" +) + +type mqttSourceOptions struct { + MQTTOptions + errorChan chan error + sourceID string +} + +func NewSourceOptions(mqttOptions *MQTTOptions, sourceID string) *options.CloudEventsSourceOptions { + return &options.CloudEventsSourceOptions{ + CloudEventsOptions: &mqttSourceOptions{ + MQTTOptions: *mqttOptions, + errorChan: make(chan error), + sourceID: sourceID, + }, + SourceID: sourceID, + } +} + +func (o *mqttSourceOptions) WithContext(ctx context.Context, evtCtx cloudevents.EventContext) (context.Context, error) { + eventType, err := types.ParseCloudEventsType(evtCtx.GetType()) + if err != nil { + return nil, fmt.Errorf("unsupported event type %s, %v", eventType, err) + } + + if eventType.Action == types.ResyncRequestAction { + // source publishes event to status resync topic to request to get resources 
status from all clusters + return cloudeventscontext.WithTopic(ctx, strings.Replace(StatusResyncTopic, "+", o.sourceID, -1)), nil + } + + clusterName, err := evtCtx.GetExtension(types.ExtensionClusterName) + if err != nil { + return nil, err + } + + // source publishes event to spec topic to send the resource spec to a specified cluster + specTopic := strings.Replace(SpecTopic, "+", o.sourceID, 1) + specTopic = strings.Replace(specTopic, "+", fmt.Sprintf("%s", clusterName), -1) + return cloudeventscontext.WithTopic(ctx, specTopic), nil +} + +func (o *mqttSourceOptions) Client(ctx context.Context) (cloudevents.Client, error) { + receiver, err := o.GetCloudEventsClient( + ctx, + fmt.Sprintf("%s-client", o.sourceID), + func(err error) { + o.errorChan <- err + }, + cloudeventsmqtt.WithPublish(&paho.Publish{QoS: byte(o.PubQoS)}), + cloudeventsmqtt.WithSubscribe( + &paho.Subscribe{ + Subscriptions: map[string]paho.SubscribeOptions{ + // receiving the resources status from agents with status topic + strings.Replace(StatusTopic, "+", o.sourceID, 1): {QoS: byte(o.SubQoS)}, + // receiving the resources spec resync request from agents with spec resync topic + SpecResyncTopic: {QoS: byte(o.SubQoS)}, + }, + }, + ), + ) + if err != nil { + return nil, err + } + return receiver, nil +} + +func (o *mqttSourceOptions) ErrorChan() <-chan error { + return o.errorChan +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/generic/options/options.go b/vendor/open-cluster-management.io/api/cloudevents/generic/options/options.go new file mode 100644 index 000000000..57fbf14e9 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/generic/options/options.go @@ -0,0 +1,66 @@ +package options + +import ( + "context" + + cloudevents "github.com/cloudevents/sdk-go/v2" +) + +// CloudEventsOptions provides cloudevents clients to send/receive cloudevents based on different event protocol. 
+//
+// Available implementations:
+// - MQTT
+type CloudEventsOptions interface {
+	// WithContext returns back a new context with the given cloudevent context. The new context will be used when
+	// sending a cloudevent. The new context is protocol-dependent, for example, for MQTT, the new context should contain
+	// the MQTT topic, for Kafka, the context should contain the message key, etc.
+	WithContext(ctx context.Context, evtContext cloudevents.EventContext) (context.Context, error)
+
+	// Client returns a cloudevents client for sending and receiving cloudevents
+	Client(ctx context.Context) (cloudevents.Client, error)
+
+	// ErrorChan returns a chan which will receive the cloudevents connection error. The source/agent client will try to
+	// reconnect when this error occurs.
+	ErrorChan() <-chan error
+}
+
+// EventRateLimit for limiting the event sending rate.
+type EventRateLimit struct {
+	// QPS indicates the maximum QPS to send the event.
+	// If it's less than or equal to zero, the DefaultQPS (50) will be used.
+	QPS float32
+
+	// Maximum burst for throttle.
+	// If it's less than or equal to zero, the DefaultBurst (100) will be used.
+	Burst int
+}
+
+// CloudEventsSourceOptions provides the required options to build a source CloudEventsClient
+type CloudEventsSourceOptions struct {
+	// CloudEventsOptions provides cloudevents clients to send/receive cloudevents based on different event protocol.
+	CloudEventsOptions CloudEventsOptions
+
+	// SourceID is a unique identifier for a source, for example, it can generate a source ID by hashing the hub cluster
+	// URL and appending the controller name. Similarly, a RESTful service can select a unique name or generate a unique
+	// ID in the associated database for its source identification.
+	SourceID string
+
+	// EventRateLimit limits the event sending rate.
+ EventRateLimit EventRateLimit +} + +// CloudEventsAgentOptions provides the required options to build an agent CloudEventsClient +type CloudEventsAgentOptions struct { + // CloudEventsOptions provides cloudevents clients to send/receive cloudevents based on different event protocol. + CloudEventsOptions CloudEventsOptions + + // AgentID is a unique identifier for an agent, for example, it can consist of a managed cluster name and an agent + // name. + AgentID string + + // ClusterName is the name of a managed cluster on which the agent runs. + ClusterName string + + // EventRateLimit limits the event sending rate. + EventRateLimit EventRateLimit +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/generic/payload/payload.go b/vendor/open-cluster-management.io/api/cloudevents/generic/payload/payload.go new file mode 100644 index 000000000..cacfcd950 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/generic/payload/payload.go @@ -0,0 +1,48 @@ +package payload + +import ( + "encoding/json" + "fmt" + + cloudevents "github.com/cloudevents/sdk-go/v2" +) + +type ResourceVersion struct { + ResourceID string `json:"resourceID"` + ResourceVersion int64 `json:"resourceVersion"` +} + +type ResourceStatusHash struct { + ResourceID string `json:"resourceID"` + StatusHash string `json:"statusHash"` +} + +// ResourceVersionList represents the resource versions of the resources maintained by the agent. +// The item of this list includes the resource ID and resource version. +type ResourceVersionList struct { + Versions []ResourceVersion `json:"resourceVersions"` +} + +// ResourceStatusHashList represents the status hash of the resources maintained by the source. +// The item of this list includes the resource ID and resource status hash. 
+type ResourceStatusHashList struct { + Hashes []ResourceStatusHash `json:"statusHashes"` +} + +func DecodeSpecResyncRequest(evt cloudevents.Event) (*ResourceVersionList, error) { + versions := &ResourceVersionList{} + data := evt.Data() + if err := json.Unmarshal(data, versions); err != nil { + return nil, fmt.Errorf("failed to unmarshal spec resync request payload %s, %v", string(data), err) + } + return versions, nil +} + +func DecodeStatusResyncRequest(evt cloudevents.Event) (*ResourceStatusHashList, error) { + hashes := &ResourceStatusHashList{} + data := evt.Data() + if err := json.Unmarshal(data, hashes); err != nil { + return nil, fmt.Errorf("failed to unmarshal status resync request payload %s, %v", string(data), err) + } + return hashes, nil +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/generic/ratelimiter.go b/vendor/open-cluster-management.io/api/cloudevents/generic/ratelimiter.go new file mode 100644 index 000000000..a1061d318 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/generic/ratelimiter.go @@ -0,0 +1,34 @@ +package generic + +import ( + "time" + + "k8s.io/client-go/util/flowcontrol" + + "open-cluster-management.io/api/cloudevents/generic/options" +) + +// longThrottleLatency defines threshold for logging requests. All requests being +// throttled (via the provided rateLimiter) for more than longThrottleLatency will +// be logged. 
+const longThrottleLatency = 1 * time.Second + +const ( + // TODO we may adjust these after performance test + DefaultQPS float32 = 50.0 + DefaultBurst int = 100 +) + +func NewRateLimiter(limit options.EventRateLimit) flowcontrol.RateLimiter { + qps := limit.QPS + if qps <= 0.0 { + qps = DefaultQPS + } + + burst := limit.Burst + if burst <= 0 { + burst = DefaultBurst + } + + return flowcontrol.NewTokenBucketRateLimiter(qps, burst) +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/generic/sourceclient.go b/vendor/open-cluster-management.io/api/cloudevents/generic/sourceclient.go new file mode 100644 index 000000000..d0dc09f9c --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/generic/sourceclient.go @@ -0,0 +1,309 @@ +package generic + +import ( + "context" + "fmt" + "strconv" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" + + "open-cluster-management.io/api/cloudevents/generic/options" + "open-cluster-management.io/api/cloudevents/generic/payload" + "open-cluster-management.io/api/cloudevents/generic/types" +) + +// CloudEventSourceClient is a client for a source to resync/send/receive its resources with cloud events. +// +// A source is a component that runs on a server, it can be a controller on the hub cluster or a RESTful service +// handling resource requests. +type CloudEventSourceClient[T ResourceObject] struct { + *baseClient + lister Lister[T] + codecs map[types.CloudEventsDataType]Codec[T] + statusHashGetter StatusHashGetter[T] + sourceID string +} + +// NewCloudEventSourceClient returns an instance for CloudEventSourceClient. The following arguments are required to +// create a client +// - sourceOptions provides the sourceID and the cloudevents clients that are based on different event protocols for +// sending/receiving the cloudevents. +// - lister gets the resources from a cache/store of a source. 
+// - statusHashGetter calculates the resource status hash.
+// - codecs is a list of codecs for encoding/decoding a resource object/cloudevent to/from a cloudevent/resource object.
+func NewCloudEventSourceClient[T ResourceObject](
+	ctx context.Context,
+	sourceOptions *options.CloudEventsSourceOptions,
+	lister Lister[T],
+	statusHashGetter StatusHashGetter[T],
+	codecs ...Codec[T],
+) (*CloudEventSourceClient[T], error) {
+	baseClient := &baseClient{
+		cloudEventsOptions:     sourceOptions.CloudEventsOptions,
+		cloudEventsRateLimiter: NewRateLimiter(sourceOptions.EventRateLimit),
+	}
+
+	if err := baseClient.connect(ctx); err != nil {
+		return nil, err
+	}
+
+	evtCodes := make(map[types.CloudEventsDataType]Codec[T])
+	for _, codec := range codecs {
+		evtCodes[codec.EventDataType()] = codec
+	}
+
+	return &CloudEventSourceClient[T]{
+		baseClient:       baseClient,
+		lister:           lister,
+		codecs:           evtCodes,
+		statusHashGetter: statusHashGetter,
+		sourceID:         sourceOptions.SourceID,
+	}, nil
+}
+
+// Resync the resources status by sending a status resync request from a source to all clusters.
+func (c *CloudEventSourceClient[T]) Resync(ctx context.Context) error { + // list the resource objects that are maintained by the current source from all clusters + objs, err := c.lister.List(types.ListOptions{ClusterName: types.ClusterAll, Source: c.sourceID}) + if err != nil { + return err + } + + hashes := &payload.ResourceStatusHashList{Hashes: make([]payload.ResourceStatusHash, len(objs))} + for i, obj := range objs { + statusHash, err := c.statusHashGetter(obj) + if err != nil { + return err + } + + hashes.Hashes[i] = payload.ResourceStatusHash{ + ResourceID: string(obj.GetUID()), + StatusHash: statusHash, + } + } + + // only resync the resources whose event data type is registered + for eventDataType := range c.codecs { + eventType := types.CloudEventsType{ + CloudEventsDataType: eventDataType, + SubResource: types.SubResourceStatus, + Action: types.ResyncRequestAction, + } + + evt := types.NewEventBuilder(c.sourceID, eventType).NewEvent() + if err := evt.SetData(cloudevents.ApplicationJSON, hashes); err != nil { + return fmt.Errorf("failed to set data to cloud event: %v", err) + } + + if err := c.publish(ctx, evt); err != nil { + return err + } + } + + return nil +} + +// Publish a resource spec from a source to an agent. +func (c *CloudEventSourceClient[T]) Publish(ctx context.Context, eventType types.CloudEventsType, obj T) error { + if eventType.SubResource != types.SubResourceSpec { + return fmt.Errorf("unsupported event eventType %s", eventType) + } + + codec, ok := c.codecs[eventType.CloudEventsDataType] + if !ok { + return fmt.Errorf("failed to find the codec for event %s", eventType.CloudEventsDataType) + } + + evt, err := codec.Encode(c.sourceID, eventType, obj) + if err != nil { + return err + } + + if err := c.publish(ctx, *evt); err != nil { + return err + } + + return nil +} + +// Subscribe the events that are from the agent spec resync request or agent resource status request. 
+// For spec resync request, source publish the current resources spec back as response. +// For resource status request, source receives resource status and handles the status with resource handlers. +func (c *CloudEventSourceClient[T]) Subscribe(ctx context.Context, handlers ...ResourceHandler[T]) { + c.subscribe(ctx, func(ctx context.Context, evt cloudevents.Event) { + c.receive(ctx, evt, handlers...) + }) +} + +func (c *CloudEventSourceClient[T]) receive(ctx context.Context, evt cloudevents.Event, handlers ...ResourceHandler[T]) { + klog.V(4).Infof("Received event:\n%s", evt) + + eventType, err := types.ParseCloudEventsType(evt.Type()) + if err != nil { + klog.Errorf("failed to parse cloud event type, %v", err) + return + } + + if eventType.Action == types.ResyncRequestAction { + if eventType.SubResource != types.SubResourceSpec { + klog.Warningf("unsupported event type %s, ignore", eventType) + return + } + + if err := c.respondResyncSpecRequest(ctx, eventType.CloudEventsDataType, evt); err != nil { + klog.Errorf("failed to resync resources spec, %v", err) + } + + return + } + + codec, ok := c.codecs[eventType.CloudEventsDataType] + if !ok { + klog.Warningf("failed to find the codec for event %s, ignore", eventType.CloudEventsDataType) + return + } + + if eventType.SubResource != types.SubResourceStatus { + klog.Warningf("unsupported event type %s, ignore", eventType) + return + } + + clusterName, err := evt.Context.GetExtension(types.ExtensionClusterName) + if err != nil { + klog.Errorf("failed to find cluster name, %v", err) + return + } + + obj, err := codec.Decode(&evt) + if err != nil { + klog.Errorf("failed to decode status, %v", err) + return + } + + action, err := c.statusAction(fmt.Sprintf("%s", clusterName), obj) + if err != nil { + klog.Errorf("failed to generate status event %s, %v", evt, err) + return + } + + if len(action) == 0 { + // no action is required, ignore + return + } + + for _, handler := range handlers { + if err := handler(action, 
obj); err != nil { + klog.Errorf("failed to handle status event %s, %v", evt, err) + } + } +} + +// Upon receiving the spec resync event, the source responds by sending resource status events to the broker as follows: +// - If the request event message is empty, the source returns all resources associated with the work agent. +// - If the request event message contains resource IDs and versions, the source retrieves the resource with the +// specified ID and compares the versions. +// - If the requested resource version matches the source's current maintained resource version, the source does not +// resend the resource. +// - If the requested resource version is older than the source's current maintained resource version, the source +// sends the resource. +func (c *CloudEventSourceClient[T]) respondResyncSpecRequest( + ctx context.Context, evtDataType types.CloudEventsDataType, evt cloudevents.Event) error { + resourceVersions, err := payload.DecodeSpecResyncRequest(evt) + if err != nil { + return err + } + + eventType := types.CloudEventsType{ + CloudEventsDataType: evtDataType, + SubResource: types.SubResourceSpec, + Action: types.ResyncResponseAction, + } + + clusterName, err := evt.Context.GetExtension(types.ExtensionClusterName) + if err != nil { + return err + } + + objs, err := c.lister.List(types.ListOptions{ClusterName: fmt.Sprintf("%s", clusterName), Source: c.sourceID}) + if err != nil { + return err + } + + for _, obj := range objs { + lastResourceVersion := findResourceVersion(string(obj.GetUID()), resourceVersions.Versions) + currentResourceVersion, err := strconv.ParseInt(obj.GetResourceVersion(), 10, 64) + if err != nil { + continue + } + + if currentResourceVersion > lastResourceVersion { + if err := c.Publish(ctx, eventType, obj); err != nil { + return err + } + } + } + + // the resources do not exist on the source, but exist on the agent, delete them + for _, rv := range resourceVersions.Versions { + _, exists := getObj(rv.ResourceID, objs) + 
if exists { + continue + } + + // send a delete event for the current resource + evt := types.NewEventBuilder(c.sourceID, eventType). + WithResourceID(rv.ResourceID). + WithResourceVersion(rv.ResourceVersion). + WithClusterName(fmt.Sprintf("%s", clusterName)). + WithDeletionTimestamp(metav1.Now().Time). + NewEvent() + if err := c.publish(ctx, evt); err != nil { + return err + } + } + + return nil +} + +func (c *CloudEventSourceClient[T]) statusAction(clusterName string, obj T) (evt types.ResourceAction, err error) { + objs, err := c.lister.List(types.ListOptions{ClusterName: clusterName, Source: c.sourceID}) + if err != nil { + return evt, err + } + + lastObj, exists := getObj(string(obj.GetUID()), objs) + if !exists { + return evt, nil + } + + lastStatusHash, err := c.statusHashGetter(lastObj) + if err != nil { + klog.Warningf("failed to hash object %s status, %v", lastObj.GetUID(), err) + return evt, err + } + + currentStatusHash, err := c.statusHashGetter(obj) + if err != nil { + klog.Warningf("failed to hash object %s status, %v", obj.GetUID(), err) + return evt, nil + } + + if lastStatusHash == currentStatusHash { + return evt, nil + } + + return types.StatusModified, nil +} + +func findResourceVersion(id string, versions []payload.ResourceVersion) int64 { + for _, version := range versions { + if id == version.ResourceID { + return version.ResourceVersion + } + } + + return 0 +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/generic/types/types.go b/vendor/open-cluster-management.io/api/cloudevents/generic/types/types.go new file mode 100644 index 000000000..d3453f5fa --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/generic/types/types.go @@ -0,0 +1,227 @@ +package types + +import ( + "fmt" + "strings" + "time" + + cloudevents "github.com/cloudevents/sdk-go/v2" + "github.com/google/uuid" +) + +const ( + // ClusterAll is the default argument to specify on a context when you want to list or filter resources across all + // 
managed clusters. + ClusterAll = "" + + // SourceAll is the default argument to specify on a context when you want to list or filter resources across all + // sources. + SourceAll = "" +) + +// EventSubResource describes the subresource of a cloud event. Only `spec` and `status` are supported. +type EventSubResource string + +const ( + // SubResourceSpec represents the cloud event data is from the resource spec. + SubResourceSpec EventSubResource = "spec" + + // SubResourceStatus represents the cloud event data is from the resource status. + SubResourceStatus EventSubResource = "status" +) + +// EventAction describes the expected action of a cloud event. +type EventAction string + +const ( + // ResyncRequestAction represents the cloud event is for the resync request. + ResyncRequestAction EventAction = "resync_request" + + // ResyncResponseAction represents the cloud event is for the resync response. + ResyncResponseAction EventAction = "resync_response" +) + +const ( + // ExtensionResourceID is the cloud event extension key of the resource ID. + ExtensionResourceID = "resourceid" + + // ExtensionResourceVersion is the cloud event extension key of the resource version. + ExtensionResourceVersion = "resourceversion" + + // ExtensionDeletionTimestamp is the cloud event extension key of the deletion timestamp. + ExtensionDeletionTimestamp = "deletiontimestamp" + + // ExtensionClusterName is the cloud event extension key of the cluster name. + ExtensionClusterName = "clustername" + + // ExtensionOriginalSource is the cloud event extension key of the original source. + ExtensionOriginalSource = "originalsource" +) + +// ResourceAction represents an action on a resource object on the source or agent. +type ResourceAction string + +const ( + // Added represents a resource is added on the source part. + Added ResourceAction = "ADDED" + + // Modified represents a resource is modified on the source part. 
+ Modified ResourceAction = "MODIFIED" + + // StatusModified represents the status of a resource is modified on the agent part. + StatusModified ResourceAction = "STATUSMODIFIED" + + // Deleted represents a resource is deleted from the source part. + Deleted ResourceAction = "DELETED" +) + +// ListOptions is the query options for listing the resource objects from the source/agent. +type ListOptions struct { + // Source uses the cluster name to restrict the list of returned objects by their cluster name. + // Defaults to all clusters. + ClusterName string + + // Agent uses the source ID to restrict the list of returned objects by their source ID. + // Defaults to all sources. + Source string +} + +// CloudEventsDataType uniquely identifies the type of cloud event data. +type CloudEventsDataType struct { + Group string + Version string + Resource string +} + +func (t CloudEventsDataType) String() string { + return fmt.Sprintf("%s.%s.%s", t.Group, t.Version, t.Resource) +} + +// CloudEventsType represents the type of cloud events, which describes the type of cloud event data. +type CloudEventsType struct { + // CloudEventsDataType uniquely identifies the type of cloud event data. + CloudEventsDataType + + // SubResource represents the cloud event data is from the resource spec or status. + SubResource EventSubResource + + // Action represents the expected action for this cloud event. + Action EventAction +} + +func (t CloudEventsType) String() string { + return fmt.Sprintf("%s.%s.%s.%s.%s", t.Group, t.Version, t.Resource, t.SubResource, t.Action) +} + +// ParseCloudEventsDataType parses the cloud event data type to a struct object. +// The type format is `<group>.<version>.<resource>`. 
+func ParseCloudEventsDataType(cloudEventsDataType string) (*CloudEventsDataType, error) { + types := strings.Split(cloudEventsDataType, ".") + length := len(types) + if length < 3 { + return nil, fmt.Errorf("unsupported cloudevents data type format") + } + return &CloudEventsDataType{ + Group: strings.Join(types[0:length-2], "."), + Version: types[length-2], + Resource: types[length-1], + }, nil +} + +// ParseCloudEventsType parses the cloud event type to a struct object. +// The type format is `<group>.<version>.<resource>.<subresource>.<action>`. +// The `<subresource>` must be one of "spec" and "status". +func ParseCloudEventsType(cloudEventsType string) (*CloudEventsType, error) { + types := strings.Split(cloudEventsType, ".") + length := len(types) + if length < 5 { + return nil, fmt.Errorf("unsupported cloudevents type format") + } + + subResource := EventSubResource(types[length-2]) + if subResource != SubResourceSpec && subResource != SubResourceStatus { + return nil, fmt.Errorf("unsupported subresource %s", subResource) + } + + return &CloudEventsType{ + CloudEventsDataType: CloudEventsDataType{ + Group: strings.Join(types[0:length-4], "."), + Version: types[length-4], + Resource: types[length-3], + }, + SubResource: subResource, + Action: EventAction(types[length-1]), + }, nil +} + +type EventBuilder struct { + source string + clusterName string + originalSource string + resourceID string + resourceVersion *int64 + eventType CloudEventsType + deletionTimestamp time.Time +} + +func NewEventBuilder(source string, eventType CloudEventsType) *EventBuilder { + return &EventBuilder{ + source: source, + eventType: eventType, + } +} + +func (b *EventBuilder) WithResourceID(resourceID string) *EventBuilder { + b.resourceID = resourceID + return b +} + +func (b *EventBuilder) WithResourceVersion(resourceVersion int64) *EventBuilder { + b.resourceVersion = &resourceVersion + return b +} + +func (b *EventBuilder) WithClusterName(clusterName string) *EventBuilder { + b.clusterName = clusterName + return b +} + +func (b 
*EventBuilder) WithOriginalSource(originalSource string) *EventBuilder { + b.originalSource = originalSource + return b +} + +func (b *EventBuilder) WithDeletionTimestamp(timestamp time.Time) *EventBuilder { + b.deletionTimestamp = timestamp + return b +} + +func (b *EventBuilder) NewEvent() cloudevents.Event { + evt := cloudevents.NewEvent() + evt.SetID(uuid.New().String()) + evt.SetType(b.eventType.String()) + evt.SetTime(time.Now()) + evt.SetSource(b.source) + + if len(b.resourceID) != 0 { + evt.SetExtension(ExtensionResourceID, b.resourceID) + } + + if b.resourceVersion != nil { + evt.SetExtension(ExtensionResourceVersion, *b.resourceVersion) + } + + if len(b.clusterName) != 0 { + evt.SetExtension(ExtensionClusterName, b.clusterName) + } + + if len(b.originalSource) != 0 { + evt.SetExtension(ExtensionOriginalSource, b.originalSource) + } + + if !b.deletionTimestamp.IsZero() { + evt.SetExtension(ExtensionDeletionTimestamp, b.deletionTimestamp) + } + + return evt +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/agent/client/manifestwork.go b/vendor/open-cluster-management.io/api/cloudevents/work/agent/client/manifestwork.go new file mode 100644 index 000000000..54261d88f --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/agent/client/manifestwork.go @@ -0,0 +1,171 @@ +package client + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + kubetypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog/v2" + + workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" + workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" + "open-cluster-management.io/api/cloudevents/generic" + "open-cluster-management.io/api/cloudevents/generic/types" + 
"open-cluster-management.io/api/cloudevents/work/agent/codec" + "open-cluster-management.io/api/cloudevents/work/utils" + "open-cluster-management.io/api/cloudevents/work/watcher" + workv1 "open-cluster-management.io/api/work/v1" +) + +const ManifestsDeleted = "Deleted" + +const ( + UpdateRequestAction = "update_request" + DeleteRequestAction = "delete_request" +) + +// ManifestWorkAgentClient implements the ManifestWorkInterface. It sends the manifestworks status back to source by +// CloudEventAgentClient. +type ManifestWorkAgentClient struct { + cloudEventsClient *generic.CloudEventAgentClient[*workv1.ManifestWork] + watcher *watcher.ManifestWorkWatcher + lister workv1lister.ManifestWorkNamespaceLister +} + +var manifestWorkGR = schema.GroupResource{Group: workv1.GroupName, Resource: "manifestworks"} + +var _ workv1client.ManifestWorkInterface = &ManifestWorkAgentClient{} + +func NewManifestWorkAgentClient(cloudEventsClient *generic.CloudEventAgentClient[*workv1.ManifestWork], watcher *watcher.ManifestWorkWatcher) *ManifestWorkAgentClient { + return &ManifestWorkAgentClient{ + cloudEventsClient: cloudEventsClient, + watcher: watcher, + } +} + +func (c *ManifestWorkAgentClient) SetLister(lister workv1lister.ManifestWorkNamespaceLister) { + c.lister = lister +} + +func (c *ManifestWorkAgentClient) Create(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.CreateOptions) (*workv1.ManifestWork, error) { + return nil, errors.NewMethodNotSupported(manifestWorkGR, "create") +} + +func (c *ManifestWorkAgentClient) Update(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.UpdateOptions) (*workv1.ManifestWork, error) { + return nil, errors.NewMethodNotSupported(manifestWorkGR, "update") +} + +func (c *ManifestWorkAgentClient) UpdateStatus(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.UpdateOptions) (*workv1.ManifestWork, error) { + return nil, errors.NewMethodNotSupported(manifestWorkGR, "updatestatus") +} + +func (c 
*ManifestWorkAgentClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return errors.NewMethodNotSupported(manifestWorkGR, "delete") +} + +func (c *ManifestWorkAgentClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + return errors.NewMethodNotSupported(manifestWorkGR, "deletecollection") +} + +func (c *ManifestWorkAgentClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*workv1.ManifestWork, error) { + klog.V(4).Infof("getting manifestwork %s", name) + return c.lister.Get(name) +} + +func (c *ManifestWorkAgentClient) List(ctx context.Context, opts metav1.ListOptions) (*workv1.ManifestWorkList, error) { + klog.V(4).Infof("sync manifestworks") + // send resync request to fetch manifestworks from source when the ManifestWorkInformer starts + if err := c.cloudEventsClient.Resync(ctx); err != nil { + return nil, err + } + + return &workv1.ManifestWorkList{}, nil +} + +func (c *ManifestWorkAgentClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + // TODO (skeeey) consider resync the manifestworks when the ManifestWorkInformer reconnected + return c.watcher, nil +} + +func (c *ManifestWorkAgentClient) Patch(ctx context.Context, name string, pt kubetypes.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *workv1.ManifestWork, err error) { + klog.V(4).Infof("patching manifestwork %s", name) + + lastWork, err := c.lister.Get(name) + if err != nil { + return nil, err + } + + patchedWork, err := utils.Patch(pt, lastWork, data) + if err != nil { + return nil, err + } + + eventDataType, err := types.ParseCloudEventsDataType(patchedWork.Annotations[codec.CloudEventsDataTypeAnnotationKey]) + if err != nil { + return nil, err + } + + eventType := types.CloudEventsType{ + CloudEventsDataType: *eventDataType, + SubResource: types.SubResourceStatus, + } + + newWork := patchedWork.DeepCopy() + + statusUpdated, 
err := isStatusUpdate(subresources) + if err != nil { + return nil, err + } + + if statusUpdated { + eventType.Action = UpdateRequestAction + if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { + return nil, err + } + + // refresh the work status in the ManifestWorkInformer local cache with patched work. + c.watcher.Receive(watch.Event{Type: watch.Modified, Object: newWork}) + return newWork, nil + } + + // the finalizers of a deleting manifestwork are removed, marking the manifestwork status to deleted and sending + // it back to source + if !newWork.DeletionTimestamp.IsZero() && len(newWork.Finalizers) == 0 { + meta.SetStatusCondition(&newWork.Status.Conditions, metav1.Condition{ + Type: ManifestsDeleted, + Status: metav1.ConditionTrue, + Reason: "ManifestsDeleted", + Message: fmt.Sprintf("The manifests are deleted from the cluster %s", newWork.Namespace), + }) + + eventType.Action = DeleteRequestAction + if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { + return nil, err + } + + // delete the manifestwork from the ManifestWorkInformer local cache. + c.watcher.Receive(watch.Event{Type: watch.Deleted, Object: newWork}) + return newWork, nil + } + + // refresh the work in the ManifestWorkInformer local cache with patched work. 
+ c.watcher.Receive(watch.Event{Type: watch.Modified, Object: newWork}) + return newWork, nil +} + +func isStatusUpdate(subresources []string) (bool, error) { + if len(subresources) == 0 { + return false, nil + } + + if len(subresources) == 1 && subresources[0] == "status" { + return true, nil + } + + return false, fmt.Errorf("unsupported subresources %v", subresources) +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/agent/codec/manifest.go b/vendor/open-cluster-management.io/api/cloudevents/work/agent/codec/manifest.go new file mode 100644 index 000000000..515bb1962 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/agent/codec/manifest.go @@ -0,0 +1,184 @@ +package codec + +import ( + "fmt" + "strconv" + + cloudevents "github.com/cloudevents/sdk-go/v2" + cloudeventstypes "github.com/cloudevents/sdk-go/v2/types" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + kubetypes "k8s.io/apimachinery/pkg/types" + + "open-cluster-management.io/api/cloudevents/generic/types" + "open-cluster-management.io/api/cloudevents/work/payload" + "open-cluster-management.io/api/utils/work/v1/utils" + "open-cluster-management.io/api/utils/work/v1/workvalidator" + workv1 "open-cluster-management.io/api/work/v1" +) + +const ( + // CloudEventsDataTypeAnnotationKey is the key of the cloudevents data type annotation. + CloudEventsDataTypeAnnotationKey = "cloudevents.open-cluster-management.io/datatype" + + // CloudEventsDataTypeAnnotationKey is the key of the cloudevents original source annotation. + CloudEventsOriginalSourceAnnotationKey = "cloudevents.open-cluster-management.io/originalsource" +) + +// ManifestCodec is a codec to encode/decode a ManifestWork/cloudevent with ManifestBundle for an agent. 
+type ManifestCodec struct { + restMapper meta.RESTMapper +} + +func NewManifestCodec(restMapper meta.RESTMapper) *ManifestCodec { + return &ManifestCodec{ + restMapper: restMapper, + } +} + +// EventDataType returns the event data type for `io.open-cluster-management.works.v1alpha1.manifests`. +func (c *ManifestCodec) EventDataType() types.CloudEventsDataType { + return payload.ManifestEventDataType +} + +// Encode the status of a ManifestWork to a cloudevent with ManifestStatus. +func (c *ManifestCodec) Encode(source string, eventType types.CloudEventsType, work *workv1.ManifestWork) (*cloudevents.Event, error) { + if eventType.CloudEventsDataType != payload.ManifestEventDataType { + return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) + } + + resourceVersion, err := strconv.ParseInt(work.ResourceVersion, 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse the resourceversion of the work %s, %v", work.UID, err) + } + + originalSource, ok := work.Annotations[CloudEventsOriginalSourceAnnotationKey] + if !ok { + return nil, fmt.Errorf("failed to find originalsource from the work %s", work.UID) + } + + if len(work.Spec.Workload.Manifests) != 1 { + return nil, fmt.Errorf("too many manifests in the work %s", work.UID) + } + + evt := types.NewEventBuilder(source, eventType). + WithResourceID(string(work.UID)). + WithResourceVersion(resourceVersion). + WithClusterName(work.Namespace). + WithOriginalSource(originalSource). + NewEvent() + + statusPayload := &payload.ManifestStatus{ + Conditions: work.Status.Conditions, + } + + if len(work.Status.ResourceStatus.Manifests) != 0 { + statusPayload.Status = &work.Status.ResourceStatus.Manifests[0] + } + + if err := evt.SetData(cloudevents.ApplicationJSON, statusPayload); err != nil { + return nil, fmt.Errorf("failed to encode manifestwork status to a cloudevent: %v", err) + } + + return &evt, nil +} + +// Decode a cloudevent whose data is Manifest to a ManifestWork. 
+func (c *ManifestCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWork, error) { + eventType, err := types.ParseCloudEventsType(evt.Type()) + if err != nil { + return nil, fmt.Errorf("failed to parse cloud event type %s, %v", evt.Type(), err) + } + + if eventType.CloudEventsDataType != payload.ManifestEventDataType { + return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) + } + + evtExtensions := evt.Context.GetExtensions() + + resourceID, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceID]) + if err != nil { + return nil, fmt.Errorf("failed to get resourceid extension: %v", err) + } + + resourceVersion, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceVersion]) + if err != nil { + return nil, fmt.Errorf("failed to get resourceversion extension: %v", err) + } + + clusterName, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionClusterName]) + if err != nil { + return nil, fmt.Errorf("failed to get clustername extension: %v", err) + } + + work := &workv1.ManifestWork{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + UID: kubetypes.UID(resourceID), + ResourceVersion: resourceVersion, + Name: resourceID, + Namespace: clusterName, + Annotations: map[string]string{ + CloudEventsDataTypeAnnotationKey: eventType.CloudEventsDataType.String(), + CloudEventsOriginalSourceAnnotationKey: evt.Source(), + }, + }, + } + + if _, ok := evtExtensions[types.ExtensionDeletionTimestamp]; ok { + deletionTimestamp, err := cloudeventstypes.ToTime(evtExtensions[types.ExtensionDeletionTimestamp]) + if err != nil { + return nil, fmt.Errorf("failed to get deletiontimestamp, %v", err) + } + + work.DeletionTimestamp = &metav1.Time{Time: deletionTimestamp} + return work, nil + } + + manifestPayload := &payload.Manifest{} + if err := evt.DataAs(manifestPayload); err != nil { + return nil, fmt.Errorf("failed to unmarshal event data %s, %v", string(evt.Data()), err) + } + + 
unstructuredObj := manifestPayload.Manifest + rawJson, err := unstructuredObj.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("failed to get manifest GVR from event %s, %v", string(evt.Data()), err) + } + + work.Spec = workv1.ManifestWorkSpec{ + Workload: workv1.ManifestsTemplate{ + Manifests: []workv1.Manifest{{RawExtension: runtime.RawExtension{Raw: rawJson}}}, + }, + DeleteOption: manifestPayload.DeleteOption, + } + + if manifestPayload.ConfigOption != nil { + _, gvr, err := utils.BuildResourceMeta(0, &unstructuredObj, c.restMapper) + if err != nil { + return nil, fmt.Errorf("failed to get manifest GVR from event %s, %v", string(evt.Data()), err) + } + + work.Spec.ManifestConfigs = []workv1.ManifestConfigOption{ + { + ResourceIdentifier: workv1.ResourceIdentifier{ + Group: gvr.Group, + Resource: gvr.Resource, + Name: unstructuredObj.GetName(), + Namespace: unstructuredObj.GetNamespace(), + }, + FeedbackRules: manifestPayload.ConfigOption.FeedbackRules, + UpdateStrategy: manifestPayload.ConfigOption.UpdateStrategy, + }, + } + } + + // validate the manifest + if err := workvalidator.ManifestValidator.ValidateManifests(work.Spec.Workload.Manifests); err != nil { + return nil, fmt.Errorf("manifest is invalid, %v", err) + } + + return work, nil +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/agent/codec/manifestbundle.go b/vendor/open-cluster-management.io/api/cloudevents/work/agent/codec/manifestbundle.go new file mode 100644 index 000000000..7eb10b668 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/agent/codec/manifestbundle.go @@ -0,0 +1,137 @@ +package codec + +import ( + "fmt" + "strconv" + + cloudevents "github.com/cloudevents/sdk-go/v2" + cloudeventstypes "github.com/cloudevents/sdk-go/v2/types" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubetypes "k8s.io/apimachinery/pkg/types" + + "open-cluster-management.io/api/cloudevents/generic/types" + 
"open-cluster-management.io/api/cloudevents/work/payload" + "open-cluster-management.io/api/utils/work/v1/workvalidator" + workv1 "open-cluster-management.io/api/work/v1" +) + +// ManifestBundleCodec is a codec to encode/decode a ManifestWork/cloudevent with ManifestBundle for an agent. +type ManifestBundleCodec struct{} + +func NewManifestBundleCodec() *ManifestBundleCodec { + return &ManifestBundleCodec{} +} + +// EventDataType always returns the event data type `io.open-cluster-management.works.v1alpha1.manifestbundles`. +func (c *ManifestBundleCodec) EventDataType() types.CloudEventsDataType { + return payload.ManifestBundleEventDataType +} + +// Encode the status of a ManifestWork to a cloudevent with ManifestBundleStatus. +func (c *ManifestBundleCodec) Encode(source string, eventType types.CloudEventsType, work *workv1.ManifestWork) (*cloudevents.Event, error) { + if eventType.CloudEventsDataType != payload.ManifestBundleEventDataType { + return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) + } + + resourceVersion, err := strconv.ParseInt(work.ResourceVersion, 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse the resourceversion of the work %s, %v", work.UID, err) + } + + originalSource, ok := work.Annotations[CloudEventsOriginalSourceAnnotationKey] + if !ok { + return nil, fmt.Errorf("failed to find originalsource from the work %s", work.UID) + } + + evt := types.NewEventBuilder(source, eventType). + WithResourceID(string(work.UID)). + WithResourceVersion(resourceVersion). + WithClusterName(work.Namespace). + WithOriginalSource(originalSource). 
+ NewEvent() + + manifestBundleStatus := &payload.ManifestBundleStatus{ + Conditions: work.Status.Conditions, + ResourceStatus: work.Status.ResourceStatus.Manifests, + } + + if err := evt.SetData(cloudevents.ApplicationJSON, manifestBundleStatus); err != nil { + return nil, fmt.Errorf("failed to encode manifestwork status to a cloudevent: %v", err) + } + + return &evt, nil +} + +// Decode a cloudevent whose data is ManifestBundle to a ManifestWork. +func (c *ManifestBundleCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWork, error) { + eventType, err := types.ParseCloudEventsType(evt.Type()) + if err != nil { + return nil, fmt.Errorf("failed to parse cloud event type %s, %v", evt.Type(), err) + } + + if eventType.CloudEventsDataType != payload.ManifestBundleEventDataType { + return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) + } + + evtExtensions := evt.Context.GetExtensions() + + resourceID, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceID]) + if err != nil { + return nil, fmt.Errorf("failed to get resourceid extension: %v", err) + } + + resourceVersion, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceVersion]) + if err != nil { + return nil, fmt.Errorf("failed to get resourceversion extension: %v", err) + } + + clusterName, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionClusterName]) + if err != nil { + return nil, fmt.Errorf("failed to get clustername extension: %v", err) + } + + work := &workv1.ManifestWork{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + UID: kubetypes.UID(resourceID), + ResourceVersion: resourceVersion, + Name: resourceID, + Namespace: clusterName, + Annotations: map[string]string{ + CloudEventsDataTypeAnnotationKey: eventType.CloudEventsDataType.String(), + CloudEventsOriginalSourceAnnotationKey: evt.Source(), + }, + }, + } + + if _, ok := evtExtensions[types.ExtensionDeletionTimestamp]; ok { + deletionTimestamp, 
err := cloudeventstypes.ToTime(evtExtensions[types.ExtensionDeletionTimestamp]) + if err != nil { + return nil, fmt.Errorf("failed to get deletiontimestamp, %v", err) + } + + work.DeletionTimestamp = &metav1.Time{Time: deletionTimestamp} + return work, nil + } + + manifests := &payload.ManifestBundle{} + if err := evt.DataAs(manifests); err != nil { + return nil, fmt.Errorf("failed to unmarshal event data %s, %v", string(evt.Data()), err) + } + + work.Spec = workv1.ManifestWorkSpec{ + Workload: workv1.ManifestsTemplate{ + Manifests: manifests.Manifests, + }, + DeleteOption: manifests.DeleteOption, + ManifestConfigs: manifests.ManifestConfigs, + } + + // validate the manifests + if err := workvalidator.ManifestValidator.ValidateManifests(work.Spec.Workload.Manifests); err != nil { + return nil, fmt.Errorf("manifests are invalid, %v", err) + } + + return work, nil +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/agent/handler/resourcehandler.go b/vendor/open-cluster-management.io/api/cloudevents/work/agent/handler/resourcehandler.go new file mode 100644 index 000000000..986f9ccab --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/agent/handler/resourcehandler.go @@ -0,0 +1,76 @@ +package handler + +import ( + "fmt" + "strconv" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog/v2" + workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" + + "open-cluster-management.io/api/cloudevents/generic" + "open-cluster-management.io/api/cloudevents/generic/types" + "open-cluster-management.io/api/cloudevents/work/watcher" + workv1 "open-cluster-management.io/api/work/v1" +) + +// NewManifestWorkAgentHandler returns a ResourceHandler for a ManifestWork on managed cluster. It sends the kube events +// with ManifestWorWatcher after CloudEventAgentClient received the ManifestWork specs from source, then the +// ManifestWorkInformer handles the kube events in its local cache. 
+func NewManifestWorkAgentHandler(lister workv1lister.ManifestWorkNamespaceLister, watcher *watcher.ManifestWorkWatcher) generic.ResourceHandler[*workv1.ManifestWork] { + return func(action types.ResourceAction, work *workv1.ManifestWork) error { + switch action { + case types.Added: + watcher.Receive(watch.Event{Type: watch.Added, Object: work}) + case types.Modified: + lastWork, err := lister.Get(work.Name) + if err != nil { + return err + } + + resourceVersion, err := strconv.ParseInt(work.ResourceVersion, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse the resourceVersion of the manifestwork %s, %v", work.Name, err) + } + + lastResourceVersion, err := strconv.ParseInt(lastWork.ResourceVersion, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse the resourceVersion of the manifestwork %s, %v", lastWork.Name, err) + } + + if resourceVersion <= lastResourceVersion { + klog.Infof("The work %s resource version is less than or equal to cached, ignore", work.Name) + return nil + } + + updatedWork := work.DeepCopy() + + // restore the fields that are maintained by local agent + updatedWork.Labels = lastWork.Labels + updatedWork.Annotations = lastWork.Annotations + updatedWork.Finalizers = lastWork.Finalizers + updatedWork.Status = lastWork.Status + + watcher.Receive(watch.Event{Type: watch.Modified, Object: updatedWork}) + case types.Deleted: + // the manifestwork is deleting on the source, we just update its deletion timestamp. 
+ lastWork, err := lister.Get(work.Name) + if errors.IsNotFound(err) { + return nil + } + + if err != nil { + return err + } + + updatedWork := lastWork.DeepCopy() + updatedWork.DeletionTimestamp = work.DeletionTimestamp + watcher.Receive(watch.Event{Type: watch.Modified, Object: updatedWork}) + default: + return fmt.Errorf("unsupported resource action %s", action) + } + + return nil + } +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/clientbuilder.go b/vendor/open-cluster-management.io/api/cloudevents/work/clientbuilder.go new file mode 100644 index 000000000..810bf2758 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/clientbuilder.go @@ -0,0 +1,152 @@ +package work + +import ( + "context" + "fmt" + "time" + + "k8s.io/client-go/rest" + + workclientset "open-cluster-management.io/api/client/work/clientset/versioned" + workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" + workinformers "open-cluster-management.io/api/client/work/informers/externalversions" + workv1informers "open-cluster-management.io/api/client/work/informers/externalversions/work/v1" + "open-cluster-management.io/api/cloudevents/generic" + "open-cluster-management.io/api/cloudevents/generic/options/mqtt" + agentclient "open-cluster-management.io/api/cloudevents/work/agent/client" + agenthandler "open-cluster-management.io/api/cloudevents/work/agent/handler" + "open-cluster-management.io/api/cloudevents/work/internal" + "open-cluster-management.io/api/cloudevents/work/watcher" + workv1 "open-cluster-management.io/api/work/v1" +) + +const defaultInformerResyncTime = 10 * time.Minute + +// ClientHolder holds a manifestwork client that implements the ManifestWorkInterface based on different configuration +// and a ManifestWorkInformer that is built with the manifestWork client. +// +// ClientHolder also implements the ManifestWorksGetter interface. 
+type ClientHolder struct { + workClient workv1client.WorkV1Interface + manifestWorkInformer workv1informers.ManifestWorkInformer +} + +var _ workv1client.ManifestWorksGetter = &ClientHolder{} + +// ManifestWorks returns a ManifestWorkInterface +func (h *ClientHolder) ManifestWorks(namespace string) workv1client.ManifestWorkInterface { + return h.workClient.ManifestWorks(namespace) +} + +// ManifestWorkInformer returns a ManifestWorkInformer +func (h *ClientHolder) ManifestWorkInformer() workv1informers.ManifestWorkInformer { + return h.manifestWorkInformer +} + +// ClientHolderBuilder builds the ClientHolder with different configuration. +type ClientHolderBuilder struct { + config any + codecs []generic.Codec[*workv1.ManifestWork] + informerOptions []workinformers.SharedInformerOption + informerResyncTime time.Duration + clusterName string + clientID string +} + +// NewClientHolderBuilder returns a ClientHolderBuilder with a given configuration. +// +// Available configurations: +// - Kubeconfig (*rest.Config): builds a manifestwork client with kubeconfig +// - MQTTOptions (*mqtt.MQTTOptions): builds a manifestwork client based on cloudevents with MQTT +func NewClientHolderBuilder(clientID string, config any) *ClientHolderBuilder { + return &ClientHolderBuilder{ + clientID: clientID, + config: config, + informerResyncTime: defaultInformerResyncTime, + } +} + +// WithClusterName set the managed cluster name when building a manifestwork client for an agent. +func (b *ClientHolderBuilder) WithClusterName(clusterName string) *ClientHolderBuilder { + b.clusterName = clusterName + return b +} + +// WithCodecs add codecs when building a manifestwork client based on cloudevents. +func (b *ClientHolderBuilder) WithCodecs(codecs ...generic.Codec[*workv1.ManifestWork]) *ClientHolderBuilder { + b.codecs = codecs + return b +} + +// WithInformerConfig set the ManifestWorkInformer configs. 
If the resync time is not set, the default time (10 minutes) +// will be used when building the ManifestWorkInformer. +func (b *ClientHolderBuilder) WithInformerConfig( + resyncTime time.Duration, options ...workinformers.SharedInformerOption) *ClientHolderBuilder { + b.informerResyncTime = resyncTime + b.informerOptions = options + return b +} + +// NewClientHolder returns a ClientHolder for works. +func (b *ClientHolderBuilder) NewClientHolder(ctx context.Context) (*ClientHolder, error) { + switch config := b.config.(type) { + case *rest.Config: + kubeWorkClientSet, err := workclientset.NewForConfig(config) + if err != nil { + return nil, err + } + + factory := workinformers.NewSharedInformerFactoryWithOptions(kubeWorkClientSet, b.informerResyncTime, b.informerOptions...) + + return &ClientHolder{ + workClient: kubeWorkClientSet.WorkV1(), + manifestWorkInformer: factory.Work().V1().ManifestWorks(), + }, nil + case *mqtt.MQTTOptions: + if len(b.clusterName) != 0 { + return b.newAgentClients(ctx, config) + } + + //TODO build manifestwork clients for source + return nil, nil + default: + return nil, fmt.Errorf("unsupported client configuration type %T", config) + } +} + +func (b *ClientHolderBuilder) newAgentClients(ctx context.Context, config *mqtt.MQTTOptions) (*ClientHolder, error) { + workLister := &ManifestWorkLister{} + watcher := watcher.NewManifestWorkWatcher() + agentOptions := mqtt.NewAgentOptions(config, b.clusterName, b.clientID) + cloudEventsClient, err := generic.NewCloudEventAgentClient[*workv1.ManifestWork]( + ctx, + agentOptions, + workLister, + ManifestWorkStatusHash, + b.codecs..., + ) + if err != nil { + return nil, err + } + + manifestWorkClient := agentclient.NewManifestWorkAgentClient(cloudEventsClient, watcher) + workClient := &internal.WorkV1ClientWrapper{ManifestWorkClient: manifestWorkClient} + workClientSet := &internal.WorkClientSetWrapper{WorkV1ClientWrapper: workClient} + factory := 
workinformers.NewSharedInformerFactoryWithOptions(workClientSet, b.informerResyncTime, b.informerOptions...) + informers := factory.Work().V1().ManifestWorks() + manifestWorkLister := informers.Lister() + namespacedLister := manifestWorkLister.ManifestWorks(b.clusterName) + + // Set informer lister back to work lister and client. + workLister.Lister = manifestWorkLister + // TODO the work client and informer share a same store in the current implementation, ideally, the store should be + // only written from the server. we may need to revisit the implementation in the future. + manifestWorkClient.SetLister(namespacedLister) + + cloudEventsClient.Subscribe(ctx, agenthandler.NewManifestWorkAgentHandler(namespacedLister, watcher)) + + return &ClientHolder{ + workClient: workClient, + manifestWorkInformer: informers, + }, nil +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/internal/clientset.go b/vendor/open-cluster-management.io/api/cloudevents/work/internal/clientset.go new file mode 100644 index 000000000..9b629d83f --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/internal/clientset.go @@ -0,0 +1,50 @@ +package internal + +import ( + discovery "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + + workclientset "open-cluster-management.io/api/client/work/clientset/versioned" + workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" + workv1alpha1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1alpha1" +) + +// WorkClientSetWrapper wraps a work client that has a manifestwork client to a work clientset interface, this wrapper +// will helps us to build manifestwork informer factory easily. 
+type WorkClientSetWrapper struct { + WorkV1ClientWrapper *WorkV1ClientWrapper +} + +var _ workclientset.Interface = &WorkClientSetWrapper{} + +func (c *WorkClientSetWrapper) WorkV1() workv1client.WorkV1Interface { + return c.WorkV1ClientWrapper +} + +func (c *WorkClientSetWrapper) WorkV1alpha1() workv1alpha1client.WorkV1alpha1Interface { + return nil +} + +func (c *WorkClientSetWrapper) Discovery() discovery.DiscoveryInterface { + return nil +} + +// WorkV1ClientWrapper wraps a manifestwork client to a WorkV1Interface +type WorkV1ClientWrapper struct { + ManifestWorkClient workv1client.ManifestWorkInterface +} + +var _ workv1client.WorkV1Interface = &WorkV1ClientWrapper{} + +func (c *WorkV1ClientWrapper) ManifestWorks(namespace string) workv1client.ManifestWorkInterface { + // TODO if the ManifestWorkClient is ManifestWorkSourceClient, we need set namespace here + return c.ManifestWorkClient +} + +func (c *WorkV1ClientWrapper) AppliedManifestWorks() workv1client.AppliedManifestWorkInterface { + return nil +} + +func (c *WorkV1ClientWrapper) RESTClient() rest.Interface { + return nil +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/lister.go b/vendor/open-cluster-management.io/api/cloudevents/work/lister.go new file mode 100644 index 000000000..1f1fbda70 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/lister.go @@ -0,0 +1,19 @@ +package work + +import ( + "k8s.io/apimachinery/pkg/labels" + + workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" + "open-cluster-management.io/api/cloudevents/generic/types" + workv1 "open-cluster-management.io/api/work/v1" +) + +// ManifestWorkLister list the ManifestWorks from a ManifestWorkInformer's local cache. +type ManifestWorkLister struct { + Lister workv1lister.ManifestWorkLister +} + +// List returns the ManifestWorks from a ManifestWorkInformer's local cache. 
+func (l *ManifestWorkLister) List(options types.ListOptions) ([]*workv1.ManifestWork, error) { + return l.Lister.ManifestWorks(options.ClusterName).List(labels.Everything()) +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/payload/mainfiest.go b/vendor/open-cluster-management.io/api/cloudevents/work/payload/mainfiest.go new file mode 100644 index 000000000..c6ae992fe --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/payload/mainfiest.go @@ -0,0 +1,54 @@ +package payload + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "open-cluster-management.io/api/cloudevents/generic/types" + workv1 "open-cluster-management.io/api/work/v1" +) + +var ManifestEventDataType = types.CloudEventsDataType{ + Group: "io.open-cluster-management.works", + Version: "v1alpha1", + Resource: "manifests", +} + +// Manifest represents the data in a cloudevent, it contains a single manifest. +type Manifest struct { + // Manifest represents a resource to be deployed on managed cluster. + Manifest unstructured.Unstructured `json:"manifest"` + + // DeleteOption represents deletion strategy when this manifest is deleted. + DeleteOption *workv1.DeleteOption `json:"deleteOption,omitempty"` + + // ConfigOption represents the configuration of this manifest. + ConfigOption *ManifestConfigOption `json:"configOption,omitempty"` +} + +// ManifestStatus represents the data in a cloudevent, it contains the status of a SingleManifest on a managed +// cluster. +type ManifestStatus struct { + // Conditions contains the different condition statuses for a SingleManifest on a managed cluster. + // Valid condition types are: + // 1. Applied represents the manifest of a SingleManifest is applied successfully on a managed cluster. + // 2. Progressing represents the manifest of a SingleManifest is being applied on a managed cluster. + // 3. 
Available represents the manifest of a SingleManifest exists on the managed cluster. + // 4. Degraded represents the current state of manifest of a SingleManifest does not match the desired state for a + // certain period. + // 5. Deleted represents the manifests of a SingleManifest is deleted from a managed cluster. + Conditions []metav1.Condition `json:"conditions"` + + // Status represents the conditions of this manifest on a managed cluster. + Status *workv1.ManifestCondition `json:"status,omitempty"` +} + +type ManifestConfigOption struct { + // FeedbackRules defines what resource status field should be returned. + // If it is not set or empty, no feedback rules will be honored. + FeedbackRules []workv1.FeedbackRule `json:"feedbackRules,omitempty"` + + // UpdateStrategy defines the strategy to update this manifest. + // UpdateStrategy is Update if it is not set. + UpdateStrategy *workv1.UpdateStrategy `json:"updateStrategy,omitempty"` +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/payload/manifestbundle.go b/vendor/open-cluster-management.io/api/cloudevents/work/payload/manifestbundle.go new file mode 100644 index 000000000..60f63fa9a --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/payload/manifestbundle.go @@ -0,0 +1,43 @@ +package payload + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "open-cluster-management.io/api/cloudevents/generic/types" + workv1 "open-cluster-management.io/api/work/v1" +) + +var ManifestBundleEventDataType = types.CloudEventsDataType{ + Group: "io.open-cluster-management.works", + Version: "v1alpha1", + Resource: "manifestbundles", +} + +// ManifestBundle represents the data in a cloudevent, it contains a bundle of manifests. +type ManifestBundle struct { + // Manifests represents a list of Kuberenetes resources to be deployed on a managed cluster. 
+ Manifests []workv1.Manifest `json:"manifests"` + + // DeleteOption represents deletion strategy when the manifests are deleted. + DeleteOption *workv1.DeleteOption `json:"deleteOption,omitempty"` + + // ManifestConfigs represents the configurations of manifests. + ManifestConfigs []workv1.ManifestConfigOption `json:"manifestConfigs,omitempty"` +} + +// ManifestBundleStatus represents the data in a cloudevent, it contains the status of a ManifestBundle on a managed +// cluster. +type ManifestBundleStatus struct { + // Conditions contains the different condition statuses for a ManifestBundle on managed cluster. + // Valid condition types are: + // 1. Applied represents the manifests in a ManifestBundle are applied successfully on a managed cluster. + // 2. Progressing represents the manifests in a ManifestBundle are being applied on a managed cluster. + // 3. Available represents the manifests in a ManifestBundle exist on a managed cluster. + // 4. Degraded represents the current state of manifests in a ManifestBundle do not match the desired state for a + // certain period. + // 5. Deleted represents the manifests in a ManifestBundle are deleted from a managed cluster. + Conditions []metav1.Condition `json:"conditions"` + + // ManifestResourceStatus represents the status of each resource in manifest work deployed on managed cluster. + ResourceStatus []workv1.ManifestCondition `json:"resourceStatus,omitempty"` +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/statushash.go b/vendor/open-cluster-management.io/api/cloudevents/work/statushash.go new file mode 100644 index 000000000..642f78609 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/statushash.go @@ -0,0 +1,18 @@ +package work + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + + workv1 "open-cluster-management.io/api/work/v1" +) + +// ManifestWorkStatusHash returns the SHA256 checksum of a ManifestWork status. 
+func ManifestWorkStatusHash(work *workv1.ManifestWork) (string, error) { + statusBytes, err := json.Marshal(work.Status) + if err != nil { + return "", fmt.Errorf("failed to marshal work status, %v", err) + } + return fmt.Sprintf("%x", sha256.Sum256(statusBytes)), nil +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/utils/utils.go b/vendor/open-cluster-management.io/api/cloudevents/work/utils/utils.go new file mode 100644 index 000000000..7a4afcc72 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/utils/utils.go @@ -0,0 +1,47 @@ +package utils + +import ( + "encoding/json" + "fmt" + + jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/types" + workv1 "open-cluster-management.io/api/work/v1" +) + +// Patch applies the patch to a work with the patch type. +func Patch(patchType types.PatchType, work *workv1.ManifestWork, patchData []byte) (*workv1.ManifestWork, error) { + workData, err := json.Marshal(work) + if err != nil { + return nil, err + } + + var patchedData []byte + switch patchType { + case types.JSONPatchType: + var patchObj jsonpatch.Patch + patchObj, err = jsonpatch.DecodePatch(patchData) + if err != nil { + return nil, err + } + patchedData, err = patchObj.Apply(workData) + if err != nil { + return nil, err + } + + case types.MergePatchType: + patchedData, err = jsonpatch.MergePatch(workData, patchData) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unsupported patch type: %s", patchType) + } + + patchedWork := &workv1.ManifestWork{} + if err := json.Unmarshal(patchedData, patchedWork); err != nil { + return nil, err + } + + return patchedWork, nil +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/watcher/watcher.go b/vendor/open-cluster-management.io/api/cloudevents/work/watcher/watcher.go new file mode 100644 index 000000000..d22a7512a --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/watcher/watcher.go @@ -0,0 
+1,64 @@ +package watcher + +import ( + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog/v2" +) + +// ManifestWorkWatcher implements the watch.Interface. It returns a chan which will receive all the events. +type ManifestWorkWatcher struct { + sync.Mutex + + result chan watch.Event + done chan struct{} +} + +var _ watch.Interface = &ManifestWorkWatcher{} + +func NewManifestWorkWatcher() *ManifestWorkWatcher { + mw := &ManifestWorkWatcher{ + // It's easy for a consumer to add buffering via an extra + // goroutine/channel, but impossible for them to remove it, + // so nonbuffered is better. + result: make(chan watch.Event), + // If the watcher is externally stopped there is no receiver anymore + // and the send operations on the result channel, especially the + // error reporting might block forever. + // Therefore a dedicated stop channel is used to resolve this blocking. + done: make(chan struct{}), + } + + return mw +} + +// ResultChan implements Interface. +func (mw *ManifestWorkWatcher) ResultChan() <-chan watch.Event { + return mw.result +} + +// Stop implements Interface. +func (mw *ManifestWorkWatcher) Stop() { + // Call Close() exactly once by locking and setting a flag. + mw.Lock() + defer mw.Unlock() + // closing a closed channel always panics, therefore check before closing + select { + case <-mw.done: + close(mw.result) + default: + close(mw.done) + } +} + +// Receive a event from the work client and sends down the result channel. 
+func (mw *ManifestWorkWatcher) Receive(evt watch.Event) { + if klog.V(4).Enabled() { + obj, _ := meta.Accessor(evt.Object) + klog.V(4).Infof("Receive the event %v for %v", evt.Type, obj.GetName()) + } + + mw.result <- evt +} diff --git a/vendor/open-cluster-management.io/api/cluster/v1alpha1/types_rolloutstrategy.go b/vendor/open-cluster-management.io/api/cluster/v1alpha1/types_rolloutstrategy.go index 4fbe6524a..a887ffc3d 100644 --- a/vendor/open-cluster-management.io/api/cluster/v1alpha1/types_rolloutstrategy.go +++ b/vendor/open-cluster-management.io/api/cluster/v1alpha1/types_rolloutstrategy.go @@ -81,7 +81,7 @@ type RolloutConfig struct { // Default is that no failures are tolerated. // +kubebuilder:validation:Pattern="^((100|[0-9]{1,2})%|[0-9]+)$" // +kubebuilder:validation:XIntOrString - // +kubebuilder:default="0" + // +kubebuilder:default=0 // +optional MaxFailures intstr.IntOrString `json:"maxFailures,omitempty"` // Timeout defines how long the workload applier controller will wait until the workload reaches a diff --git a/vendor/open-cluster-management.io/api/utils/work/v1/utils/utils.go b/vendor/open-cluster-management.io/api/utils/work/v1/utils/utils.go new file mode 100644 index 000000000..32b4557c1 --- /dev/null +++ b/vendor/open-cluster-management.io/api/utils/work/v1/utils/utils.go @@ -0,0 +1,76 @@ +package utils + +import ( + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/scheme" + + workv1 "open-cluster-management.io/api/work/v1" +) + +var genericScheme = runtime.NewScheme() + +// BuildResourceMeta builds manifest resource meta for the object +func BuildResourceMeta( + index int, + object runtime.Object, + restMapper meta.RESTMapper) (workv1.ManifestResourceMeta, schema.GroupVersionResource, error) { + resourceMeta := workv1.ManifestResourceMeta{ + Ordinal: int32(index), + } + + if object == nil || 
reflect.ValueOf(object).IsNil() { + return resourceMeta, schema.GroupVersionResource{}, nil + } + + // set gvk + gvk, err := GuessObjectGroupVersionKind(object) + if err != nil { + return resourceMeta, schema.GroupVersionResource{}, err + } + resourceMeta.Group = gvk.Group + resourceMeta.Version = gvk.Version + resourceMeta.Kind = gvk.Kind + + // set namespace/name + if accessor, e := meta.Accessor(object); e != nil { + err = fmt.Errorf("cannot access metadata of %v: %w", object, e) + } else { + resourceMeta.Namespace = accessor.GetNamespace() + resourceMeta.Name = accessor.GetName() + } + + // set resource + if restMapper == nil { + return resourceMeta, schema.GroupVersionResource{}, err + } + mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return resourceMeta, schema.GroupVersionResource{}, fmt.Errorf("the server doesn't have a resource type %q", gvk.Kind) + } + + resourceMeta.Resource = mapping.Resource.Resource + return resourceMeta, mapping.Resource, err +} + +// GuessObjectGroupVersionKind returns GVK for the passed runtime object. 
+func GuessObjectGroupVersionKind(object runtime.Object) (*schema.GroupVersionKind, error) { + if gvk := object.GetObjectKind().GroupVersionKind(); len(gvk.Kind) > 0 { + return &gvk, nil + } + + if kinds, _, _ := scheme.Scheme.ObjectKinds(object); len(kinds) > 0 { + return &kinds[0], nil + } + + // otherwise fall back to genericScheme + if kinds, _, _ := genericScheme.ObjectKinds(object); len(kinds) > 0 { + return &kinds[0], nil + } + + return nil, fmt.Errorf("cannot get gvk of %v", object) +} diff --git a/vendor/open-cluster-management.io/api/utils/work/v1/workvalidator/validator.go b/vendor/open-cluster-management.io/api/utils/work/v1/workvalidator/validator.go new file mode 100644 index 000000000..04559069c --- /dev/null +++ b/vendor/open-cluster-management.io/api/utils/work/v1/workvalidator/validator.go @@ -0,0 +1,64 @@ +package workvalidator + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + workv1 "open-cluster-management.io/api/work/v1" +) + +type Validator struct { + limit int +} + +var ManifestValidator = &Validator{limit: 500 * 1024} // the default manifest limit is 500k. 
+ +func (m *Validator) WithLimit(limit int) { + m.limit = limit +} + +func (m *Validator) ValidateManifests(manifests []workv1.Manifest) error { + if len(manifests) == 0 { + return errors.NewBadRequest("Workload manifests should not be empty") + } + + totalSize := 0 + for _, manifest := range manifests { + totalSize = totalSize + manifest.Size() + } + + if totalSize > m.limit { + return fmt.Errorf("the size of manifests is %v bytes which exceeds the %v limit", totalSize, m.limit) + } + + for _, manifest := range manifests { + err := validateManifest(manifest.Raw) + if err != nil { + return err + } + } + + return nil +} + +func validateManifest(manifest []byte) error { + // If the manifest cannot be decoded, return err + unstructuredObj := &unstructured.Unstructured{} + err := unstructuredObj.UnmarshalJSON(manifest) + if err != nil { + return err + } + + // The object must have name specified, generateName is not allowed in manifestwork + if unstructuredObj.GetName() == "" { + return fmt.Errorf("name must be set in manifest") + } + + if unstructuredObj.GetGenerateName() != "" { + return fmt.Errorf("generateName must not be set in manifest") + } + + return nil +} diff --git a/vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml b/vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml index c5a083042..938832f75 100644 --- a/vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml +++ b/vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml @@ -341,7 +341,7 @@ spec: anyOf: - type: integer - type: string - default: "0" + default: 0 description: MaxFailures is a percentage or number of clusters in the current rollout that can fail before proceeding to the next rollout. 
MaxFailures is only @@ -435,7 +435,7 @@ spec: anyOf: - type: integer - type: string - default: "0" + default: 0 description: MaxFailures is a percentage or number of clusters in the current rollout that can fail before proceeding to the next rollout. MaxFailures is only @@ -519,7 +519,7 @@ spec: anyOf: - type: integer - type: string - default: "0" + default: 0 description: MaxFailures is a percentage or number of clusters in the current rollout that can fail before proceeding to the next rollout. MaxFailures is only