diff --git a/go.mod b/go.mod index 9133cbf1a..a3a72fc42 100644 --- a/go.mod +++ b/go.mod @@ -46,9 +46,12 @@ require ( github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995 // indirect + github.com/cloudevents/sdk-go/v2 v2.14.0 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect + github.com/eclipse/paho.golang v0.11.0 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/fatih/structs v1.1.0 // indirect diff --git a/go.sum b/go.sum index 738168798..9e67c4a74 100644 --- a/go.sum +++ b/go.sum @@ -68,6 +68,10 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995 h1:pXyRKZ0T5WoB6X9QnHS5cEyW0Got39bNQIECxGUKVO4= +github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995/go.mod h1:mz9oS2Yhh/S7cvrrsgGMMR+6Shy0ZyL2lDN1sHQO1wE= +github.com/cloudevents/sdk-go/v2 v2.14.0 h1:Nrob4FwVgi5L4tV9lhjzZcjYqFVyJzsA56CwPaPfv6s= +github.com/cloudevents/sdk-go/v2 v2.14.0/go.mod h1:xDmKfzNjM8gBvjaF8ijFjM1VYOVUEeUfapHMUX1T5To= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= @@ -87,6 +91,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/eclipse/paho.golang v0.11.0 h1:6Avu5dkkCfcB61/y1vx+XrPQ0oAl4TPYtY0uw3HbQdM= +github.com/eclipse/paho.golang v0.11.0/go.mod h1:rhrV37IEwauUyx8FHrvmXOKo+QRKng5ncoN1vJiJMcs= github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -205,6 +211,7 @@ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 
h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= @@ -481,6 +488,7 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= diff --git a/pkg/work/spoke/options.go b/pkg/work/spoke/options.go index b37d77f48..8c36a49f3 100644 --- a/pkg/work/spoke/options.go +++ b/pkg/work/spoke/options.go @@ -4,12 +4,14 @@ import ( "time" "github.com/spf13/pflag" + "open-cluster-management.io/api/cloudevents/generic/options/mqtt" ) // WorkloadAgentOptions defines the flags for workload agent type WorkloadAgentOptions struct { StatusSyncInterval time.Duration AppliedManifestWorkEvictionGracePeriod time.Duration + MQTTOptions *mqtt.MQTTOptions } // NewWorkloadAgentOptions returns the flags with default value set @@ -17,6 +19,7 @@ func NewWorkloadAgentOptions() *WorkloadAgentOptions { return &WorkloadAgentOptions{ StatusSyncInterval: 10 * time.Second, AppliedManifestWorkEvictionGracePeriod: 60 * time.Minute, + MQTTOptions: mqtt.NewMQTTOptions(), } } @@ -25,4 +28,6 @@ func (o *WorkloadAgentOptions) AddFlags(fs *pflag.FlagSet) { fs.DurationVar(&o.StatusSyncInterval, "status-sync-interval", o.StatusSyncInterval, "Interval to sync resource status to hub.") fs.DurationVar(&o.AppliedManifestWorkEvictionGracePeriod, "appliedmanifestwork-eviction-grace-period", o.AppliedManifestWorkEvictionGracePeriod, "Grace period for appliedmanifestwork eviction") + + o.MQTTOptions.AddFlags(fs) } diff --git a/pkg/work/spoke/spokeagent.go b/pkg/work/spoke/spokeagent.go index d9310142b..13d285325 100644 --- a/pkg/work/spoke/spokeagent.go +++ b/pkg/work/spoke/spokeagent.go @@ -2,10 +2,12 @@ package spoke import ( "context" + "fmt" "time" "github.com/openshift/library-go/pkg/controller/controllercmd" apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -14,6 +16,8 @@ import ( workclientset "open-cluster-management.io/api/client/work/clientset/versioned" workinformers "open-cluster-management.io/api/client/work/informers/externalversions" + cloudeventswork "open-cluster-management.io/api/cloudevents/work" + "open-cluster-management.io/api/cloudevents/work/agent/codec" ocmfeature "open-cluster-management.io/api/feature" commonoptions "open-cluster-management.io/ocm/pkg/common/options" @@ -53,26 +57,6 @@ func NewWorkAgentConfig(commonOpts *commonoptions.AgentOptions, opts *WorkloadAg // RunWorkloadAgent starts the controllers on agent to process work from hub. 
func (o *WorkAgentConfig) RunWorkloadAgent(ctx context.Context, controllerContext *controllercmd.ControllerContext) error { - // build hub client and informer - hubRestConfig, err := clientcmd.BuildConfigFromFlags("" /* leave masterurl as empty */, o.agentOptions.HubKubeconfigFile) - if err != nil { - return err - } - hubhash := helper.HubHash(hubRestConfig.Host) - - agentID := o.agentOptions.AgentID - if len(agentID) == 0 { - agentID = hubhash - } - - hubWorkClient, err := workclientset.NewForConfig(hubRestConfig) - if err != nil { - return err - } - // Only watch the cluster namespace on hub - workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(hubWorkClient, 5*time.Minute, - workinformers.WithNamespace(o.agentOptions.SpokeClusterName)) - // load spoke client config and create spoke clients, // the work agent may not running in the spoke/managed cluster. spokeRestConfig, err := o.agentOptions.SpokeKubeConfig(controllerContext.KubeConfig) @@ -107,10 +91,20 @@ func (o *WorkAgentConfig) RunWorkloadAgent(ctx context.Context, controllerContex return err } + // build hub client and informer + clientHolder, hubHash, agentID, err := o.buildHubClientHolder(ctx, o.agentOptions.SpokeClusterName, restMapper) + if err != nil { + return err + } + + hubWorkClient := clientHolder.ManifestWorks(o.agentOptions.SpokeClusterName) + hubWorkInformer := clientHolder.ManifestWorkInformer() + + // create controllers validator := auth.NewFactory( spokeRestConfig, spokeKubeClient, - workInformerFactory.Work().V1().ManifestWorks(), + hubWorkInformer, o.agentOptions.SpokeClusterName, controllerContext.EventRecorder, restMapper, @@ -121,20 +115,20 @@ func (o *WorkAgentConfig) RunWorkloadAgent(ctx context.Context, controllerContex spokeDynamicClient, spokeKubeClient, spokeAPIExtensionClient, - hubWorkClient.WorkV1().ManifestWorks(o.agentOptions.SpokeClusterName), - workInformerFactory.Work().V1().ManifestWorks(), - workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.agentOptions.SpokeClusterName), + hubWorkClient, + hubWorkInformer, + hubWorkInformer.Lister().ManifestWorks(o.agentOptions.SpokeClusterName), spokeWorkClient.WorkV1().AppliedManifestWorks(), spokeWorkInformerFactory.Work().V1().AppliedManifestWorks(), - hubhash, agentID, + hubHash, agentID, restMapper, validator, ) addFinalizerController := finalizercontroller.NewAddFinalizerController( controllerContext.EventRecorder, - hubWorkClient.WorkV1().ManifestWorks(o.agentOptions.SpokeClusterName), - workInformerFactory.Work().V1().ManifestWorks(), - workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.agentOptions.SpokeClusterName), + hubWorkClient, + hubWorkInformer, + hubWorkInformer.Lister().ManifestWorks(o.agentOptions.SpokeClusterName), ) appliedManifestWorkFinalizeController := finalizercontroller.NewAppliedManifestWorkFinalizeController( controllerContext.EventRecorder, @@ -145,42 +139,43 @@ func (o *WorkAgentConfig) RunWorkloadAgent(ctx context.Context, controllerContex ) manifestWorkFinalizeController := finalizercontroller.NewManifestWorkFinalizeController( controllerContext.EventRecorder, - hubWorkClient.WorkV1().ManifestWorks(o.agentOptions.SpokeClusterName), - workInformerFactory.Work().V1().ManifestWorks(), - workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.agentOptions.SpokeClusterName), + hubWorkClient, + hubWorkInformer, + hubWorkInformer.Lister().ManifestWorks(o.agentOptions.SpokeClusterName), spokeWorkClient.WorkV1().AppliedManifestWorks(), 
spokeWorkInformerFactory.Work().V1().AppliedManifestWorks(), - hubhash, + hubHash, ) unmanagedAppliedManifestWorkController := finalizercontroller.NewUnManagedAppliedWorkController( controllerContext.EventRecorder, - workInformerFactory.Work().V1().ManifestWorks(), - workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.agentOptions.SpokeClusterName), + hubWorkInformer, + hubWorkInformer.Lister().ManifestWorks(o.agentOptions.SpokeClusterName), spokeWorkClient.WorkV1().AppliedManifestWorks(), spokeWorkInformerFactory.Work().V1().AppliedManifestWorks(), o.workOptions.AppliedManifestWorkEvictionGracePeriod, - hubhash, agentID, + hubHash, agentID, ) appliedManifestWorkController := appliedmanifestcontroller.NewAppliedManifestWorkController( controllerContext.EventRecorder, spokeDynamicClient, - workInformerFactory.Work().V1().ManifestWorks(), - workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.agentOptions.SpokeClusterName), + hubWorkInformer, + hubWorkInformer.Lister().ManifestWorks(o.agentOptions.SpokeClusterName), spokeWorkClient.WorkV1().AppliedManifestWorks(), spokeWorkInformerFactory.Work().V1().AppliedManifestWorks(), - hubhash, + hubHash, ) availableStatusController := statuscontroller.NewAvailableStatusController( controllerContext.EventRecorder, spokeDynamicClient, - hubWorkClient.WorkV1().ManifestWorks(o.agentOptions.SpokeClusterName), - workInformerFactory.Work().V1().ManifestWorks(), - workInformerFactory.Work().V1().ManifestWorks().Lister().ManifestWorks(o.agentOptions.SpokeClusterName), + hubWorkClient, + hubWorkInformer, + hubWorkInformer.Lister().ManifestWorks(o.agentOptions.SpokeClusterName), o.workOptions.StatusSyncInterval, ) - go workInformerFactory.Start(ctx.Done()) go spokeWorkInformerFactory.Start(ctx.Done()) + go hubWorkInformer.Informer().Run(ctx.Done()) + go addFinalizerController.Run(ctx, 1) go appliedManifestWorkFinalizeController.Run(ctx, appliedManifestWorkFinalizeControllerWorkers) go unmanagedAppliedManifestWorkController.Run(ctx, 1) @@ -188,6 +183,62 @@ func (o *WorkAgentConfig) RunWorkloadAgent(ctx context.Context, controllerContex go manifestWorkController.Run(ctx, 1) go manifestWorkFinalizeController.Run(ctx, manifestWorkFinalizeControllerWorkers) go availableStatusController.Run(ctx, availableStatusControllerWorkers) + <-ctx.Done() + return nil } + +// To support consuming ManifestWorks from different drivers (like the Kubernetes apiserver or MQTT broker), we build +// ManifestWork client that implements the ManifestWorkInterface and ManifestWork informer based on different +// driver configuration. +// Refer to Event Based Manifestwork enhancement (https://github.com/open-cluster-management-io/enhancements/tree/main/enhancements/sig-architecture/224-event-based-manifestwork) +// to get more details. 
+func (o *WorkAgentConfig) buildHubClientHolder(ctx context.Context, + clusterName string, restMapper meta.RESTMapper) (*cloudeventswork.ClientHolder, string, string, error) { + if o.agentOptions.HubKubeconfigFile != "" && o.workOptions.MQTTOptions.BrokerHost != "" { + return nil, "", "", fmt.Errorf("the hub kubeconfig and MQTT broker cannot be specified at the same time") + } + + agentID := o.agentOptions.AgentID + if o.agentOptions.HubKubeconfigFile != "" { + hubRestConfig, err := clientcmd.BuildConfigFromFlags("", o.agentOptions.HubKubeconfigFile) + if err != nil { + return nil, "", "", err + } + + hubHash := helper.HubHash(hubRestConfig.Host) + if len(agentID) == 0 { + agentID = hubHash + } + + // Only watch the cluster namespace on hub + clientHolder, err := cloudeventswork.NewClientHolderBuilder(agentID, hubRestConfig). + WithInformerConfig(5*time.Minute, workinformers.WithNamespace(o.agentOptions.SpokeClusterName)). + NewClientHolder(ctx) + if err != nil { + return nil, "", "", err + } + + return clientHolder, hubHash, agentID, nil + } + + if o.workOptions.MQTTOptions.BrokerHost != "" { + hubHash := helper.HubHash(o.workOptions.MQTTOptions.BrokerHost) + if len(agentID) == 0 { + agentID = fmt.Sprintf("%s-work-agent", clusterName) + } + + clientHolder, err := cloudeventswork.NewClientHolderBuilder(agentID, o.workOptions.MQTTOptions). + WithClusterName(clusterName). + WithCodecs(codec.NewManifestCodec(restMapper)). + NewClientHolder(ctx) + if err != nil { + return nil, "", "", err + } + + return clientHolder, hubHash, agentID, nil + } + + return nil, "", "", fmt.Errorf("the hub kubeconfig or MQTT broker is not specified") +} diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/LICENSE b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/message.go b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/message.go new file mode 100644 index 000000000..8dd938545 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/message.go @@ -0,0 +1,119 @@ +/* + Copyright 2023 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package mqtt_paho + +import ( + "bytes" + "context" + "strings" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/eclipse/paho.golang/paho" +) + +const ( + prefix = "ce-" + contentType = "Content-Type" +) + +var specs = spec.WithPrefix(prefix) + +// Message represents a MQTT message. 
+// This message *can* be read several times safely +type Message struct { + internal *paho.Publish + version spec.Version + format format.Format +} + +// Check if Message implements binding.Message +var ( + _ binding.Message = (*Message)(nil) + _ binding.MessageMetadataReader = (*Message)(nil) +) + +func NewMessage(msg *paho.Publish) *Message { + var f format.Format + var v spec.Version + if msg.Properties != nil { + // Use properties.User["Content-type"] to determine if message is structured + if s := msg.Properties.User.Get(contentType); format.IsFormat(s) { + f = format.Lookup(s) + } else if s := msg.Properties.User.Get(specs.PrefixedSpecVersionName()); s != "" { + v = specs.Version(s) + } + } + return &Message{ + internal: msg, + version: v, + format: f, + } +} + +func (m *Message) ReadEncoding() binding.Encoding { + if m.version != nil { + return binding.EncodingBinary + } + if m.format != nil { + return binding.EncodingStructured + } + return binding.EncodingUnknown +} + +func (m *Message) ReadStructured(ctx context.Context, encoder binding.StructuredWriter) error { + if m.version != nil { + return binding.ErrNotStructured + } + if m.format == nil { + return binding.ErrNotStructured + } + return encoder.SetStructuredEvent(ctx, m.format, bytes.NewReader(m.internal.Payload)) +} + +func (m *Message) ReadBinary(ctx context.Context, encoder binding.BinaryWriter) (err error) { + if m.format != nil { + return binding.ErrNotBinary + } + + for _, userProperty := range m.internal.Properties.User { + if strings.HasPrefix(userProperty.Key, prefix) { + attr := m.version.Attribute(userProperty.Key) + if attr != nil { + err = encoder.SetAttribute(attr, userProperty.Value) + } else { + err = encoder.SetExtension(strings.TrimPrefix(userProperty.Key, prefix), userProperty.Value) + } + } else if userProperty.Key == contentType { + err = encoder.SetAttribute(m.version.AttributeFromKind(spec.DataContentType), string(userProperty.Value)) + } + if err != nil { + return + } + } + + if m.internal.Payload != nil { + return encoder.SetData(bytes.NewBuffer(m.internal.Payload)) + } + return nil +} + +func (m *Message) Finish(error) error { + return nil +} + +func (m *Message) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) { + attr := m.version.AttributeFromKind(k) + if attr != nil { + return attr, m.internal.Properties.User.Get(prefix + attr.Name()) + } + return nil, nil +} + +func (m *Message) GetExtension(name string) interface{} { + return m.internal.Properties.User.Get(prefix + name) +} diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/option.go b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/option.go new file mode 100644 index 000000000..955a16219 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/option.go @@ -0,0 +1,48 @@ +/* + Copyright 2023 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package mqtt_paho + +import ( + "fmt" + + "github.com/eclipse/paho.golang/paho" +) + +// Option is the function signature required to be considered an mqtt_paho.Option. +type Option func(*Protocol) error + +// WithConnect sets the paho.Connect configuration for the client. This option is not required. +func WithConnect(connOpt *paho.Connect) Option { + return func(p *Protocol) error { + if connOpt == nil { + return fmt.Errorf("the paho.Connect option must not be nil") + } + p.connOption = connOpt + return nil + } +} + +// WithPublish sets the paho.Publish configuration for the client. 
This option is required if you want to send messages. +func WithPublish(publishOpt *paho.Publish) Option { + return func(p *Protocol) error { + if publishOpt == nil { + return fmt.Errorf("the paho.Publish option must not be nil") + } + p.publishOption = publishOpt + return nil + } +} + +// WithSubscribe sets the paho.Subscribe configuration for the client. This option is required if you want to receive messages. +func WithSubscribe(subscribeOpt *paho.Subscribe) Option { + return func(p *Protocol) error { + if subscribeOpt == nil { + return fmt.Errorf("the paho.Subscribe option must not be nil") + } + p.subscribeOption = subscribeOpt + return nil + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/protocol.go b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/protocol.go new file mode 100644 index 000000000..261fc6c37 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/protocol.go @@ -0,0 +1,155 @@ +/* + Copyright 2023 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package mqtt_paho + +import ( + "context" + "fmt" + "io" + "sync" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/protocol" + "github.com/eclipse/paho.golang/paho" + + cecontext "github.com/cloudevents/sdk-go/v2/context" +) + +type Protocol struct { + client *paho.Client + config *paho.ClientConfig + connOption *paho.Connect + publishOption *paho.Publish + subscribeOption *paho.Subscribe + + // receiver + incoming chan *paho.Publish + // inOpen + openerMutex sync.Mutex + + closeChan chan struct{} +} + +var ( + _ protocol.Sender = (*Protocol)(nil) + _ protocol.Opener = (*Protocol)(nil) + _ protocol.Receiver = (*Protocol)(nil) + _ protocol.Closer = (*Protocol)(nil) +) + +func New(ctx context.Context, config *paho.ClientConfig, opts ...Option) (*Protocol, error) { + if config == nil { + return nil, fmt.Errorf("the paho.ClientConfig must not be nil") + } + + p := &Protocol{ + client: paho.NewClient(*config), + // default connect option + connOption: &paho.Connect{ + KeepAlive: 30, + CleanStart: true, + }, + incoming: make(chan *paho.Publish), + closeChan: make(chan struct{}), + } + if err := p.applyOptions(opts...); err != nil { + return nil, err + } + + // Connect to the MQTT broker + connAck, err := p.client.Connect(ctx, p.connOption) + if err != nil { + return nil, err + } + if connAck.ReasonCode != 0 { + return nil, fmt.Errorf("failed to connect to %q : %d - %q", p.client.Conn.RemoteAddr(), connAck.ReasonCode, + connAck.Properties.ReasonString) + } + + return p, nil +} + +func (p *Protocol) applyOptions(opts ...Option) error { + for _, fn := range opts { + if err := fn(p); err != nil { + return err + } + } + return nil +} + +func (p *Protocol) Send(ctx context.Context, m binding.Message, transformers ...binding.Transformer) error { + if p.publishOption == nil { + return fmt.Errorf("the paho.Publish option must not be nil") + } + + var err error + defer m.Finish(err) + + msg := p.publishOption + if cecontext.TopicFrom(ctx) != "" { + msg.Topic = cecontext.TopicFrom(ctx) + cecontext.WithTopic(ctx, "") + } + + err = WritePubMessage(ctx, m, msg, transformers...) 
+ if err != nil { + return err + } + + _, err = p.client.Publish(ctx, msg) + if err != nil { + return err + } + return err +} + +func (p *Protocol) OpenInbound(ctx context.Context) error { + if p.subscribeOption == nil { + return fmt.Errorf("the paho.Subscribe option must not be nil") + } + + p.openerMutex.Lock() + defer p.openerMutex.Unlock() + + logger := cecontext.LoggerFrom(ctx) + + p.client.Router = paho.NewSingleHandlerRouter(func(m *paho.Publish) { + p.incoming <- m + }) + + logger.Infof("subscribing to topics: %v", p.subscribeOption.Subscriptions) + _, err := p.client.Subscribe(ctx, p.subscribeOption) + if err != nil { + return err + } + + // Wait until external or internal context done + select { + case <-ctx.Done(): + case <-p.closeChan: + } + return p.client.Disconnect(&paho.Disconnect{ReasonCode: 0}) +} + +// Receive implements Receiver.Receive +func (p *Protocol) Receive(ctx context.Context) (binding.Message, error) { + select { + case m, ok := <-p.incoming: + if !ok { + return nil, io.EOF + } + msg := NewMessage(m) + return msg, nil + case <-ctx.Done(): + return nil, io.EOF + } +} + +func (p *Protocol) Close(ctx context.Context) error { + close(p.closeChan) + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/write_message.go b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/write_message.go new file mode 100644 index 000000000..a4b87f4aa --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2/write_message.go @@ -0,0 +1,133 @@ +/* +Copyright 2023 The CloudEvents Authors +SPDX-License-Identifier: Apache-2.0 +*/ + +package mqtt_paho + +import ( + "bytes" + "context" + "io" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/types" + "github.com/eclipse/paho.golang/paho" +) + +// WritePubMessage fills the provided pubMessage with the message m. +// Using context you can tweak the encoding processing (more details on binding.Write documentation). 
+func WritePubMessage(ctx context.Context, m binding.Message, pubMessage *paho.Publish, transformers ...binding.Transformer) error { + structuredWriter := (*pubMessageWriter)(pubMessage) + binaryWriter := (*pubMessageWriter)(pubMessage) + + _, err := binding.Write( + ctx, + m, + structuredWriter, + binaryWriter, + transformers..., + ) + return err +} + +type pubMessageWriter paho.Publish + +var ( + _ binding.StructuredWriter = (*pubMessageWriter)(nil) + _ binding.BinaryWriter = (*pubMessageWriter)(nil) +) + +func (b *pubMessageWriter) SetStructuredEvent(ctx context.Context, f format.Format, event io.Reader) error { + if b.Properties == nil { + b.Properties = &paho.PublishProperties{ + User: make([]paho.UserProperty, 0), + } + } + b.Properties.User.Add(contentType, f.MediaType()) + var buf bytes.Buffer + _, err := io.Copy(&buf, event) + if err != nil { + return err + } + b.Payload = buf.Bytes() + return nil +} + +func (b *pubMessageWriter) Start(ctx context.Context) error { + if b.Properties == nil { + b.Properties = &paho.PublishProperties{} + } + // the UserProperties of publish message is used to load event extensions + b.Properties.User = make([]paho.UserProperty, 0) + return nil +} + +func (b *pubMessageWriter) End(ctx context.Context) error { + return nil +} + +func (b *pubMessageWriter) SetData(reader io.Reader) error { + buf, ok := reader.(*bytes.Buffer) + if !ok { + buf = new(bytes.Buffer) + _, err := io.Copy(buf, reader) + if err != nil { + return err + } + } + b.Payload = buf.Bytes() + return nil +} + +func (b *pubMessageWriter) SetAttribute(attribute spec.Attribute, value interface{}) error { + if attribute.Kind() == spec.DataContentType { + if value == nil { + b.removeProperty(contentType) + } + s, err := types.Format(value) + if err != nil { + return err + } + if err := b.addProperty(contentType, s); err != nil { + return err + } + } else { + if value == nil { + b.removeProperty(prefix + attribute.Name()) + } + return b.addProperty(prefix+attribute.Name(), value) + } + return nil +} + +func (b *pubMessageWriter) SetExtension(name string, value interface{}) error { + if value == nil { + b.removeProperty(prefix + name) + } + return b.addProperty(prefix+name, value) +} + +func (b *pubMessageWriter) removeProperty(key string) { + for i, v := range b.Properties.User { + if v.Key == key { + b.Properties.User = append(b.Properties.User[:i], b.Properties.User[i+1:]...) + break + } + } +} + +func (b *pubMessageWriter) addProperty(key string, value interface{}) error { + s, err := types.Format(value) + if err != nil { + return err + } + + b.Properties.User = append(b.Properties.User, paho.UserProperty{ + Key: key, + Value: s, + }) + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/LICENSE b/vendor/github.com/cloudevents/sdk-go/v2/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
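Note: the four files vendored above (message.go, option.go, protocol.go, write_message.go) make up the CloudEvents MQTT protocol binding that the new agent-side ManifestWork client builds on. As a rough orientation, the sketch below shows how this binding is typically wired into a CloudEvents client to publish a single event over MQTT. It is not part of this change; the broker address, client ID, topic, and event attributes are placeholders.

package main

import (
	"context"
	"log"
	"net"

	mqtt_paho "github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2"
	cloudevents "github.com/cloudevents/sdk-go/v2"
	"github.com/eclipse/paho.golang/paho"
)

func main() {
	ctx := context.Background()

	// Illustrative broker address; any MQTT v5 broker reachable over TCP works here.
	conn, err := net.Dial("tcp", "127.0.0.1:1883")
	if err != nil {
		log.Fatalf("failed to connect to the MQTT broker: %v", err)
	}

	// New() connects to the broker; WithPublish supplies the default publish
	// options (topic, QoS) that Send() uses for outgoing messages.
	p, err := mqtt_paho.New(ctx,
		&paho.ClientConfig{ClientID: "example-sender", Conn: conn},
		mqtt_paho.WithPublish(&paho.Publish{Topic: "sources/example/events", QoS: 0}),
	)
	if err != nil {
		log.Fatalf("failed to create the MQTT protocol: %v", err)
	}
	defer p.Close(ctx)

	c, err := cloudevents.NewClient(p)
	if err != nil {
		log.Fatalf("failed to create the CloudEvents client: %v", err)
	}

	evt := cloudevents.NewEvent()
	evt.SetID("example-id")
	evt.SetType("io.example.event")
	evt.SetSource("example/source")
	_ = evt.SetData(cloudevents.ApplicationJSON, map[string]string{"hello": "world"})

	// Send publishes the event as an MQTT message; its CloudEvents attributes are
	// carried as MQTT v5 user properties (see message.go and write_message.go above).
	if result := c.Send(ctx, evt); cloudevents.IsUndelivered(result) {
		log.Fatalf("failed to send the event: %v", result)
	}
}

On the consuming side, the same Protocol can be constructed with WithSubscribe and passed to the client for receiving, which is the pattern the open-cluster-management.io/api cloudevents work client uses on top of this binding.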
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/alias.go b/vendor/github.com/cloudevents/sdk-go/v2/alias.go new file mode 100644 index 000000000..2fbfaa9a7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/alias.go @@ -0,0 +1,187 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +// Package v2 reexports a subset of the SDK v2 API. +package v2 + +// Package cloudevents alias' common functions and types to improve discoverability and reduce +// the number of imports for simple HTTP clients. + +import ( + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/client" + "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" + "github.com/cloudevents/sdk-go/v2/protocol/http" + "github.com/cloudevents/sdk-go/v2/types" +) + +// Client + +type ClientOption = client.Option +type Client = client.Client + +// Event + +type Event = event.Event +type Result = protocol.Result + +// Context + +type EventContext = event.EventContext +type EventContextV1 = event.EventContextV1 +type EventContextV03 = event.EventContextV03 + +// Custom Types + +type Timestamp = types.Timestamp +type URIRef = types.URIRef + +// HTTP Protocol + +type HTTPOption = http.Option + +type HTTPProtocol = http.Protocol + +// Encoding + +type Encoding = binding.Encoding + +// Message + +type Message = binding.Message + +const ( + // ReadEncoding + + ApplicationXML = event.ApplicationXML + ApplicationJSON = event.ApplicationJSON + TextPlain = event.TextPlain + ApplicationCloudEventsJSON = event.ApplicationCloudEventsJSON + ApplicationCloudEventsBatchJSON = event.ApplicationCloudEventsBatchJSON + Base64 = event.Base64 + + // Event Versions + + VersionV1 = event.CloudEventsVersionV1 + VersionV03 = event.CloudEventsVersionV03 + + // Encoding + + EncodingBinary = binding.EncodingBinary + EncodingStructured = binding.EncodingStructured +) + +var ( + + // ContentType Helpers + + StringOfApplicationJSON = event.StringOfApplicationJSON + StringOfApplicationXML = event.StringOfApplicationXML + StringOfTextPlain = event.StringOfTextPlain + StringOfApplicationCloudEventsJSON = event.StringOfApplicationCloudEventsJSON + StringOfApplicationCloudEventsBatchJSON = event.StringOfApplicationCloudEventsBatchJSON + StringOfBase64 = event.StringOfBase64 + + // Client Creation + + NewClient = client.New + NewClientHTTP = client.NewHTTP + // Deprecated: please use New with the observability options. + NewClientObserved = client.NewObserved + // Deprecated: Please use NewClientHTTP with the observability options. + NewDefaultClient = client.NewDefault + NewHTTPReceiveHandler = client.NewHTTPReceiveHandler + + // Client Options + + WithEventDefaulter = client.WithEventDefaulter + WithUUIDs = client.WithUUIDs + WithTimeNow = client.WithTimeNow + // Deprecated: this is now noop and will be removed in future releases. 
+ WithTracePropagation = client.WithTracePropagation() + + // Event Creation + + NewEvent = event.New + + // Results + + NewResult = protocol.NewResult + ResultIs = protocol.ResultIs + ResultAs = protocol.ResultAs + + // Receipt helpers + + NewReceipt = protocol.NewReceipt + + ResultACK = protocol.ResultACK + ResultNACK = protocol.ResultNACK + + IsACK = protocol.IsACK + IsNACK = protocol.IsNACK + IsUndelivered = protocol.IsUndelivered + + // HTTP Results + + NewHTTPResult = http.NewResult + NewHTTPRetriesResult = http.NewRetriesResult + + // Message Creation + + ToMessage = binding.ToMessage + + // Event Creation + + NewEventFromHTTPRequest = http.NewEventFromHTTPRequest + NewEventFromHTTPResponse = http.NewEventFromHTTPResponse + NewEventsFromHTTPRequest = http.NewEventsFromHTTPRequest + NewEventsFromHTTPResponse = http.NewEventsFromHTTPResponse + NewHTTPRequestFromEvent = http.NewHTTPRequestFromEvent + NewHTTPRequestFromEvents = http.NewHTTPRequestFromEvents + IsHTTPBatch = http.IsHTTPBatch + + // HTTP Messages + + WriteHTTPRequest = http.WriteRequest + + // Context + + ContextWithTarget = context.WithTarget + TargetFromContext = context.TargetFrom + ContextWithRetriesConstantBackoff = context.WithRetriesConstantBackoff + ContextWithRetriesLinearBackoff = context.WithRetriesLinearBackoff + ContextWithRetriesExponentialBackoff = context.WithRetriesExponentialBackoff + + WithEncodingBinary = binding.WithForceBinary + WithEncodingStructured = binding.WithForceStructured + + // Custom Types + + ParseTimestamp = types.ParseTimestamp + ParseURIRef = types.ParseURIRef + ParseURI = types.ParseURI + + // HTTP Protocol + + NewHTTP = http.New + + // HTTP Protocol Options + + WithTarget = http.WithTarget + WithHeader = http.WithHeader + WithShutdownTimeout = http.WithShutdownTimeout + //WithEncoding = http.WithEncoding + //WithStructuredEncoding = http.WithStructuredEncoding // TODO: expose new way + WithPort = http.WithPort + WithPath = http.WithPath + WithMiddleware = http.WithMiddleware + WithListener = http.WithListener + WithRoundTripper = http.WithRoundTripper + WithGetHandlerFunc = http.WithGetHandlerFunc + WithOptionsHandlerFunc = http.WithOptionsHandlerFunc + WithDefaultOptionsHandlerFunc = http.WithDefaultOptionsHandlerFunc +) diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go new file mode 100644 index 000000000..97f2c4dd7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go @@ -0,0 +1,52 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "context" + "io" + + "github.com/cloudevents/sdk-go/v2/binding/spec" +) + +// MessageMetadataWriter is used to set metadata when a binary Message is visited. +type MessageMetadataWriter interface { + // Set a standard attribute. + // + // The value can either be the correct golang type for the attribute, or a canonical + // string encoding, or nil. If value is nil, then the attribute should be deleted. + // See package types to perform the needed conversions. + SetAttribute(attribute spec.Attribute, value interface{}) error + + // Set an extension attribute. + // + // The value can either be the correct golang type for the attribute, or a canonical + // string encoding, or nil. If value is nil, then the extension should be deleted. + // See package types to perform the needed conversions. 
+ SetExtension(name string, value interface{}) error +} + +// BinaryWriter is used to visit a binary Message and generate a new representation. +// +// Protocols that supports binary encoding should implement this interface to implement direct +// binary to binary encoding and event to binary encoding. +// +// Start() and End() methods must be invoked by the caller of Message.ReadBinary() every time +// the BinaryWriter implementation is used to visit a Message. +type BinaryWriter interface { + MessageMetadataWriter + + // Method invoked at the beginning of the visit. Useful to perform initial memory allocations + Start(ctx context.Context) error + + // SetData receives an io.Reader for the data attribute. + // io.Reader is not invoked when the data attribute is empty + SetData(data io.Reader) error + + // End method is invoked only after the whole encoding process ends successfully. + // If it fails, it's never invoked. It can be used to finalize the message. + End(ctx context.Context) error +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go new file mode 100644 index 000000000..8fa999789 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go @@ -0,0 +1,68 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* + +Package binding defines interfaces for protocol bindings. + +NOTE: Most applications that emit or consume events should use the ../client +package, which provides a simpler API to the underlying binding. + +The interfaces in this package provide extra encoding and protocol information +to allow efficient forwarding and end-to-end reliable delivery between a +Receiver and a Sender belonging to different bindings. This is useful for +intermediary applications that route or forward events, but not necessary for +most "endpoint" applications that emit or consume events. + +Protocol Bindings + +A protocol binding usually implements a Message, a Sender and Receiver, a StructuredWriter and a BinaryWriter (depending on the supported encodings of the protocol) and an Write[ProtocolMessage] method. + +Read and write events + +The core of this package is the binding.Message interface. +Through binding.MessageReader It defines how to read a protocol specific message for an +encoded event in structured mode or binary mode. +The entity who receives a protocol specific data structure representing a message +(e.g. an HttpRequest) encapsulates it in a binding.Message implementation using a NewMessage method (e.g. http.NewMessage). +Then the entity that wants to send the binding.Message back on the wire, +translates it back to the protocol specific data structure (e.g. a Kafka ConsumerMessage), using +the writers BinaryWriter and StructuredWriter specific to that protocol. +Binding implementations exposes their writers +through a specific Write[ProtocolMessage] function (e.g. kafka.EncodeProducerMessage), +in order to simplify the encoding process. + +The encoding process can be customized in order to mutate the final result with binding.TransformerFactory. +A bunch of these are provided directly by the binding/transformer module. + +Usually binding.Message implementations can be encoded only one time, because the encoding process drain the message itself. +In order to consume a message several times, the binding/buffering package provides several APIs to buffer the Message. + +A message can be converted to an event.Event using binding.ToEvent() method. 
+An event.Event can be used as Message casting it to binding.EventMessage. + +In order to simplify the encoding process for each protocol, this package provide several utility methods like binding.Write and binding.DirectWrite. +The binding.Write method tries to preserve the structured/binary encoding, in order to be as much efficient as possible. + +Messages can be eventually wrapped to change their behaviours and binding their lifecycle, like the binding.FinishMessage. +Every Message wrapper implements the MessageWrapper interface + +Sender and Receiver + +A Receiver receives protocol specific messages and wraps them to into binding.Message implementations. + +A Sender converts arbitrary Message implementations to a protocol-specific form using the protocol specific Write method +and sends them. + +Message and ExactlyOnceMessage provide methods to allow acknowledgments to +propagate when a reliable messages is forwarded from a Receiver to a Sender. +QoS 0 (unreliable), 1 (at-least-once) and 2 (exactly-once) are supported. + +Transport + +A binding implementation providing Sender and Receiver implementations can be used as a Transport through the BindingTransport adapter. + +*/ +package binding diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go new file mode 100644 index 000000000..5070b7295 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go @@ -0,0 +1,50 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import "errors" + +// Encoding enum specifies the type of encodings supported by binding interfaces +type Encoding int + +const ( + // Binary encoding as specified in https://github.com/cloudevents/spec/blob/master/spec.md#message + EncodingBinary Encoding = iota + // Structured encoding as specified in https://github.com/cloudevents/spec/blob/master/spec.md#message + EncodingStructured + // Message is an instance of EventMessage or it contains EventMessage nested (through MessageWrapper) + EncodingEvent + // When the encoding is unknown (which means that the message is a non-event) + EncodingUnknown + + // EncodingBatch is an instance of JSON Batched Events + EncodingBatch +) + +func (e Encoding) String() string { + switch e { + case EncodingBinary: + return "binary" + case EncodingStructured: + return "structured" + case EncodingEvent: + return "event" + case EncodingBatch: + return "batch" + case EncodingUnknown: + return "unknown" + } + return "" +} + +// ErrUnknownEncoding specifies that the Message is not an event or it is encoded with an unknown encoding +var ErrUnknownEncoding = errors.New("unknown Message encoding") + +// ErrNotStructured returned by Message.Structured for non-structured messages. +var ErrNotStructured = errors.New("message is not in structured mode") + +// ErrNotBinary returned by Message.Binary for non-binary messages. 
+var ErrNotBinary = errors.New("message is not in binary mode") diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go new file mode 100644 index 000000000..f82c729c4 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go @@ -0,0 +1,108 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "bytes" + "context" + + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/event" +) + +type eventFormatKey int + +const ( + formatEventStructured eventFormatKey = iota +) + +// EventMessage type-converts a event.Event object to implement Message. +// This allows local event.Event objects to be sent directly via Sender.Send() +// s.Send(ctx, binding.EventMessage(e)) +// When an event is wrapped into a EventMessage, the original event could be +// potentially mutated. If you need to use the Event again, after wrapping it into +// an Event message, you should copy it before +type EventMessage event.Event + +func ToMessage(e *event.Event) Message { + return (*EventMessage)(e) +} + +func (m *EventMessage) ReadEncoding() Encoding { + return EncodingEvent +} + +func (m *EventMessage) ReadStructured(ctx context.Context, builder StructuredWriter) error { + f := GetOrDefaultFromCtx(ctx, formatEventStructured, format.JSON).(format.Format) + b, err := f.Marshal((*event.Event)(m)) + if err != nil { + return err + } + return builder.SetStructuredEvent(ctx, f, bytes.NewReader(b)) +} + +func (m *EventMessage) ReadBinary(ctx context.Context, b BinaryWriter) (err error) { + err = eventContextToBinaryWriter(m.Context, b) + if err != nil { + return err + } + // Pass the body + body := (*event.Event)(m).Data() + if len(body) > 0 { + err = b.SetData(bytes.NewBuffer(body)) + if err != nil { + return err + } + } + return nil +} + +func (m *EventMessage) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) { + sv := spec.VS.Version(m.Context.GetSpecVersion()) + a := sv.AttributeFromKind(k) + if a != nil { + return a, a.Get(m.Context) + } + return nil, nil +} + +func (m *EventMessage) GetExtension(name string) interface{} { + ext, _ := m.Context.GetExtension(name) + return ext +} + +func eventContextToBinaryWriter(c event.EventContext, b BinaryWriter) (err error) { + // Pass all attributes + sv := spec.VS.Version(c.GetSpecVersion()) + for _, a := range sv.Attributes() { + value := a.Get(c) + if value != nil { + err = b.SetAttribute(a, value) + } + if err != nil { + return err + } + } + // Pass all extensions + for k, v := range c.GetExtensions() { + err = b.SetExtension(k, v) + if err != nil { + return err + } + } + return nil +} + +func (*EventMessage) Finish(error) error { return nil } + +var _ Message = (*EventMessage)(nil) // Test it conforms to the interface +var _ MessageMetadataReader = (*EventMessage)(nil) // Test it conforms to the interface + +// UseFormatForEvent configures which format to use when marshalling the event to structured mode +func UseFormatForEvent(ctx context.Context, f format.Format) context.Context { + return context.WithValue(ctx, formatEventStructured, f) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go new file mode 100644 index 000000000..8b51c4c61 --- /dev/null +++ 
b/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go @@ -0,0 +1,42 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import "github.com/cloudevents/sdk-go/v2/binding/spec" + +type finishMessage struct { + Message + finish func(error) +} + +func (m *finishMessage) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) { + return m.Message.(MessageMetadataReader).GetAttribute(k) +} + +func (m *finishMessage) GetExtension(s string) interface{} { + return m.Message.(MessageMetadataReader).GetExtension(s) +} + +func (m *finishMessage) GetWrappedMessage() Message { + return m.Message +} + +func (m *finishMessage) Finish(err error) error { + err2 := m.Message.Finish(err) // Finish original message first + if m.finish != nil { + m.finish(err) // Notify callback + } + return err2 +} + +var _ MessageWrapper = (*finishMessage)(nil) + +// WithFinish returns a wrapper for m that calls finish() and +// m.Finish() in its Finish(). +// Allows code to be notified when a message is Finished. +func WithFinish(m Message, finish func(error)) Message { + return &finishMessage{Message: m, finish: finish} +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go new file mode 100644 index 000000000..54c3f1a8c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go @@ -0,0 +1,12 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package format formats structured events. + +The "application/cloudevents+json" format is built-in and always +available. Other formats may be added. +*/ +package format diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go new file mode 100644 index 000000000..6bdd1842b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go @@ -0,0 +1,105 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package format + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + + "github.com/cloudevents/sdk-go/v2/event" +) + +// Format marshals and unmarshals structured events to bytes. +type Format interface { + // MediaType identifies the format + MediaType() string + // Marshal event to bytes + Marshal(*event.Event) ([]byte, error) + // Unmarshal bytes to event + Unmarshal([]byte, *event.Event) error +} + +// Prefix for event-format media types. +const Prefix = "application/cloudevents" + +// IsFormat returns true if mediaType begins with "application/cloudevents" +func IsFormat(mediaType string) bool { return strings.HasPrefix(mediaType, Prefix) } + +// JSON is the built-in "application/cloudevents+json" format. +var JSON = jsonFmt{} + +type jsonFmt struct{} + +func (jsonFmt) MediaType() string { return event.ApplicationCloudEventsJSON } + +func (jsonFmt) Marshal(e *event.Event) ([]byte, error) { return json.Marshal(e) } +func (jsonFmt) Unmarshal(b []byte, e *event.Event) error { + return json.Unmarshal(b, e) +} + +// JSONBatch is the built-in "application/cloudevents-batch+json" format. 
+var JSONBatch = jsonBatchFmt{} + +type jsonBatchFmt struct{} + +func (jb jsonBatchFmt) MediaType() string { + return event.ApplicationCloudEventsBatchJSON +} + +// Marshal will return an error for jsonBatchFmt since the Format interface doesn't support batch Marshalling, and we +// know it's structured batch json, we'll go direct to the json.UnMarshall() (see `ToEvents()`) since that is the best +// way to support batch operations for now. +func (jb jsonBatchFmt) Marshal(e *event.Event) ([]byte, error) { + return nil, errors.New("not supported for batch events") +} + +func (jb jsonBatchFmt) Unmarshal(b []byte, e *event.Event) error { + return errors.New("not supported for batch events") +} + +// built-in formats +var formats map[string]Format + +func init() { + formats = map[string]Format{} + Add(JSON) + Add(JSONBatch) +} + +// Lookup returns the format for contentType, or nil if not found. +func Lookup(contentType string) Format { + i := strings.IndexRune(contentType, ';') + if i == -1 { + i = len(contentType) + } + contentType = strings.TrimSpace(strings.ToLower(contentType[0:i])) + return formats[contentType] +} + +func unknown(mediaType string) error { + return fmt.Errorf("unknown event format media-type %#v", mediaType) +} + +// Add a new Format. It can be retrieved by Lookup(f.MediaType()) +func Add(f Format) { formats[f.MediaType()] = f } + +// Marshal an event to bytes using the mediaType event format. +func Marshal(mediaType string, e *event.Event) ([]byte, error) { + if f := formats[mediaType]; f != nil { + return f.Marshal(e) + } + return nil, unknown(mediaType) +} + +// Unmarshal bytes to an event using the mediaType event format. +func Unmarshal(mediaType string, b []byte, e *event.Event) error { + if f := formats[mediaType]; f != nil { + return f.Unmarshal(b, e) + } + return unknown(mediaType) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go new file mode 100644 index 000000000..e30e150c0 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go @@ -0,0 +1,153 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/binding/spec" +) + +// MessageReader defines the read-related portion of the Message interface. +// +// The ReadStructured and ReadBinary methods allows to perform an optimized encoding of a Message to a specific data structure. +// +// If MessageReader.ReadEncoding() can be equal to EncodingBinary, then the implementation of MessageReader +// MUST also implement MessageMetadataReader. +// +// A Sender should try each method of interest and fall back to binding.ToEvent() if none are supported. +// An out of the box algorithm is provided for writing a message: binding.Write(). +type MessageReader interface { + // Return the type of the message Encoding. + // The encoding should be preferably computed when the message is constructed. + ReadEncoding() Encoding + + // ReadStructured transfers a structured-mode event to a StructuredWriter. + // It must return ErrNotStructured if message is not in structured mode. + // + // Returns a different err if something wrong happened while trying to read the structured event. + // In this case, the caller must Finish the message with appropriate error. + // + // This allows Senders to avoid re-encoding messages that are + // already in suitable structured form. 
+ ReadStructured(context.Context, StructuredWriter) error + + // ReadBinary transfers a binary-mode event to an BinaryWriter. + // It must return ErrNotBinary if message is not in binary mode. + // + // The implementation of ReadBinary must not control the lifecycle with BinaryWriter.Start() and BinaryWriter.End(), + // because the caller must control the lifecycle. + // + // Returns a different err if something wrong happened while trying to read the binary event + // In this case, the caller must Finish the message with appropriate error + // + // This allows Senders to avoid re-encoding messages that are + // already in suitable binary form. + ReadBinary(context.Context, BinaryWriter) error +} + +// MessageMetadataReader defines how to read metadata from a binary/event message +// +// If a message implementing MessageReader is encoded as binary (MessageReader.ReadEncoding() == EncodingBinary) +// or it's an EventMessage, then it's safe to assume that it also implements this interface +type MessageMetadataReader interface { + // GetAttribute returns: + // + // * attribute, value: if the message contains an attribute of that attribute kind + // * attribute, nil: if the message spec version supports the attribute kind, but doesn't have any value + // * nil, nil: if the message spec version doesn't support the attribute kind + GetAttribute(attributeKind spec.Kind) (spec.Attribute, interface{}) + // GetExtension returns the value of that extension, if any. + GetExtension(name string) interface{} +} + +// Message is the interface to a binding-specific message containing an event. +// +// Reliable Delivery +// +// There are 3 reliable qualities of service for messages: +// +// 0/at-most-once/unreliable: messages can be dropped silently. +// +// 1/at-least-once: messages are not dropped without signaling an error +// to the sender, but they may be duplicated in the event of a re-send. +// +// 2/exactly-once: messages are never dropped (without error) or +// duplicated, as long as both sending and receiving ends maintain +// some binding-specific delivery state. Whether this is persisted +// depends on the configuration of the binding implementations. +// +// The Message interface supports QoS 0 and 1, the ExactlyOnceMessage interface +// supports QoS 2 +// +// Message includes the MessageReader interface to read messages. Every binding.Message implementation *must* specify if the message can be accessed one or more times. +// +// When a Message can be forgotten by the entity who produced the message, Message.Finish() *must* be invoked. +type Message interface { + MessageReader + + // Finish *must* be called when message from a Receiver can be forgotten by + // the receiver. A QoS 1 sender should not call Finish() until it gets an acknowledgment of + // receipt on the underlying transport. For QoS 2 see ExactlyOnceMessage. + // + // Note that, depending on the Message implementation, forgetting to Finish the message + // could produce memory/resources leaks! + // + // Passing a non-nil err indicates sending or processing failed. + // A non-nil return indicates that the message was not accepted + // by the receivers peer. + Finish(error) error +} + +// ExactlyOnceMessage is implemented by received Messages +// that support QoS 2. Only transports that support QoS 2 need to +// implement or use this interface. +type ExactlyOnceMessage interface { + Message + + // Received is called by a forwarding QoS2 Sender when it gets + // acknowledgment of receipt (e.g. 
AMQP 'accept' or MQTT PUBREC) + // + // The receiver must call settle(nil) when it get's the ack-of-ack + // (e.g. AMQP 'settle' or MQTT PUBCOMP) or settle(err) if the + // transfer fails. + // + // Finally the Sender calls Finish() to indicate the message can be + // discarded. + // + // If sending fails, or if the sender does not support QoS 2, then + // Finish() may be called without any call to Received() + Received(settle func(error)) +} + +// MessageContext interface exposes the internal context that a message might contain +// Only some Message implementations implement this interface. +type MessageContext interface { + // Get the context associated with this message + Context() context.Context +} + +// MessageWrapper interface is used to walk through a decorated Message and unwrap it. +type MessageWrapper interface { + Message + MessageMetadataReader + + // Method to get the wrapped message + GetWrappedMessage() Message +} + +func UnwrapMessage(message Message) Message { + m := message + for m != nil { + switch mt := m.(type) { + case MessageWrapper: + m = mt.GetWrappedMessage() + default: + return m + } + } + return m +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go new file mode 100644 index 000000000..3c3021d46 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go @@ -0,0 +1,141 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package spec + +import ( + "fmt" + "time" + + "github.com/cloudevents/sdk-go/v2/event" + + "github.com/cloudevents/sdk-go/v2/types" +) + +// Kind is a version-independent identifier for a CloudEvent context attribute. +type Kind uint8 + +const ( + // Required cloudevents attributes + ID Kind = iota + Source + SpecVersion + Type + // Optional cloudevents attributes + DataContentType + DataSchema + Subject + Time +) +const nAttrs = int(Time) + 1 + +var kindNames = [nAttrs]string{ + "id", + "source", + "specversion", + "type", + "datacontenttype", + "dataschema", + "subject", + "time", +} + +// String is a human-readable string, for a valid attribute name use Attribute.Name +func (k Kind) String() string { return kindNames[k] } + +// IsRequired returns true for attributes defined as "required" by the CE spec. +func (k Kind) IsRequired() bool { return k < DataContentType } + +// Attribute is a named attribute accessor. +// The attribute name is specific to a Version. +type Attribute interface { + Kind() Kind + // Name of the attribute with respect to the current spec Version() with prefix + PrefixedName() string + // Name of the attribute with respect to the current spec Version() + Name() string + // Version of the spec that this attribute belongs to + Version() Version + // Get the value of this attribute from an event context + Get(event.EventContextReader) interface{} + // Set the value of this attribute on an event context + Set(event.EventContextWriter, interface{}) error + // Delete this attribute from and event context, when possible + Delete(event.EventContextWriter) error +} + +// accessor provides Kind, Get, Set. 
+type accessor interface { + Kind() Kind + Get(event.EventContextReader) interface{} + Set(event.EventContextWriter, interface{}) error + Delete(event.EventContextWriter) error +} + +var acc = [nAttrs]accessor{ + &aStr{aKind(ID), event.EventContextReader.GetID, event.EventContextWriter.SetID}, + &aStr{aKind(Source), event.EventContextReader.GetSource, event.EventContextWriter.SetSource}, + &aStr{aKind(SpecVersion), event.EventContextReader.GetSpecVersion, func(writer event.EventContextWriter, s string) error { return nil }}, + &aStr{aKind(Type), event.EventContextReader.GetType, event.EventContextWriter.SetType}, + &aStr{aKind(DataContentType), event.EventContextReader.GetDataContentType, event.EventContextWriter.SetDataContentType}, + &aStr{aKind(DataSchema), event.EventContextReader.GetDataSchema, event.EventContextWriter.SetDataSchema}, + &aStr{aKind(Subject), event.EventContextReader.GetSubject, event.EventContextWriter.SetSubject}, + &aTime{aKind(Time), event.EventContextReader.GetTime, event.EventContextWriter.SetTime}, +} + +// aKind implements Kind() +type aKind Kind + +func (kind aKind) Kind() Kind { return Kind(kind) } + +type aStr struct { + aKind + get func(event.EventContextReader) string + set func(event.EventContextWriter, string) error +} + +func (a *aStr) Get(c event.EventContextReader) interface{} { + if s := a.get(c); s != "" { + return s + } + return nil // Treat blank as missing +} + +func (a *aStr) Set(c event.EventContextWriter, v interface{}) error { + s, err := types.ToString(v) + if err != nil { + return fmt.Errorf("invalid value for %s: %#v", a.Kind(), v) + } + return a.set(c, s) +} + +func (a *aStr) Delete(c event.EventContextWriter) error { + return a.set(c, "") +} + +type aTime struct { + aKind + get func(event.EventContextReader) time.Time + set func(event.EventContextWriter, time.Time) error +} + +func (a *aTime) Get(c event.EventContextReader) interface{} { + if v := a.get(c); !v.IsZero() { + return v + } + return nil // Treat zero time as missing. +} + +func (a *aTime) Set(c event.EventContextWriter, v interface{}) error { + t, err := types.ToTime(v) + if err != nil { + return fmt.Errorf("invalid value for %s: %#v", a.Kind(), v) + } + return a.set(c, t) +} + +func (a *aTime) Delete(c event.EventContextWriter) error { + return a.set(c, time.Time{}) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go new file mode 100644 index 000000000..44c0b3145 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go @@ -0,0 +1,13 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package spec provides spec-version metadata. + +For use by code that maps events using (prefixed) attribute name strings. +Supports handling multiple spec versions uniformly. 
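(Editorial illustration, not part of the vendored file: a short sketch of version-independent attribute access with this spec package. It relies on spec.VS and the accessors defined later in this diff in spec.go; the id value is a placeholder.)

```go
package main

import (
	"fmt"
	"log"

	"github.com/cloudevents/sdk-go/v2/binding/spec"
	"github.com/cloudevents/sdk-go/v2/event"
)

func main() {
	e := event.New() // defaults to spec version 1.0

	// Resolve the attribute accessors for the event's spec version.
	v := spec.VS.Version(e.SpecVersion())
	idAttr := v.AttributeFromKind(spec.ID)

	// Set and read the "id" attribute without hard-coding the spec version.
	if err := idAttr.Set(e.Context, "example-id"); err != nil {
		log.Fatal(err)
	}
	fmt.Println(idAttr.PrefixedName(), "=", idAttr.Get(e.Context))
}
```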
+ +*/ +package spec diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go new file mode 100644 index 000000000..110787ddc --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go @@ -0,0 +1,81 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package spec + +import ( + "github.com/cloudevents/sdk-go/v2/event" +) + +type matchExactVersion struct { + version +} + +func (v *matchExactVersion) Attribute(name string) Attribute { return v.attrMap[name] } + +var _ Version = (*matchExactVersion)(nil) + +func newMatchExactVersionVersion( + prefix string, + attributeNameMatchMapper func(string) string, + context event.EventContext, + convert func(event.EventContextConverter) event.EventContext, + attrs ...*attribute, +) *matchExactVersion { + v := &matchExactVersion{ + version: version{ + prefix: prefix, + context: context, + convert: convert, + attrMap: map[string]Attribute{}, + attrs: make([]Attribute, len(attrs)), + }, + } + for i, a := range attrs { + a.version = v + v.attrs[i] = a + v.attrMap[attributeNameMatchMapper(a.name)] = a + } + return v +} + +// WithPrefixMatchExact returns a set of versions with prefix added to all attribute names. +func WithPrefixMatchExact(attributeNameMatchMapper func(string) string, prefix string) *Versions { + attr := func(name string, kind Kind) *attribute { + return &attribute{accessor: acc[kind], name: name} + } + vs := &Versions{ + m: map[string]Version{}, + prefix: prefix, + all: []Version{ + newMatchExactVersionVersion(prefix, attributeNameMatchMapper, event.EventContextV1{}.AsV1(), + func(c event.EventContextConverter) event.EventContext { return c.AsV1() }, + attr("id", ID), + attr("source", Source), + attr("specversion", SpecVersion), + attr("type", Type), + attr("datacontenttype", DataContentType), + attr("dataschema", DataSchema), + attr("subject", Subject), + attr("time", Time), + ), + newMatchExactVersionVersion(prefix, attributeNameMatchMapper, event.EventContextV03{}.AsV03(), + func(c event.EventContextConverter) event.EventContext { return c.AsV03() }, + attr("specversion", SpecVersion), + attr("type", Type), + attr("source", Source), + attr("schemaurl", DataSchema), + attr("subject", Subject), + attr("id", ID), + attr("time", Time), + attr("datacontenttype", DataContentType), + ), + }, + } + for _, v := range vs.all { + vs.m[v.String()] = v + } + return vs +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go new file mode 100644 index 000000000..7fa0f5840 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go @@ -0,0 +1,189 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package spec + +import ( + "strings" + + "github.com/cloudevents/sdk-go/v2/event" +) + +// Version provides meta-data for a single spec-version. +type Version interface { + // String name of the version, e.g. "1.0" + String() string + // Prefix for attribute names. + Prefix() string + // Attribute looks up a prefixed attribute name (case insensitive). + // Returns nil if not found. + Attribute(prefixedName string) Attribute + // Attribute looks up the attribute from kind. + // Returns nil if not found. + AttributeFromKind(kind Kind) Attribute + // Attributes returns all the context attributes for this version. 
+ Attributes() []Attribute + // Convert translates a context to this version. + Convert(event.EventContextConverter) event.EventContext + // NewContext returns a new context for this version. + NewContext() event.EventContext + // SetAttribute sets named attribute to value. + // + // Name is case insensitive. + // Does nothing if name does not start with prefix. + SetAttribute(context event.EventContextWriter, name string, value interface{}) error +} + +// Versions contains all known versions with the same attribute prefix. +type Versions struct { + prefix string + all []Version + m map[string]Version +} + +// Versions returns the list of all known versions, most recent first. +func (vs *Versions) Versions() []Version { return vs.all } + +// Version returns the named version. +func (vs *Versions) Version(name string) Version { + return vs.m[name] +} + +// Latest returns the latest Version +func (vs *Versions) Latest() Version { return vs.all[0] } + +// PrefixedSpecVersionName returns the specversion attribute PrefixedName +func (vs *Versions) PrefixedSpecVersionName() string { return vs.prefix + "specversion" } + +// Prefix is the lowercase attribute name prefix. +func (vs *Versions) Prefix() string { return vs.prefix } + +type attribute struct { + accessor + name string + version Version +} + +func (a *attribute) PrefixedName() string { return a.version.Prefix() + a.name } +func (a *attribute) Name() string { return a.name } +func (a *attribute) Version() Version { return a.version } + +type version struct { + prefix string + context event.EventContext + convert func(event.EventContextConverter) event.EventContext + attrMap map[string]Attribute + attrs []Attribute +} + +func (v *version) Attribute(name string) Attribute { return v.attrMap[strings.ToLower(name)] } +func (v *version) Attributes() []Attribute { return v.attrs } +func (v *version) String() string { return v.context.GetSpecVersion() } +func (v *version) Prefix() string { return v.prefix } +func (v *version) NewContext() event.EventContext { return v.context.Clone() } + +// HasPrefix is a case-insensitive prefix check. +func (v *version) HasPrefix(name string) bool { + return strings.HasPrefix(strings.ToLower(name), v.prefix) +} + +func (v *version) Convert(c event.EventContextConverter) event.EventContext { return v.convert(c) } + +func (v *version) SetAttribute(c event.EventContextWriter, name string, value interface{}) error { + if a := v.Attribute(name); a != nil { // Standard attribute + return a.Set(c, value) + } + name = strings.ToLower(name) + var err error + if v.HasPrefix(name) { // Extension attribute + return c.SetExtension(strings.TrimPrefix(name, v.prefix), value) + } + return err +} + +func (v *version) AttributeFromKind(kind Kind) Attribute { + for _, a := range v.Attributes() { + if a.Kind() == kind { + return a + } + } + return nil +} + +func newVersion( + prefix string, + context event.EventContext, + convert func(event.EventContextConverter) event.EventContext, + attrs ...*attribute, +) *version { + v := &version{ + prefix: strings.ToLower(prefix), + context: context, + convert: convert, + attrMap: map[string]Attribute{}, + attrs: make([]Attribute, len(attrs)), + } + for i, a := range attrs { + a.version = v + v.attrs[i] = a + v.attrMap[strings.ToLower(a.PrefixedName())] = a + } + return v +} + +// WithPrefix returns a set of versions with prefix added to all attribute names. 
+func WithPrefix(prefix string) *Versions { + attr := func(name string, kind Kind) *attribute { + return &attribute{accessor: acc[kind], name: name} + } + vs := &Versions{ + m: map[string]Version{}, + prefix: prefix, + all: []Version{ + newVersion(prefix, event.EventContextV1{}.AsV1(), + func(c event.EventContextConverter) event.EventContext { return c.AsV1() }, + attr("id", ID), + attr("source", Source), + attr("specversion", SpecVersion), + attr("type", Type), + attr("datacontenttype", DataContentType), + attr("dataschema", DataSchema), + attr("subject", Subject), + attr("time", Time), + ), + newVersion(prefix, event.EventContextV03{}.AsV03(), + func(c event.EventContextConverter) event.EventContext { return c.AsV03() }, + attr("specversion", SpecVersion), + attr("type", Type), + attr("source", Source), + attr("schemaurl", DataSchema), + attr("subject", Subject), + attr("id", ID), + attr("time", Time), + attr("datacontenttype", DataContentType), + ), + }, + } + for _, v := range vs.all { + vs.m[v.String()] = v + } + return vs +} + +// New returns a set of versions +func New() *Versions { return WithPrefix("") } + +// Built-in un-prefixed versions. +var ( + VS *Versions + V03 Version + V1 Version +) + +func init() { + VS = New() + V03 = VS.Version("0.3") + V1 = VS.Version("1.0") +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go new file mode 100644 index 000000000..60256f2b3 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go @@ -0,0 +1,22 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "context" + "io" + + "github.com/cloudevents/sdk-go/v2/binding/format" +) + +// StructuredWriter is used to visit a structured Message and generate a new representation. +// +// Protocols that supports structured encoding should implement this interface to implement direct +// structured to structured encoding and event to structured encoding. +type StructuredWriter interface { + // Event receives an io.Reader for the whole event. + SetStructuredEvent(ctx context.Context, format format.Format, event io.Reader) error +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go new file mode 100644 index 000000000..d3332c158 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go @@ -0,0 +1,153 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/types" +) + +// ErrCannotConvertToEvent is a generic error when a conversion of a Message to an Event fails +var ErrCannotConvertToEvent = errors.New("cannot convert message to event") + +// ErrCannotConvertToEvents is a generic error when a conversion of a Message to a Batched Event fails +var ErrCannotConvertToEvents = errors.New("cannot convert message to batched events") + +// ToEvent translates a Message with a valid Structured or Binary representation to an Event. +// This function returns the Event generated from the Message and the original encoding of the message or +// an error that points the conversion error. 
+// transformers can be nil and this function guarantees that they are invoked only once during the encoding process. +func ToEvent(ctx context.Context, message MessageReader, transformers ...Transformer) (*event.Event, error) { + if message == nil { + return nil, nil + } + + messageEncoding := message.ReadEncoding() + if messageEncoding == EncodingEvent { + m := message + for m != nil { + switch mt := m.(type) { + case *EventMessage: + e := (*event.Event)(mt) + return e, Transformers(transformers).Transform(mt, (*messageToEventBuilder)(e)) + case MessageWrapper: + m = mt.GetWrappedMessage() + default: + break + } + } + return nil, ErrCannotConvertToEvent + } + + e := event.New() + encoder := (*messageToEventBuilder)(&e) + _, err := DirectWrite( + context.Background(), + message, + encoder, + encoder, + ) + if err != nil { + return nil, err + } + return &e, Transformers(transformers).Transform((*EventMessage)(&e), encoder) +} + +// ToEvents translates a Batch Message and corresponding Reader data to a slice of Events. +// This function returns the Events generated from the body data, or an error that points +// to the conversion issue. +func ToEvents(ctx context.Context, message MessageReader, body io.Reader) ([]event.Event, error) { + messageEncoding := message.ReadEncoding() + if messageEncoding != EncodingBatch { + return nil, ErrCannotConvertToEvents + } + + // Since Format doesn't support batch Marshalling, and we know it's structured batch json, we'll go direct to the + // json.UnMarshall(), since that is the best way to support batch operations for now. + var events []event.Event + return events, json.NewDecoder(body).Decode(&events) +} + +type messageToEventBuilder event.Event + +var _ StructuredWriter = (*messageToEventBuilder)(nil) +var _ BinaryWriter = (*messageToEventBuilder)(nil) + +func (b *messageToEventBuilder) SetStructuredEvent(ctx context.Context, format format.Format, ev io.Reader) error { + var buf bytes.Buffer + _, err := io.Copy(&buf, ev) + if err != nil { + return err + } + return format.Unmarshal(buf.Bytes(), (*event.Event)(b)) +} + +func (b *messageToEventBuilder) Start(ctx context.Context) error { + return nil +} + +func (b *messageToEventBuilder) End(ctx context.Context) error { + return nil +} + +func (b *messageToEventBuilder) SetData(data io.Reader) error { + buf, ok := data.(*bytes.Buffer) + if !ok { + buf = new(bytes.Buffer) + _, err := io.Copy(buf, data) + if err != nil { + return err + } + } + if buf.Len() > 0 { + b.DataEncoded = buf.Bytes() + } + return nil +} + +func (b *messageToEventBuilder) SetAttribute(attribute spec.Attribute, value interface{}) error { + if value == nil { + _ = attribute.Delete(b.Context) + return nil + } + // If spec version we need to change to right context struct + if attribute.Kind() == spec.SpecVersion { + str, err := types.ToString(value) + if err != nil { + return err + } + switch str { + case event.CloudEventsVersionV03: + b.Context = b.Context.AsV03() + case event.CloudEventsVersionV1: + b.Context = b.Context.AsV1() + default: + return fmt.Errorf("unrecognized event version %s", str) + } + return nil + } + return attribute.Set(b.Context, value) +} + +func (b *messageToEventBuilder) SetExtension(name string, value interface{}) error { + if value == nil { + return b.Context.SetExtension(name, nil) + } + value, err := types.Validate(value) + if err != nil { + return err + } + return b.Context.SetExtension(name, value) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go 
b/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go new file mode 100644 index 000000000..de3bec44f --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go @@ -0,0 +1,42 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +// Transformer is an interface that implements a transformation +// process while transferring the event from the Message +// implementation to the provided encoder +// +// When a write function (binding.Write, binding.ToEvent, buffering.CopyMessage, etc.) +// takes Transformer(s) as parameter, it eventually converts the message to a form +// which correctly implements MessageMetadataReader, in order to guarantee that transformation +// is applied +type Transformer interface { + Transform(MessageMetadataReader, MessageMetadataWriter) error +} + +// TransformerFunc is a type alias to implement a Transformer through a function pointer +type TransformerFunc func(MessageMetadataReader, MessageMetadataWriter) error + +func (t TransformerFunc) Transform(r MessageMetadataReader, w MessageMetadataWriter) error { + return t(r, w) +} + +var _ Transformer = (TransformerFunc)(nil) + +// Transformers is a utility alias to run several Transformer +type Transformers []Transformer + +func (t Transformers) Transform(r MessageMetadataReader, w MessageMetadataWriter) error { + for _, transformer := range t { + err := transformer.Transform(r, w) + if err != nil { + return err + } + } + return nil +} + +var _ Transformer = (Transformers)(nil) diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go new file mode 100644 index 000000000..cb498e62d --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go @@ -0,0 +1,179 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package binding + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/event" +) + +type eventEncodingKey int + +const ( + skipDirectStructuredEncoding eventEncodingKey = iota + skipDirectBinaryEncoding + preferredEventEncoding +) + +// DirectWrite invokes the encoders. structuredWriter and binaryWriter could be nil if the protocol doesn't support it. +// transformers can be nil and this function guarantees that they are invoked only once during the encoding process. 
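(Editorial illustration, not part of the vendored files: a sketch of the Transformer interface defined just above, applied while converting a Message to an event with binding.ToEvent. The "editedby" extension name is invented for the example.)

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/cloudevents/sdk-go/v2/binding"
	"github.com/cloudevents/sdk-go/v2/event"
)

func main() {
	e := event.New()
	e.SetID("example-id")
	e.SetType("com.example.demo")
	e.SetSource("example/source")

	// A TransformerFunc that stamps an extension attribute on the target message.
	stamp := binding.TransformerFunc(func(r binding.MessageMetadataReader, w binding.MessageMetadataWriter) error {
		return w.SetExtension("editedby", "transformer")
	})

	// The transformer is applied exactly once while the message is read into an event.
	out, err := binding.ToEvent(context.Background(), binding.ToMessage(&e), stamp)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Extensions())
}
```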
+// This function MUST be invoked only if message.ReadEncoding() == EncodingBinary or message.ReadEncoding() == EncodingStructured +// +// Returns: +// * EncodingStructured, nil if message is correctly encoded in structured encoding +// * EncodingBinary, nil if message is correctly encoded in binary encoding +// * EncodingStructured, err if message was structured but error happened during the encoding +// * EncodingBinary, err if message was binary but error happened during the encoding +// * EncodingUnknown, ErrUnknownEncoding if message is not a structured or a binary Message +func DirectWrite( + ctx context.Context, + message MessageReader, + structuredWriter StructuredWriter, + binaryWriter BinaryWriter, + transformers ...Transformer, +) (Encoding, error) { + if structuredWriter != nil && len(transformers) == 0 && !GetOrDefaultFromCtx(ctx, skipDirectStructuredEncoding, false).(bool) { + if err := message.ReadStructured(ctx, structuredWriter); err == nil { + return EncodingStructured, nil + } else if err != ErrNotStructured { + return EncodingStructured, err + } + } + + if binaryWriter != nil && !GetOrDefaultFromCtx(ctx, skipDirectBinaryEncoding, false).(bool) && message.ReadEncoding() == EncodingBinary { + return EncodingBinary, writeBinaryWithTransformer(ctx, message, binaryWriter, transformers) + } + + return EncodingUnknown, ErrUnknownEncoding +} + +// Write executes the full algorithm to encode a Message using transformers: +// 1. It first tries direct encoding using DirectWrite +// 2. If no direct encoding is possible, it uses ToEvent to generate an Event representation +// 3. From the Event, the message is encoded back to the provided structured or binary encoders +// You can tweak the encoding process using the context decorators WithForceStructured, WithForceStructured, etc. +// transformers can be nil and this function guarantees that they are invoked only once during the encoding process. +// Returns: +// * EncodingStructured, nil if message is correctly encoded in structured encoding +// * EncodingBinary, nil if message is correctly encoded in binary encoding +// * EncodingUnknown, ErrUnknownEncoding if message.ReadEncoding() == EncodingUnknown +// * _, err if error happened during the encoding +func Write( + ctx context.Context, + message MessageReader, + structuredWriter StructuredWriter, + binaryWriter BinaryWriter, + transformers ...Transformer, +) (Encoding, error) { + enc := message.ReadEncoding() + var err error + // Skip direct encoding if the event is an event message + if enc != EncodingEvent { + enc, err = DirectWrite(ctx, message, structuredWriter, binaryWriter, transformers...) + if enc != EncodingUnknown { + // Message directly encoded, nothing else to do here + return enc, err + } + } + + var e *event.Event + e, err = ToEvent(ctx, message, transformers...) 
+ if err != nil { + return enc, err + } + + message = (*EventMessage)(e) + + if GetOrDefaultFromCtx(ctx, preferredEventEncoding, EncodingBinary).(Encoding) == EncodingStructured { + if structuredWriter != nil { + return EncodingStructured, message.ReadStructured(ctx, structuredWriter) + } + if binaryWriter != nil { + return EncodingBinary, writeBinary(ctx, message, binaryWriter) + } + } else { + if binaryWriter != nil { + return EncodingBinary, writeBinary(ctx, message, binaryWriter) + } + if structuredWriter != nil { + return EncodingStructured, message.ReadStructured(ctx, structuredWriter) + } + } + + return EncodingUnknown, ErrUnknownEncoding +} + +// WithSkipDirectStructuredEncoding skips direct structured to structured encoding during the encoding process +func WithSkipDirectStructuredEncoding(ctx context.Context, skip bool) context.Context { + return context.WithValue(ctx, skipDirectStructuredEncoding, skip) +} + +// WithSkipDirectBinaryEncoding skips direct binary to binary encoding during the encoding process +func WithSkipDirectBinaryEncoding(ctx context.Context, skip bool) context.Context { + return context.WithValue(ctx, skipDirectBinaryEncoding, skip) +} + +// WithPreferredEventEncoding defines the preferred encoding from event to message during the encoding process +func WithPreferredEventEncoding(ctx context.Context, enc Encoding) context.Context { + return context.WithValue(ctx, preferredEventEncoding, enc) +} + +// WithForceStructured forces structured encoding during the encoding process +func WithForceStructured(ctx context.Context) context.Context { + return context.WithValue(context.WithValue(ctx, preferredEventEncoding, EncodingStructured), skipDirectBinaryEncoding, true) +} + +// WithForceBinary forces binary encoding during the encoding process +func WithForceBinary(ctx context.Context) context.Context { + return context.WithValue(context.WithValue(ctx, preferredEventEncoding, EncodingBinary), skipDirectStructuredEncoding, true) +} + +// GetOrDefaultFromCtx gets a configuration value from the provided context +func GetOrDefaultFromCtx(ctx context.Context, key interface{}, def interface{}) interface{} { + if val := ctx.Value(key); val != nil { + return val + } else { + return def + } +} + +func writeBinaryWithTransformer( + ctx context.Context, + message MessageReader, + binaryWriter BinaryWriter, + transformers Transformers, +) error { + err := binaryWriter.Start(ctx) + if err != nil { + return err + } + err = message.ReadBinary(ctx, binaryWriter) + if err != nil { + return err + } + err = transformers.Transform(message.(MessageMetadataReader), binaryWriter) + if err != nil { + return err + } + return binaryWriter.End(ctx) +} + +func writeBinary( + ctx context.Context, + message MessageReader, + binaryWriter BinaryWriter, +) error { + err := binaryWriter.Start(ctx) + if err != nil { + return err + } + err = message.ReadBinary(ctx, binaryWriter) + if err != nil { + return err + } + return binaryWriter.End(ctx) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client.go new file mode 100644 index 000000000..ea8fbfbb4 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client.go @@ -0,0 +1,288 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "errors" + "fmt" + "io" + "runtime" + "sync" + + "go.uber.org/zap" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext 
"github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// Client interface defines the runtime contract the CloudEvents client supports. +type Client interface { + // Send will transmit the given event over the client's configured transport. + Send(ctx context.Context, event event.Event) protocol.Result + + // Request will transmit the given event over the client's configured + // transport and return any response event. + Request(ctx context.Context, event event.Event) (*event.Event, protocol.Result) + + // StartReceiver will register the provided function for callback on receipt + // of a cloudevent. It will also start the underlying protocol as it has + // been configured. + // This call is blocking. + // Valid fn signatures are: + // * func() + // * func() error + // * func(context.Context) + // * func(context.Context) protocol.Result + // * func(event.Event) + // * func(event.Event) protocol.Result + // * func(context.Context, event.Event) + // * func(context.Context, event.Event) protocol.Result + // * func(event.Event) *event.Event + // * func(event.Event) (*event.Event, protocol.Result) + // * func(context.Context, event.Event) *event.Event + // * func(context.Context, event.Event) (*event.Event, protocol.Result) + StartReceiver(ctx context.Context, fn interface{}) error +} + +// New produces a new client with the provided transport object and applied +// client options. +func New(obj interface{}, opts ...Option) (Client, error) { + c := &ceClient{ + // Running runtime.GOMAXPROCS(0) doesn't update the value, just returns the current one + pollGoroutines: runtime.GOMAXPROCS(0), + observabilityService: noopObservabilityService{}, + } + + if p, ok := obj.(protocol.Sender); ok { + c.sender = p + } + if p, ok := obj.(protocol.Requester); ok { + c.requester = p + } + if p, ok := obj.(protocol.Responder); ok { + c.responder = p + } + if p, ok := obj.(protocol.Receiver); ok { + c.receiver = p + } + if p, ok := obj.(protocol.Opener); ok { + c.opener = p + } + + if err := c.applyOptions(opts...); err != nil { + return nil, err + } + return c, nil +} + +type ceClient struct { + sender protocol.Sender + requester protocol.Requester + receiver protocol.Receiver + responder protocol.Responder + // Optional. + opener protocol.Opener + + observabilityService ObservabilityService + + inboundContextDecorators []func(context.Context, binding.Message) context.Context + outboundContextDecorators []func(context.Context) context.Context + invoker Invoker + receiverMu sync.Mutex + eventDefaulterFns []EventDefaulter + pollGoroutines int + blockingCallback bool +} + +func (c *ceClient) applyOptions(opts ...Option) error { + for _, fn := range opts { + if err := fn(c); err != nil { + return err + } + } + return nil +} + +func (c *ceClient) Send(ctx context.Context, e event.Event) protocol.Result { + var err error + if c.sender == nil { + err = errors.New("sender not set") + return err + } + + for _, f := range c.outboundContextDecorators { + ctx = f(ctx) + } + + if len(c.eventDefaulterFns) > 0 { + for _, fn := range c.eventDefaulterFns { + e = fn(ctx, e) + } + } + if err = e.Validate(); err != nil { + return err + } + + // Event has been defaulted and validated, record we are going to perform send. 
+ ctx, cb := c.observabilityService.RecordSendingEvent(ctx, e) + err = c.sender.Send(ctx, (*binding.EventMessage)(&e)) + defer cb(err) + return err +} + +func (c *ceClient) Request(ctx context.Context, e event.Event) (*event.Event, protocol.Result) { + var resp *event.Event + var err error + + if c.requester == nil { + err = errors.New("requester not set") + return nil, err + } + for _, f := range c.outboundContextDecorators { + ctx = f(ctx) + } + + if len(c.eventDefaulterFns) > 0 { + for _, fn := range c.eventDefaulterFns { + e = fn(ctx, e) + } + } + + if err = e.Validate(); err != nil { + return nil, err + } + + // Event has been defaulted and validated, record we are going to perform request. + ctx, cb := c.observabilityService.RecordRequestEvent(ctx, e) + + // If provided a requester, use it to do request/response. + var msg binding.Message + msg, err = c.requester.Request(ctx, (*binding.EventMessage)(&e)) + if msg != nil { + defer func() { + if err := msg.Finish(err); err != nil { + cecontext.LoggerFrom(ctx).Warnw("failed calling message.Finish", zap.Error(err)) + } + }() + } + if protocol.IsUndelivered(err) { + return nil, err + } + + // try to turn msg into an event, it might not work and that is ok. + if rs, rserr := binding.ToEvent(ctx, msg); rserr != nil { + cecontext.LoggerFrom(ctx).Debugw("response: failed calling ToEvent", zap.Error(rserr), zap.Any("resp", msg)) + // If the protocol returns no error, it is an ACK on the request, but we had + // issues turning the response into an event, so make an ACK Result and pass + // down the ToEvent error as well. + err = protocol.NewReceipt(true, "failed to convert response into event: %v\n%w", rserr, err) + } else { + resp = rs + } + defer cb(err, resp) + return resp, err +} + +// StartReceiver sets up the given fn to handle Receive. +// See Client.StartReceiver for details. This is a blocking call. +func (c *ceClient) StartReceiver(ctx context.Context, fn interface{}) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + c.receiverMu.Lock() + defer c.receiverMu.Unlock() + + if c.invoker != nil { + return fmt.Errorf("client already has a receiver") + } + + invoker, err := newReceiveInvoker(fn, c.observabilityService, c.inboundContextDecorators, c.eventDefaulterFns...) + if err != nil { + return err + } + if invoker.IsReceiver() && c.receiver == nil { + return fmt.Errorf("mismatched receiver callback without protocol.Receiver supported by protocol") + } + if invoker.IsResponder() && c.responder == nil { + return fmt.Errorf("mismatched receiver callback without protocol.Responder supported by protocol") + } + c.invoker = invoker + + if c.responder == nil && c.receiver == nil { + return errors.New("responder nor receiver set") + } + + defer func() { + c.invoker = nil + }() + + // Start Polling. 
+ wg := sync.WaitGroup{} + for i := 0; i < c.pollGoroutines; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for { + var msg binding.Message + var respFn protocol.ResponseFn + var err error + + if c.responder != nil { + msg, respFn, err = c.responder.Respond(ctx) + } else if c.receiver != nil { + msg, err = c.receiver.Receive(ctx) + respFn = noRespFn + } + + if err == io.EOF { // Normal close + return + } + + if err != nil { + cecontext.LoggerFrom(ctx).Warn("Error while receiving a message: ", err) + continue + } + + callback := func() { + if err := c.invoker.Invoke(ctx, msg, respFn); err != nil { + cecontext.LoggerFrom(ctx).Warn("Error while handling a message: ", err) + } + } + + if c.blockingCallback { + callback() + } else { + // Do not block on the invoker. + wg.Add(1) + go func() { + defer wg.Done() + callback() + }() + } + } + }() + } + + // Start the opener, if set. + if c.opener != nil { + if err = c.opener.OpenInbound(ctx); err != nil { + err = fmt.Errorf("error while opening the inbound connection: %w", err) + cancel() + } + } + + wg.Wait() + + return err +} + +// noRespFn is used to simply forward the protocol.Result for receivers that aren't responders +func noRespFn(_ context.Context, _ binding.Message, r protocol.Result, _ ...binding.Transformer) error { + return r +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go new file mode 100644 index 000000000..d48cc2042 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go @@ -0,0 +1,35 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "github.com/cloudevents/sdk-go/v2/protocol/http" +) + +// NewHTTP provides the good defaults for the common case using an HTTP +// Protocol client. +// The WithTimeNow, and WithUUIDs client options are also applied to the +// client, all outbound events will have a time and id set if not already +// present. +func NewHTTP(opts ...http.Option) (Client, error) { + p, err := http.New(opts...) + if err != nil { + return nil, err + } + + c, err := New(p, WithTimeNow(), WithUUIDs()) + if err != nil { + return nil, err + } + + return c, nil +} + +// NewDefault has been replaced by NewHTTP +// Deprecated. To get the same as NewDefault provided, please use NewHTTP with +// the observability service passed as an option, or client.NewClientHTTP from +// package github.com/cloudevents/sdk-go/observability/opencensus/v2/client +var NewDefault = NewHTTP diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go new file mode 100644 index 000000000..82985b8a7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go @@ -0,0 +1,12 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +// NewObserved produces a new client with the provided transport object and applied +// client options. +// Deprecated: This now has the same behaviour of New, and will be removed in future releases. +// As New, you must provide the observability service to use. 
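(Editorial illustration, not part of the vendored file: a minimal sender sketch using client.NewHTTP from client_http.go above. The target URL is a placeholder; without a listener there, Send simply reports an undelivered result.)

```go
package main

import (
	"context"
	"log"

	"github.com/cloudevents/sdk-go/v2/client"
	cecontext "github.com/cloudevents/sdk-go/v2/context"
	"github.com/cloudevents/sdk-go/v2/event"
	"github.com/cloudevents/sdk-go/v2/protocol"
)

func main() {
	// client.NewHTTP wires an HTTP protocol into the generic client and adds the
	// WithTimeNow/WithUUIDs defaulters, so id and time are filled in before Send validates.
	c, err := client.NewHTTP()
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}

	e := event.New()
	e.SetType("com.example.demo")
	e.SetSource("example/source")
	if err := e.SetData(event.ApplicationJSON, map[string]string{"hello": "world"}); err != nil {
		log.Fatalf("failed to set data: %v", err)
	}

	// The HTTP sender reads its destination from the context.
	ctx := cecontext.WithTarget(context.Background(), "http://localhost:8080/")
	if result := c.Send(ctx, e); protocol.IsUndelivered(result) {
		log.Printf("failed to send: %v", result)
	}
}
```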
+var NewObserved = New diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go b/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go new file mode 100644 index 000000000..7bfebf35c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go @@ -0,0 +1,57 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "time" + + "github.com/cloudevents/sdk-go/v2/event" + + "github.com/google/uuid" +) + +// EventDefaulter is the function signature for extensions that are able +// to perform event defaulting. +type EventDefaulter func(ctx context.Context, event event.Event) event.Event + +// DefaultIDToUUIDIfNotSet will inspect the provided event and assign a UUID to +// context.ID if it is found to be empty. +func DefaultIDToUUIDIfNotSet(ctx context.Context, event event.Event) event.Event { + if event.Context != nil { + if event.ID() == "" { + event.Context = event.Context.Clone() + event.SetID(uuid.New().String()) + } + } + return event +} + +// DefaultTimeToNowIfNotSet will inspect the provided event and assign a new +// Timestamp to context.Time if it is found to be nil or zero. +func DefaultTimeToNowIfNotSet(ctx context.Context, event event.Event) event.Event { + if event.Context != nil { + if event.Time().IsZero() { + event.Context = event.Context.Clone() + event.SetTime(time.Now()) + } + } + return event +} + +// NewDefaultDataContentTypeIfNotSet returns a defaulter that will inspect the +// provided event and set the provided content type if content type is found +// to be empty. +func NewDefaultDataContentTypeIfNotSet(contentType string) EventDefaulter { + return func(ctx context.Context, event event.Event) event.Event { + if event.Context != nil { + if event.DataContentType() == "" { + event.SetDataContentType(contentType) + } + } + return event + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go new file mode 100644 index 000000000..e09962ce6 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go @@ -0,0 +1,11 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package client holds the recommended entry points for interacting with the CloudEvents Golang SDK. The client wraps +a selected transport. The client adds validation and defaulting for sending events, and flexible receiver method +registration. For full details, read the `client.Client` documentation. +*/ +package client diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go b/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go new file mode 100644 index 000000000..94a4b4e65 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go @@ -0,0 +1,45 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + cecontext "github.com/cloudevents/sdk-go/v2/context" + thttp "github.com/cloudevents/sdk-go/v2/protocol/http" + "go.uber.org/zap" + "net/http" +) + +func NewHTTPReceiveHandler(ctx context.Context, p *thttp.Protocol, fn interface{}) (*EventReceiver, error) { + invoker, err := newReceiveInvoker(fn, noopObservabilityService{}, nil) //TODO(slinkydeveloper) maybe not nil? 
+ if err != nil { + return nil, err + } + + return &EventReceiver{ + p: p, + invoker: invoker, + }, nil +} + +type EventReceiver struct { + p *thttp.Protocol + invoker Invoker +} + +func (r *EventReceiver) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + // Prepare to handle the message if there's one (context cancellation will ensure this closes) + go func() { + ctx := req.Context() + msg, respFn, err := r.p.Respond(ctx) + if err != nil { + cecontext.LoggerFrom(context.TODO()).Debugw("failed to call Respond", zap.Error(err)) + } else if err := r.invoker.Invoke(ctx, msg, respFn); err != nil { + cecontext.LoggerFrom(context.TODO()).Debugw("failed to call Invoke", zap.Error(err)) + } + }() + r.p.ServeHTTP(rw, req) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go b/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go new file mode 100644 index 000000000..403fb0f55 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go @@ -0,0 +1,137 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "fmt" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +type Invoker interface { + Invoke(context.Context, binding.Message, protocol.ResponseFn) error + IsReceiver() bool + IsResponder() bool +} + +var _ Invoker = (*receiveInvoker)(nil) + +func newReceiveInvoker(fn interface{}, observabilityService ObservabilityService, inboundContextDecorators []func(context.Context, binding.Message) context.Context, fns ...EventDefaulter) (Invoker, error) { + r := &receiveInvoker{ + eventDefaulterFns: fns, + observabilityService: observabilityService, + inboundContextDecorators: inboundContextDecorators, + } + + if fn, err := receiver(fn); err != nil { + return nil, err + } else { + r.fn = fn + } + + return r, nil +} + +type receiveInvoker struct { + fn *receiverFn + observabilityService ObservabilityService + eventDefaulterFns []EventDefaulter + inboundContextDecorators []func(context.Context, binding.Message) context.Context +} + +func (r *receiveInvoker) Invoke(ctx context.Context, m binding.Message, respFn protocol.ResponseFn) (err error) { + defer func() { + err = m.Finish(err) + }() + + var respMsg binding.Message + var result protocol.Result + + e, eventErr := binding.ToEvent(ctx, m) + switch { + case eventErr != nil && r.fn.hasEventIn: + r.observabilityService.RecordReceivedMalformedEvent(ctx, eventErr) + return respFn(ctx, nil, protocol.NewReceipt(false, "failed to convert Message to Event: %w", eventErr)) + case r.fn != nil: + // Check if event is valid before invoking the receiver function + if e != nil { + if validationErr := e.Validate(); validationErr != nil { + r.observabilityService.RecordReceivedMalformedEvent(ctx, validationErr) + return respFn(ctx, nil, protocol.NewReceipt(false, "validation error in incoming event: %w", validationErr)) + } + } + + // Let's invoke the receiver fn + var resp *event.Event + resp, result = func() (resp *event.Event, result protocol.Result) { + defer func() { + if r := recover(); r != nil { + result = fmt.Errorf("call to Invoker.Invoke(...) 
has panicked: %v", r) + cecontext.LoggerFrom(ctx).Error(result) + } + }() + ctx = computeInboundContext(m, ctx, r.inboundContextDecorators) + + var cb func(error) + ctx, cb = r.observabilityService.RecordCallingInvoker(ctx, e) + + resp, result = r.fn.invoke(ctx, e) + defer cb(result) + return + }() + + if respFn == nil { + break + } + + // Apply the defaulter chain to the outgoing event. + if resp != nil && len(r.eventDefaulterFns) > 0 { + for _, fn := range r.eventDefaulterFns { + *resp = fn(ctx, *resp) + } + // Validate the event conforms to the CloudEvents Spec. + if vErr := resp.Validate(); vErr != nil { + cecontext.LoggerFrom(ctx).Errorf("cloudevent validation failed on response event: %v", vErr) + } + } + + // because binding.Message is an interface, casting a nil resp + // here would make future comparisons to nil false + if resp != nil { + respMsg = (*binding.EventMessage)(resp) + } + } + + if respFn == nil { + // let the protocol ACK based on the result + return result + } + + return respFn(ctx, respMsg, result) +} + +func (r *receiveInvoker) IsReceiver() bool { + return !r.fn.hasEventOut +} + +func (r *receiveInvoker) IsResponder() bool { + return r.fn.hasEventOut +} + +func computeInboundContext(message binding.Message, fallback context.Context, inboundContextDecorators []func(context.Context, binding.Message) context.Context) context.Context { + result := fallback + if mctx, ok := message.(binding.MessageContext); ok { + result = cecontext.ValuesDelegating(mctx.Context(), fallback) + } + for _, f := range inboundContextDecorators { + result = f(result, message) + } + return result +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go b/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go new file mode 100644 index 000000000..75005d3bb --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go @@ -0,0 +1,54 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/event" +) + +// ObservabilityService is an interface users can implement to record metrics, create tracing spans, and plug other observability tools in the Client +type ObservabilityService interface { + // InboundContextDecorators is a method that returns the InboundContextDecorators that must be mounted in the Client to properly propagate some tracing informations. + InboundContextDecorators() []func(context.Context, binding.Message) context.Context + + // RecordReceivedMalformedEvent is invoked when an event was received but it's malformed or invalid. + RecordReceivedMalformedEvent(ctx context.Context, err error) + // RecordCallingInvoker is invoked before the user function is invoked. + // The returned callback will be invoked after the user finishes to process the event with the eventual processing error + // The error provided to the callback could be both a processing error, or a result + RecordCallingInvoker(ctx context.Context, event *event.Event) (context.Context, func(errOrResult error)) + // RecordSendingEvent is invoked before the event is sent. + // The returned callback will be invoked when the response is received + // The error provided to the callback could be both a processing error, or a result + RecordSendingEvent(ctx context.Context, event event.Event) (context.Context, func(errOrResult error)) + + // RecordRequestEvent is invoked before the event is requested. 
+ // The returned callback will be invoked when the response is received + RecordRequestEvent(ctx context.Context, event event.Event) (context.Context, func(errOrResult error, event *event.Event)) +} + +type noopObservabilityService struct{} + +func (n noopObservabilityService) InboundContextDecorators() []func(context.Context, binding.Message) context.Context { + return nil +} + +func (n noopObservabilityService) RecordReceivedMalformedEvent(ctx context.Context, err error) {} + +func (n noopObservabilityService) RecordCallingInvoker(ctx context.Context, event *event.Event) (context.Context, func(errOrResult error)) { + return ctx, func(errOrResult error) {} +} + +func (n noopObservabilityService) RecordSendingEvent(ctx context.Context, event event.Event) (context.Context, func(errOrResult error)) { + return ctx, func(errOrResult error) {} +} + +func (n noopObservabilityService) RecordRequestEvent(ctx context.Context, e event.Event) (context.Context, func(errOrResult error, event *event.Event)) { + return ctx, func(errOrResult error, event *event.Event) {} +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/options.go b/vendor/github.com/cloudevents/sdk-go/v2/client/options.go new file mode 100644 index 000000000..938478162 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/options.go @@ -0,0 +1,128 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "fmt" + + "github.com/cloudevents/sdk-go/v2/binding" +) + +// Option is the function signature required to be considered an client.Option. +type Option func(interface{}) error + +// WithEventDefaulter adds an event defaulter to the end of the defaulter chain. +func WithEventDefaulter(fn EventDefaulter) Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + if fn == nil { + return fmt.Errorf("client option was given an nil event defaulter") + } + c.eventDefaulterFns = append(c.eventDefaulterFns, fn) + } + return nil + } +} + +func WithForceBinary() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.outboundContextDecorators = append(c.outboundContextDecorators, binding.WithForceBinary) + } + return nil + } +} + +func WithForceStructured() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.outboundContextDecorators = append(c.outboundContextDecorators, binding.WithForceStructured) + } + return nil + } +} + +// WithUUIDs adds DefaultIDToUUIDIfNotSet event defaulter to the end of the +// defaulter chain. +func WithUUIDs() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.eventDefaulterFns = append(c.eventDefaulterFns, DefaultIDToUUIDIfNotSet) + } + return nil + } +} + +// WithTimeNow adds DefaultTimeToNowIfNotSet event defaulter to the end of the +// defaulter chain. +func WithTimeNow() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.eventDefaulterFns = append(c.eventDefaulterFns, DefaultTimeToNowIfNotSet) + } + return nil + } +} + +// WithTracePropagation enables trace propagation via the distributed tracing +// extension. +// Deprecated: this is now noop and will be removed in future releases. 
+// Don't use distributed tracing extension to propagate traces: +// https://github.com/cloudevents/spec/blob/v1.0.1/extensions/distributed-tracing.md#using-the-distributed-tracing-extension +func WithTracePropagation() Option { + return func(i interface{}) error { + return nil + } +} + +// WithPollGoroutines configures how much goroutines should be used to +// poll the Receiver/Responder/Protocol implementations. +// Default value is GOMAXPROCS +func WithPollGoroutines(pollGoroutines int) Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.pollGoroutines = pollGoroutines + } + return nil + } +} + +// WithObservabilityService configures the observability service to use +// to record traces and metrics +func WithObservabilityService(service ObservabilityService) Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.observabilityService = service + c.inboundContextDecorators = append(c.inboundContextDecorators, service.InboundContextDecorators()...) + } + return nil + } +} + +// WithInboundContextDecorator configures a new inbound context decorator. +// Inbound context decorators are invoked to wrap additional informations from the binding.Message +// and propagate these informations in the context passed to the event receiver. +func WithInboundContextDecorator(dec func(context.Context, binding.Message) context.Context) Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.inboundContextDecorators = append(c.inboundContextDecorators, dec) + } + return nil + } +} + +// WithBlockingCallback makes the callback passed into StartReceiver is executed as a blocking call, +// i.e. in each poll go routine, the next event will not be received until the callback on current event completes. +// To make event processing serialized (no concurrency), use this option along with WithPollGoroutines(1) +func WithBlockingCallback() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.blockingCallback = true + } + return nil + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go b/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go new file mode 100644 index 000000000..b1ab532d7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go @@ -0,0 +1,194 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package client + +import ( + "context" + "errors" + "fmt" + "reflect" + + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// ReceiveFull is the signature of a fn to be invoked for incoming cloudevents. +type ReceiveFull func(context.Context, event.Event) protocol.Result + +type receiverFn struct { + numIn int + numOut int + fnValue reflect.Value + + hasContextIn bool + hasEventIn bool + + hasEventOut bool + hasResultOut bool +} + +const ( + inParamUsage = "expected a function taking either no parameters, one or more of (context.Context, event.Event) ordered" + outParamUsage = "expected a function returning one or mode of (*event.Event, protocol.Result) ordered" +) + +var ( + contextType = reflect.TypeOf((*context.Context)(nil)).Elem() + eventType = reflect.TypeOf((*event.Event)(nil)).Elem() + eventPtrType = reflect.TypeOf((*event.Event)(nil)) // want the ptr type + resultType = reflect.TypeOf((*protocol.Result)(nil)).Elem() +) + +// receiver creates a receiverFn wrapper class that is used by the client to +// validate and invoke the provided function. 
+// Valid fn signatures are: +// * func() +// * func() protocol.Result +// * func(context.Context) +// * func(context.Context) protocol.Result +// * func(event.Event) +// * func(event.Event) transport.Result +// * func(context.Context, event.Event) +// * func(context.Context, event.Event) protocol.Result +// * func(event.Event) *event.Event +// * func(event.Event) (*event.Event, protocol.Result) +// * func(context.Context, event.Event) *event.Event +// * func(context.Context, event.Event) (*event.Event, protocol.Result) +// +func receiver(fn interface{}) (*receiverFn, error) { + fnType := reflect.TypeOf(fn) + if fnType.Kind() != reflect.Func { + return nil, errors.New("must pass a function to handle events") + } + + r := &receiverFn{ + fnValue: reflect.ValueOf(fn), + numIn: fnType.NumIn(), + numOut: fnType.NumOut(), + } + + if err := r.validate(fnType); err != nil { + return nil, err + } + + return r, nil +} + +func (r *receiverFn) invoke(ctx context.Context, e *event.Event) (*event.Event, protocol.Result) { + args := make([]reflect.Value, 0, r.numIn) + + if r.numIn > 0 { + if r.hasContextIn { + args = append(args, reflect.ValueOf(ctx)) + } + if r.hasEventIn { + args = append(args, reflect.ValueOf(*e)) + } + } + v := r.fnValue.Call(args) + var respOut protocol.Result + var eOut *event.Event + if r.numOut > 0 { + i := 0 + if r.hasEventOut { + if eo, ok := v[i].Interface().(*event.Event); ok { + eOut = eo + } + i++ // <-- note, need to inc i. + } + if r.hasResultOut { + if resp, ok := v[i].Interface().(protocol.Result); ok { + respOut = resp + } + } + } + return eOut, respOut +} + +// Verifies that the inputs to a function have a valid signature +// Valid input is to be [0, all] of +// context.Context, event.Event in this order. +func (r *receiverFn) validateInParamSignature(fnType reflect.Type) error { + r.hasContextIn = false + r.hasEventIn = false + + switch fnType.NumIn() { + case 2: + // has to be (context.Context, event.Event) + if !eventType.ConvertibleTo(fnType.In(1)) { + return fmt.Errorf("%s; cannot convert parameter 2 to %s from event.Event", inParamUsage, fnType.In(1)) + } else { + r.hasEventIn = true + } + fallthrough + case 1: + if !contextType.ConvertibleTo(fnType.In(0)) { + if !eventType.ConvertibleTo(fnType.In(0)) { + return fmt.Errorf("%s; cannot convert parameter 1 to %s from context.Context or event.Event", inParamUsage, fnType.In(0)) + } else if r.hasEventIn { + return fmt.Errorf("%s; duplicate parameter of type event.Event", inParamUsage) + } else { + r.hasEventIn = true + } + } else { + r.hasContextIn = true + } + fallthrough + case 0: + return nil + + default: + return fmt.Errorf("%s; function has too many parameters (%d)", inParamUsage, fnType.NumIn()) + } +} + +// Verifies that the outputs of a function have a valid signature +// Valid output signatures to be [0, all] of +// *event.Event, transport.Result in this order +func (r *receiverFn) validateOutParamSignature(fnType reflect.Type) error { + r.hasEventOut = false + r.hasResultOut = false + + switch fnType.NumOut() { + case 2: + // has to be (*event.Event, transport.Result) + if !fnType.Out(1).ConvertibleTo(resultType) { + return fmt.Errorf("%s; cannot convert parameter 2 from %s to event.Response", outParamUsage, fnType.Out(1)) + } else { + r.hasResultOut = true + } + fallthrough + case 1: + if !fnType.Out(0).ConvertibleTo(resultType) { + if !fnType.Out(0).ConvertibleTo(eventPtrType) { + return fmt.Errorf("%s; cannot convert parameter 1 from %s to *event.Event or transport.Result", outParamUsage, fnType.Out(0)) 
+ } else { + r.hasEventOut = true + } + } else if r.hasResultOut { + return fmt.Errorf("%s; duplicate parameter of type event.Response", outParamUsage) + } else { + r.hasResultOut = true + } + fallthrough + case 0: + return nil + default: + return fmt.Errorf("%s; function has too many return types (%d)", outParamUsage, fnType.NumOut()) + } +} + +// validateReceiverFn validates that a function has the right number of in and +// out params and that they are of allowed types. +func (r *receiverFn) validate(fnType reflect.Type) error { + if err := r.validateInParamSignature(fnType); err != nil { + return err + } + if err := r.validateOutParamSignature(fnType); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/context.go b/vendor/github.com/cloudevents/sdk-go/v2/context/context.go new file mode 100644 index 000000000..fc9ef0315 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/context.go @@ -0,0 +1,110 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package context + +import ( + "context" + "net/url" + "time" +) + +// Opaque key type used to store target +type targetKeyType struct{} + +var targetKey = targetKeyType{} + +// WithTarget returns back a new context with the given target. Target is intended to be transport dependent. +// For http transport, `target` should be a full URL and will be injected into the outbound http request. +func WithTarget(ctx context.Context, target string) context.Context { + return context.WithValue(ctx, targetKey, target) +} + +// TargetFrom looks in the given context and returns `target` as a parsed url if found and valid, otherwise nil. +func TargetFrom(ctx context.Context) *url.URL { + c := ctx.Value(targetKey) + if c != nil { + if s, ok := c.(string); ok && s != "" { + if target, err := url.Parse(s); err == nil { + return target + } + } + } + return nil +} + +// Opaque key type used to store topic +type topicKeyType struct{} + +var topicKey = topicKeyType{} + +// WithTopic returns back a new context with the given topic. Topic is intended to be transport dependent. +// For pubsub transport, `topic` should be a Pub/Sub Topic ID. +func WithTopic(ctx context.Context, topic string) context.Context { + return context.WithValue(ctx, topicKey, topic) +} + +// TopicFrom looks in the given context and returns `topic` as a string if found and valid, otherwise "". +func TopicFrom(ctx context.Context) string { + c := ctx.Value(topicKey) + if c != nil { + if s, ok := c.(string); ok { + return s + } + } + return "" +} + +// Opaque key type used to store retry parameters +type retriesKeyType struct{} + +var retriesKey = retriesKeyType{} + +// WithRetriesConstantBackoff returns back a new context with retries parameters using constant backoff strategy. +// MaxTries is the maximum number for retries and delay is the time interval between retries +func WithRetriesConstantBackoff(ctx context.Context, delay time.Duration, maxTries int) context.Context { + return WithRetryParams(ctx, &RetryParams{ + Strategy: BackoffStrategyConstant, + Period: delay, + MaxTries: maxTries, + }) +} + +// WithRetriesLinearBackoff returns back a new context with retries parameters using linear backoff strategy. 
+// MaxTries is the maximum number for retries and delay*tries is the time interval between retries +func WithRetriesLinearBackoff(ctx context.Context, delay time.Duration, maxTries int) context.Context { + return WithRetryParams(ctx, &RetryParams{ + Strategy: BackoffStrategyLinear, + Period: delay, + MaxTries: maxTries, + }) +} + +// WithRetriesExponentialBackoff returns back a new context with retries parameters using exponential backoff strategy. +// MaxTries is the maximum number for retries and period is the amount of time to wait, used as `period * 2^retries`. +func WithRetriesExponentialBackoff(ctx context.Context, period time.Duration, maxTries int) context.Context { + return WithRetryParams(ctx, &RetryParams{ + Strategy: BackoffStrategyExponential, + Period: period, + MaxTries: maxTries, + }) +} + +// WithRetryParams returns back a new context with retries parameters. +func WithRetryParams(ctx context.Context, rp *RetryParams) context.Context { + return context.WithValue(ctx, retriesKey, rp) +} + +// RetriesFrom looks in the given context and returns the retries parameters if found. +// Otherwise returns the default retries configuration (ie. no retries). +func RetriesFrom(ctx context.Context) *RetryParams { + c := ctx.Value(retriesKey) + if c != nil { + if s, ok := c.(*RetryParams); ok { + return s + } + } + return &DefaultRetryParams +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go b/vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go new file mode 100644 index 000000000..434a4da7a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go @@ -0,0 +1,25 @@ +package context + +import "context" + +type valuesDelegating struct { + context.Context + parent context.Context +} + +// ValuesDelegating wraps a child and parent context. It will perform Value() +// lookups first on the child, and then fall back to the child. All other calls +// go solely to the child context. +func ValuesDelegating(child, parent context.Context) context.Context { + return &valuesDelegating{ + Context: child, + parent: parent, + } +} + +func (c *valuesDelegating) Value(key interface{}) interface{} { + if val := c.Context.Value(key); val != nil { + return val + } + return c.parent.Value(key) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go new file mode 100644 index 000000000..0b2dcaf70 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go @@ -0,0 +1,10 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package context holds the last resort overrides and fyi objects that can be passed to clients and transports added to +context.Context objects. +*/ +package context diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go b/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go new file mode 100644 index 000000000..b3087a79f --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go @@ -0,0 +1,48 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package context + +import ( + "context" + + "go.uber.org/zap" +) + +// Opaque key type used to store logger +type loggerKeyType struct{} + +var loggerKey = loggerKeyType{} + +// fallbackLogger is the logger is used when there is no logger attached to the context. 
+var fallbackLogger *zap.SugaredLogger + +func init() { + if logger, err := zap.NewProduction(); err != nil { + // We failed to create a fallback logger. + fallbackLogger = zap.NewNop().Sugar() + } else { + fallbackLogger = logger.Named("fallback").Sugar() + } +} + +// WithLogger returns a new context with the logger injected into the given context. +func WithLogger(ctx context.Context, logger *zap.SugaredLogger) context.Context { + if logger == nil { + return context.WithValue(ctx, loggerKey, fallbackLogger) + } + return context.WithValue(ctx, loggerKey, logger) +} + +// LoggerFrom returns the logger stored in context. +func LoggerFrom(ctx context.Context) *zap.SugaredLogger { + l := ctx.Value(loggerKey) + if l != nil { + if logger, ok := l.(*zap.SugaredLogger); ok { + return logger + } + } + return fallbackLogger +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go b/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go new file mode 100644 index 000000000..ec17df72e --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go @@ -0,0 +1,76 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package context + +import ( + "context" + "errors" + "math" + "time" +) + +type BackoffStrategy string + +const ( + BackoffStrategyNone = "none" + BackoffStrategyConstant = "constant" + BackoffStrategyLinear = "linear" + BackoffStrategyExponential = "exponential" +) + +var DefaultRetryParams = RetryParams{Strategy: BackoffStrategyNone} + +// RetryParams holds parameters applied to retries +type RetryParams struct { + // Strategy is the backoff strategy to applies between retries + Strategy BackoffStrategy + + // MaxTries is the maximum number of times to retry request before giving up + MaxTries int + + // Period is + // - for none strategy: no delay + // - for constant strategy: the delay interval between retries + // - for linear strategy: interval between retries = Period * retries + // - for exponential strategy: interval between retries = Period * retries^2 + Period time.Duration +} + +// BackoffFor tries will return the time duration that should be used for this +// current try count. +// `tries` is assumed to be the number of times the caller has already retried. +func (r *RetryParams) BackoffFor(tries int) time.Duration { + switch r.Strategy { + case BackoffStrategyConstant: + return r.Period + case BackoffStrategyLinear: + return r.Period * time.Duration(tries) + case BackoffStrategyExponential: + exp := math.Exp2(float64(tries)) + return r.Period * time.Duration(exp) + case BackoffStrategyNone: + fallthrough // default + default: + return r.Period + } +} + +// Backoff is a blocking call to wait for the correct amount of time for the retry. +// `tries` is assumed to be the number of times the caller has already retried. 
+func (r *RetryParams) Backoff(ctx context.Context, tries int) error { + if tries > r.MaxTries { + return errors.New("too many retries") + } + ticker := time.NewTicker(r.BackoffFor(tries)) + select { + case <-ctx.Done(): + ticker.Stop() + return errors.New("context has been cancelled") + case <-ticker.C: + ticker.Stop() + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go b/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go new file mode 100644 index 000000000..a49522f82 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go @@ -0,0 +1,47 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +const ( + TextPlain = "text/plain" + TextJSON = "text/json" + ApplicationJSON = "application/json" + ApplicationXML = "application/xml" + ApplicationCloudEventsJSON = "application/cloudevents+json" + ApplicationCloudEventsBatchJSON = "application/cloudevents-batch+json" +) + +// StringOfApplicationJSON returns a string pointer to "application/json" +func StringOfApplicationJSON() *string { + a := ApplicationJSON + return &a +} + +// StringOfApplicationXML returns a string pointer to "application/xml" +func StringOfApplicationXML() *string { + a := ApplicationXML + return &a +} + +// StringOfTextPlain returns a string pointer to "text/plain" +func StringOfTextPlain() *string { + a := TextPlain + return &a +} + +// StringOfApplicationCloudEventsJSON returns a string pointer to +// "application/cloudevents+json" +func StringOfApplicationCloudEventsJSON() *string { + a := ApplicationCloudEventsJSON + return &a +} + +// StringOfApplicationCloudEventsBatchJSON returns a string pointer to +// "application/cloudevents-batch+json" +func StringOfApplicationCloudEventsBatchJSON() *string { + a := ApplicationCloudEventsBatchJSON + return &a +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go b/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go new file mode 100644 index 000000000..cf2152693 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go @@ -0,0 +1,16 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +const ( + Base64 = "base64" +) + +// StringOfBase64 returns a string pointer to "Base64" +func StringOfBase64() *string { + a := Base64 + return &a +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go new file mode 100644 index 000000000..3e077740b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go @@ -0,0 +1,78 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package datacodec + +import ( + "context" + "fmt" + + "github.com/cloudevents/sdk-go/v2/event/datacodec/json" + "github.com/cloudevents/sdk-go/v2/event/datacodec/text" + "github.com/cloudevents/sdk-go/v2/event/datacodec/xml" +) + +// Decoder is the expected function signature for decoding `in` to `out`. +// If Event sent the payload as base64, Decoder assumes that `in` is the +// decoded base64 byte array. +type Decoder func(ctx context.Context, in []byte, out interface{}) error + +// Encoder is the expected function signature for encoding `in` to bytes. +// Returns an error if the encoder has an issue encoding `in`. 
+type Encoder func(ctx context.Context, in interface{}) ([]byte, error) + +var decoder map[string]Decoder +var encoder map[string]Encoder + +func init() { + decoder = make(map[string]Decoder, 10) + encoder = make(map[string]Encoder, 10) + + AddDecoder("", json.Decode) + AddDecoder("application/json", json.Decode) + AddDecoder("text/json", json.Decode) + AddDecoder("application/xml", xml.Decode) + AddDecoder("text/xml", xml.Decode) + AddDecoder("text/plain", text.Decode) + + AddEncoder("", json.Encode) + AddEncoder("application/json", json.Encode) + AddEncoder("text/json", json.Encode) + AddEncoder("application/xml", xml.Encode) + AddEncoder("text/xml", xml.Encode) + AddEncoder("text/plain", text.Encode) +} + +// AddDecoder registers a decoder for a given content type. The codecs will use +// these to decode the data payload from a cloudevent.Event object. +func AddDecoder(contentType string, fn Decoder) { + decoder[contentType] = fn +} + +// AddEncoder registers an encoder for a given content type. The codecs will +// use these to encode the data payload for a cloudevent.Event object. +func AddEncoder(contentType string, fn Encoder) { + encoder[contentType] = fn +} + +// Decode looks up and invokes the decoder registered for the given content +// type. An error is returned if no decoder is registered for the given +// content type. +func Decode(ctx context.Context, contentType string, in []byte, out interface{}) error { + if fn, ok := decoder[contentType]; ok { + return fn(ctx, in, out) + } + return fmt.Errorf("[decode] unsupported content type: %q", contentType) +} + +// Encode looks up and invokes the encoder registered for the given content +// type. An error is returned if no encoder is registered for the given +// content type. +func Encode(ctx context.Context, contentType string, in interface{}) ([]byte, error) { + if fn, ok := encoder[contentType]; ok { + return fn(ctx, in) + } + return nil, fmt.Errorf("[encode] unsupported content type: %q", contentType) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go new file mode 100644 index 000000000..b681af887 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go @@ -0,0 +1,10 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package datacodec holds the data codec registry and adds known encoders and decoders supporting media types such as +`application/json` and `application/xml`. +*/ +package datacodec diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go new file mode 100644 index 000000000..734ade59f --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go @@ -0,0 +1,56 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package json + +import ( + "context" + "encoding/json" + "fmt" + "reflect" +) + +// Decode takes `in` as []byte. +// If Event sent the payload as base64, Decoder assumes that `in` is the +// decoded base64 byte array. 
+func Decode(ctx context.Context, in []byte, out interface{}) error { + if in == nil { + return nil + } + if out == nil { + return fmt.Errorf("out is nil") + } + + if err := json.Unmarshal(in, out); err != nil { + return fmt.Errorf("[json] found bytes \"%s\", but failed to unmarshal: %s", string(in), err.Error()) + } + return nil +} + +// Encode attempts to json.Marshal `in` into bytes. Encode will inspect `in` +// and returns `in` unmodified if it is detected that `in` is already a []byte; +// Or json.Marshal errors. +func Encode(ctx context.Context, in interface{}) ([]byte, error) { + if in == nil { + return nil, nil + } + + it := reflect.TypeOf(in) + switch it.Kind() { + case reflect.Slice: + if it.Elem().Kind() == reflect.Uint8 { + + if b, ok := in.([]byte); ok && len(b) > 0 { + // check to see if it is a pre-encoded byte string. + if b[0] == byte('"') || b[0] == byte('{') || b[0] == byte('[') { + return b, nil + } + } + + } + } + + return json.Marshal(in) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go new file mode 100644 index 000000000..33e1323c7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package json holds the encoder/decoder implementation for `application/json`. +*/ +package json diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go new file mode 100644 index 000000000..761a10113 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go @@ -0,0 +1,30 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package text + +import ( + "context" + "fmt" +) + +// Text codec converts []byte or string to string and vice-versa. + +func Decode(_ context.Context, in []byte, out interface{}) error { + p, _ := out.(*string) + if p == nil { + return fmt.Errorf("text.Decode out: want *string, got %T", out) + } + *p = string(in) + return nil +} + +func Encode(_ context.Context, in interface{}) ([]byte, error) { + s, ok := in.(string) + if !ok { + return nil, fmt.Errorf("text.Encode in: want string, got %T", in) + } + return []byte(s), nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go new file mode 100644 index 000000000..af10577aa --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package text holds the encoder/decoder implementation for `text/plain`. +*/ +package text diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go new file mode 100644 index 000000000..de68ec3dc --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go @@ -0,0 +1,40 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package xml + +import ( + "context" + "encoding/xml" + "fmt" +) + +// Decode takes `in` as []byte. +// If Event sent the payload as base64, Decoder assumes that `in` is the +// decoded base64 byte array. 
+func Decode(ctx context.Context, in []byte, out interface{}) error { + if in == nil { + return nil + } + + if err := xml.Unmarshal(in, out); err != nil { + return fmt.Errorf("[xml] found bytes, but failed to unmarshal: %s %s", err.Error(), string(in)) + } + return nil +} + +// Encode attempts to xml.Marshal `in` into bytes. Encode will inspect `in` +// and returns `in` unmodified if it is detected that `in` is already a []byte; +// Or xml.Marshal errors. +func Encode(ctx context.Context, in interface{}) ([]byte, error) { + if b, ok := in.([]byte); ok { + // check to see if it is a pre-encoded byte string. + if len(b) > 0 && b[0] == byte('"') { + return b, nil + } + } + + return xml.Marshal(in) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go new file mode 100644 index 000000000..c8d73213f --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package xml holds the encoder/decoder implementation for `application/xml`. +*/ +package xml diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/doc.go new file mode 100644 index 000000000..31c22ce67 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package event provides primitives to work with CloudEvents specification: https://github.com/cloudevents/spec. +*/ +package event diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event.go new file mode 100644 index 000000000..94b5aa0ad --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event.go @@ -0,0 +1,126 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "bytes" + "encoding/json" + "strings" +) + +// Event represents the canonical representation of a CloudEvent. +type Event struct { + Context EventContext + DataEncoded []byte + // DataBase64 indicates if the event, when serialized, represents + // the data field using the base64 encoding. + // In v0.3, this field is superseded by DataContentEncoding + DataBase64 bool + FieldErrors map[string]error +} + +const ( + defaultEventVersion = CloudEventsVersionV1 +) + +func (e *Event) fieldError(field string, err error) { + if e.FieldErrors == nil { + e.FieldErrors = make(map[string]error) + } + e.FieldErrors[field] = err +} + +func (e *Event) fieldOK(field string) { + if e.FieldErrors != nil { + delete(e.FieldErrors, field) + } +} + +// New returns a new Event, an optional version can be passed to change the +// default spec version from 1.0 to the provided version. +func New(version ...string) Event { + specVersion := defaultEventVersion + if len(version) >= 1 { + specVersion = version[0] + } + e := &Event{} + e.SetSpecVersion(specVersion) + return *e +} + +// ExtensionAs is deprecated: access extensions directly via the e.Extensions() map. +// Use functions in the types package to convert extension values. 
+// For example replace this: +// +// var i int +// err := e.ExtensionAs("foo", &i) +// +// With this: +// +// i, err := types.ToInteger(e.Extensions["foo"]) +// +func (e Event) ExtensionAs(name string, obj interface{}) error { + return e.Context.ExtensionAs(name, obj) +} + +// String returns a pretty-printed representation of the Event. +func (e Event) String() string { + b := strings.Builder{} + + b.WriteString(e.Context.String()) + + if e.DataEncoded != nil { + if e.DataBase64 { + b.WriteString("Data (binary),\n ") + } else { + b.WriteString("Data,\n ") + } + switch e.DataMediaType() { + case ApplicationJSON: + var prettyJSON bytes.Buffer + err := json.Indent(&prettyJSON, e.DataEncoded, " ", " ") + if err != nil { + b.Write(e.DataEncoded) + } else { + b.Write(prettyJSON.Bytes()) + } + default: + b.Write(e.DataEncoded) + } + b.WriteString("\n") + } + + return b.String() +} + +func (e Event) Clone() Event { + out := Event{} + out.Context = e.Context.Clone() + out.DataEncoded = cloneBytes(e.DataEncoded) + out.DataBase64 = e.DataBase64 + out.FieldErrors = e.cloneFieldErrors() + return out +} + +func cloneBytes(in []byte) []byte { + if in == nil { + return nil + } + out := make([]byte, len(in)) + copy(out, in) + return out +} + +func (e Event) cloneFieldErrors() map[string]error { + if e.FieldErrors == nil { + return nil + } + newFE := make(map[string]error, len(e.FieldErrors)) + for k, v := range e.FieldErrors { + newFE[k] = v + } + return newFE +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go new file mode 100644 index 000000000..8fc449ed9 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go @@ -0,0 +1,118 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "context" + "encoding/base64" + "fmt" + "strconv" + + "github.com/cloudevents/sdk-go/v2/event/datacodec" +) + +// SetData encodes the given payload with the given content type. +// If the provided payload is a byte array, when marshalled to json it will be encoded as base64. +// If the provided payload is different from byte array, datacodec.Encode is invoked to attempt a +// marshalling to byte array. +func (e *Event) SetData(contentType string, obj interface{}) error { + e.SetDataContentType(contentType) + + if e.SpecVersion() != CloudEventsVersionV1 { + return e.legacySetData(obj) + } + + // Version 1.0 and above. + switch obj := obj.(type) { + case []byte: + e.DataEncoded = obj + e.DataBase64 = true + default: + data, err := datacodec.Encode(context.Background(), e.DataMediaType(), obj) + if err != nil { + return err + } + e.DataEncoded = data + e.DataBase64 = false + } + + return nil +} + +// Deprecated: Delete when we do not have to support Spec v0.3. 
+func (e *Event) legacySetData(obj interface{}) error { + data, err := datacodec.Encode(context.Background(), e.DataMediaType(), obj) + if err != nil { + return err + } + if e.DeprecatedDataContentEncoding() == Base64 { + buf := make([]byte, base64.StdEncoding.EncodedLen(len(data))) + base64.StdEncoding.Encode(buf, data) + e.DataEncoded = buf + e.DataBase64 = false + } else { + data, err := datacodec.Encode(context.Background(), e.DataMediaType(), obj) + if err != nil { + return err + } + e.DataEncoded = data + e.DataBase64 = false + } + return nil +} + +const ( + quotes = `"'` +) + +func (e Event) Data() []byte { + return e.DataEncoded +} + +// DataAs attempts to populate the provided data object with the event payload. +// obj should be a pointer type. +func (e Event) DataAs(obj interface{}) error { + data := e.Data() + + if len(data) == 0 { + // No data. + return nil + } + + if e.SpecVersion() != CloudEventsVersionV1 { + var err error + if data, err = e.legacyConvertData(data); err != nil { + return err + } + } + + return datacodec.Decode(context.Background(), e.DataMediaType(), data, obj) +} + +func (e Event) legacyConvertData(data []byte) ([]byte, error) { + if e.Context.DeprecatedGetDataContentEncoding() == Base64 { + var bs []byte + // test to see if we need to unquote the data. + if data[0] == quotes[0] || data[0] == quotes[1] { + str, err := strconv.Unquote(string(data)) + if err != nil { + return nil, err + } + bs = []byte(str) + } else { + bs = data + } + + buf := make([]byte, base64.StdEncoding.DecodedLen(len(bs))) + n, err := base64.StdEncoding.Decode(buf, bs) + if err != nil { + return nil, fmt.Errorf("failed to decode data from base64: %s", err.Error()) + } + data = buf[:n] + } + + return data, nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go new file mode 100644 index 000000000..2809fed57 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go @@ -0,0 +1,102 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "time" +) + +// EventReader is the interface for reading through an event from attributes. +type EventReader interface { + // SpecVersion returns event.Context.GetSpecVersion(). + SpecVersion() string + // Type returns event.Context.GetType(). + Type() string + // Source returns event.Context.GetSource(). + Source() string + // Subject returns event.Context.GetSubject(). + Subject() string + // ID returns event.Context.GetID(). + ID() string + // Time returns event.Context.GetTime(). + Time() time.Time + // DataSchema returns event.Context.GetDataSchema(). + DataSchema() string + // DataContentType returns event.Context.GetDataContentType(). + DataContentType() string + // DataMediaType returns event.Context.GetDataMediaType(). + DataMediaType() string + // DeprecatedDataContentEncoding returns event.Context.DeprecatedGetDataContentEncoding(). + DeprecatedDataContentEncoding() string + + // Extension Attributes + + // Extensions returns the event.Context.GetExtensions(). + // Extensions use the CloudEvents type system, details in package cloudevents/types. + Extensions() map[string]interface{} + + // ExtensionAs returns event.Context.ExtensionAs(name, obj). + // + // DEPRECATED: Access extensions directly via the e.Extensions() map. + // Use functions in the types package to convert extension values. 
+ // For example replace this: + // + // var i int + // err := e.ExtensionAs("foo", &i) + // + // With this: + // + // i, err := types.ToInteger(e.Extensions["foo"]) + // + ExtensionAs(string, interface{}) error + + // Data Attribute + + // Data returns the raw data buffer + // If the event was encoded with base64 encoding, Data returns the already decoded + // byte array + Data() []byte + + // DataAs attempts to populate the provided data object with the event payload. + DataAs(interface{}) error +} + +// EventWriter is the interface for writing through an event onto attributes. +// If an error is thrown by a sub-component, EventWriter caches the error +// internally and exposes errors with a call to event.Validate(). +type EventWriter interface { + // Context Attributes + + // SetSpecVersion performs event.Context.SetSpecVersion. + SetSpecVersion(string) + // SetType performs event.Context.SetType. + SetType(string) + // SetSource performs event.Context.SetSource. + SetSource(string) + // SetSubject( performs event.Context.SetSubject. + SetSubject(string) + // SetID performs event.Context.SetID. + SetID(string) + // SetTime performs event.Context.SetTime. + SetTime(time.Time) + // SetDataSchema performs event.Context.SetDataSchema. + SetDataSchema(string) + // SetDataContentType performs event.Context.SetDataContentType. + SetDataContentType(string) + // DeprecatedSetDataContentEncoding performs event.Context.DeprecatedSetDataContentEncoding. + SetDataContentEncoding(string) + + // Extension Attributes + + // SetExtension performs event.Context.SetExtension. + SetExtension(string, interface{}) + + // SetData encodes the given payload with the given content type. + // If the provided payload is a byte array, when marshalled to json it will be encoded as base64. + // If the provided payload is different from byte array, datacodec.Encode is invoked to attempt a + // marshalling to byte array. + SetData(string, interface{}) error +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go new file mode 100644 index 000000000..c5f2dc03c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go @@ -0,0 +1,203 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "strings" + + jsoniter "github.com/json-iterator/go" +) + +// WriteJson writes the in event in the provided writer. +// Note: this function assumes the input event is valid. 
+func WriteJson(in *Event, writer io.Writer) error { + stream := jsoniter.ConfigFastest.BorrowStream(writer) + defer jsoniter.ConfigFastest.ReturnStream(stream) + stream.WriteObjectStart() + + var ext map[string]interface{} + var dct *string + var isBase64 bool + + // Write the context (without the extensions) + switch eventContext := in.Context.(type) { + case *EventContextV03: + // Set a bunch of variables we need later + ext = eventContext.Extensions + dct = eventContext.DataContentType + + stream.WriteObjectField("specversion") + stream.WriteString(CloudEventsVersionV03) + stream.WriteMore() + + stream.WriteObjectField("id") + stream.WriteString(eventContext.ID) + stream.WriteMore() + + stream.WriteObjectField("source") + stream.WriteString(eventContext.Source.String()) + stream.WriteMore() + + stream.WriteObjectField("type") + stream.WriteString(eventContext.Type) + + if eventContext.Subject != nil { + stream.WriteMore() + stream.WriteObjectField("subject") + stream.WriteString(*eventContext.Subject) + } + + if eventContext.DataContentEncoding != nil { + isBase64 = true + stream.WriteMore() + stream.WriteObjectField("datacontentencoding") + stream.WriteString(*eventContext.DataContentEncoding) + } + + if eventContext.DataContentType != nil { + stream.WriteMore() + stream.WriteObjectField("datacontenttype") + stream.WriteString(*eventContext.DataContentType) + } + + if eventContext.SchemaURL != nil { + stream.WriteMore() + stream.WriteObjectField("schemaurl") + stream.WriteString(eventContext.SchemaURL.String()) + } + + if eventContext.Time != nil { + stream.WriteMore() + stream.WriteObjectField("time") + stream.WriteString(eventContext.Time.String()) + } + case *EventContextV1: + // Set a bunch of variables we need later + ext = eventContext.Extensions + dct = eventContext.DataContentType + isBase64 = in.DataBase64 + + stream.WriteObjectField("specversion") + stream.WriteString(CloudEventsVersionV1) + stream.WriteMore() + + stream.WriteObjectField("id") + stream.WriteString(eventContext.ID) + stream.WriteMore() + + stream.WriteObjectField("source") + stream.WriteString(eventContext.Source.String()) + stream.WriteMore() + + stream.WriteObjectField("type") + stream.WriteString(eventContext.Type) + + if eventContext.Subject != nil { + stream.WriteMore() + stream.WriteObjectField("subject") + stream.WriteString(*eventContext.Subject) + } + + if eventContext.DataContentType != nil { + stream.WriteMore() + stream.WriteObjectField("datacontenttype") + stream.WriteString(*eventContext.DataContentType) + } + + if eventContext.DataSchema != nil { + stream.WriteMore() + stream.WriteObjectField("dataschema") + stream.WriteString(eventContext.DataSchema.String()) + } + + if eventContext.Time != nil { + stream.WriteMore() + stream.WriteObjectField("time") + stream.WriteString(eventContext.Time.String()) + } + default: + return fmt.Errorf("missing event context") + } + + // Let's do a check on the error + if stream.Error != nil { + return fmt.Errorf("error while writing the event attributes: %w", stream.Error) + } + + // Let's write the body + if in.DataEncoded != nil { + stream.WriteMore() + + // We need to figure out the media type first + var mediaType string + if dct == nil { + mediaType = ApplicationJSON + } else { + // This code is required to extract the media type from the full content type string (which might contain encoding and stuff) + contentType := *dct + i := strings.IndexRune(contentType, ';') + if i == -1 { + i = len(contentType) + } + mediaType = 
strings.TrimSpace(strings.ToLower(contentType[0:i])) + } + + isJson := mediaType == "" || mediaType == ApplicationJSON || mediaType == TextJSON + + // If isJson and no encoding to base64, we don't need to perform additional steps + if isJson && !isBase64 { + stream.WriteObjectField("data") + _, err := stream.Write(in.DataEncoded) + if err != nil { + return fmt.Errorf("error while writing data: %w", err) + } + } else { + if in.Context.GetSpecVersion() == CloudEventsVersionV1 && isBase64 { + stream.WriteObjectField("data_base64") + } else { + stream.WriteObjectField("data") + } + // At this point of we need to write to base 64 string, or we just need to write the plain string + if isBase64 { + stream.WriteString(base64.StdEncoding.EncodeToString(in.DataEncoded)) + } else { + stream.WriteString(string(in.DataEncoded)) + } + } + + } + + // Let's do a check on the error + if stream.Error != nil { + return fmt.Errorf("error while writing the event data: %w", stream.Error) + } + + for k, v := range ext { + stream.WriteMore() + stream.WriteObjectField(k) + stream.WriteVal(v) + } + + stream.WriteObjectEnd() + + // Let's do a check on the error + if stream.Error != nil { + return fmt.Errorf("error while writing the event extensions: %w", stream.Error) + } + return stream.Flush() +} + +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. +func (e Event) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + err := WriteJson(&e, &buf) + return buf.Bytes(), err +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go new file mode 100644 index 000000000..9d1aeeb65 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go @@ -0,0 +1,103 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "time" +) + +var _ EventReader = (*Event)(nil) + +// SpecVersion implements EventReader.SpecVersion +func (e Event) SpecVersion() string { + if e.Context != nil { + return e.Context.GetSpecVersion() + } + return "" +} + +// Type implements EventReader.Type +func (e Event) Type() string { + if e.Context != nil { + return e.Context.GetType() + } + return "" +} + +// Source implements EventReader.Source +func (e Event) Source() string { + if e.Context != nil { + return e.Context.GetSource() + } + return "" +} + +// Subject implements EventReader.Subject +func (e Event) Subject() string { + if e.Context != nil { + return e.Context.GetSubject() + } + return "" +} + +// ID implements EventReader.ID +func (e Event) ID() string { + if e.Context != nil { + return e.Context.GetID() + } + return "" +} + +// Time implements EventReader.Time +func (e Event) Time() time.Time { + if e.Context != nil { + return e.Context.GetTime() + } + return time.Time{} +} + +// DataSchema implements EventReader.DataSchema +func (e Event) DataSchema() string { + if e.Context != nil { + return e.Context.GetDataSchema() + } + return "" +} + +// DataContentType implements EventReader.DataContentType +func (e Event) DataContentType() string { + if e.Context != nil { + return e.Context.GetDataContentType() + } + return "" +} + +// DataMediaType returns the parsed DataMediaType of the event. If parsing +// fails, the empty string is returned. To retrieve the parsing error, use +// `Context.GetDataMediaType` instead. 
+func (e Event) DataMediaType() string { + if e.Context != nil { + mediaType, _ := e.Context.GetDataMediaType() + return mediaType + } + return "" +} + +// DeprecatedDataContentEncoding implements EventReader.DeprecatedDataContentEncoding +func (e Event) DeprecatedDataContentEncoding() string { + if e.Context != nil { + return e.Context.DeprecatedGetDataContentEncoding() + } + return "" +} + +// Extensions implements EventReader.Extensions +func (e Event) Extensions() map[string]interface{} { + if e.Context != nil { + return e.Context.GetExtensions() + } + return map[string]interface{}(nil) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go new file mode 100644 index 000000000..0dd88ae5a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go @@ -0,0 +1,480 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "encoding/base64" + "errors" + "fmt" + "io" + "sync" + + jsoniter "github.com/json-iterator/go" + + "github.com/cloudevents/sdk-go/v2/types" +) + +const specVersionV03Flag uint8 = 1 << 4 +const specVersionV1Flag uint8 = 1 << 5 +const dataBase64Flag uint8 = 1 << 6 +const dataContentTypeFlag uint8 = 1 << 7 + +func checkFlag(state uint8, flag uint8) bool { + return state&flag != 0 +} + +func appendFlag(state *uint8, flag uint8) { + *state = (*state) | flag +} + +var iterPool = sync.Pool{ + New: func() interface{} { + return jsoniter.Parse(jsoniter.ConfigFastest, nil, 1024) + }, +} + +func borrowIterator(reader io.Reader) *jsoniter.Iterator { + iter := iterPool.Get().(*jsoniter.Iterator) + iter.Reset(reader) + return iter +} + +func returnIterator(iter *jsoniter.Iterator) { + iter.Error = nil + iter.Attachment = nil + iterPool.Put(iter) +} + +func ReadJson(out *Event, reader io.Reader) error { + iterator := borrowIterator(reader) + defer returnIterator(iterator) + + return readJsonFromIterator(out, iterator) +} + +// ReadJson allows you to read the bytes reader as an event +func readJsonFromIterator(out *Event, iterator *jsoniter.Iterator) error { + // Parsing dependency graph: + // SpecVersion + // ^ ^ + // | +--------------+ + // + + + // All Attributes datacontenttype (and datacontentencoding for v0.3) + // (except datacontenttype) ^ + // | + // | + // + + // Data + + var state uint8 = 0 + var cachedData []byte + + var ( + // Universally parseable fields. + id string + typ string + source types.URIRef + subject *string + time *types.Timestamp + datacontenttype *string + extensions = make(map[string]interface{}) + + // These fields require knowledge about the specversion to be parsed. 
+ schemaurl jsoniter.Any + datacontentencoding jsoniter.Any + dataschema jsoniter.Any + dataBase64 jsoniter.Any + ) + + for key := iterator.ReadObject(); key != ""; key = iterator.ReadObject() { + // Check if we have some error in our error cache + if iterator.Error != nil { + return iterator.Error + } + + // We have a key, now we need to figure out what to do + // depending on the parsing state + + // If it's a specversion, trigger state change + if key == "specversion" { + if checkFlag(state, specVersionV1Flag|specVersionV03Flag) { + return fmt.Errorf("specversion was already provided") + } + sv := iterator.ReadString() + + // Check proper specversion + switch sv { + case CloudEventsVersionV1: + con := &EventContextV1{ + ID: id, + Type: typ, + Source: source, + Subject: subject, + Time: time, + DataContentType: datacontenttype, + } + + // Add the fields relevant for the version ... + if dataschema != nil { + var err error + con.DataSchema, err = toUriPtr(dataschema) + if err != nil { + return err + } + } + if dataBase64 != nil { + stream := jsoniter.ConfigFastest.BorrowStream(nil) + defer jsoniter.ConfigFastest.ReturnStream(stream) + dataBase64.WriteTo(stream) + cachedData = stream.Buffer() + if stream.Error != nil { + return stream.Error + } + appendFlag(&state, dataBase64Flag) + } + + // ... add all remaining fields as extensions. + if schemaurl != nil { + extensions["schemaurl"] = schemaurl.GetInterface() + } + if datacontentencoding != nil { + extensions["datacontentencoding"] = datacontentencoding.GetInterface() + } + + out.Context = con + appendFlag(&state, specVersionV1Flag) + case CloudEventsVersionV03: + con := &EventContextV03{ + ID: id, + Type: typ, + Source: source, + Subject: subject, + Time: time, + DataContentType: datacontenttype, + } + var err error + // Add the fields relevant for the version ... + if schemaurl != nil { + con.SchemaURL, err = toUriRefPtr(schemaurl) + if err != nil { + return err + } + } + if datacontentencoding != nil { + con.DataContentEncoding, err = toStrPtr(datacontentencoding) + if *con.DataContentEncoding != Base64 { + err = ValidationError{"datacontentencoding": errors.New("invalid datacontentencoding value, the only allowed value is 'base64'")} + } + if err != nil { + return err + } + appendFlag(&state, dataBase64Flag) + } + + // ... add all remaining fields as extensions. + if dataschema != nil { + extensions["dataschema"] = dataschema.GetInterface() + } + if dataBase64 != nil { + extensions["data_base64"] = dataBase64.GetInterface() + } + + out.Context = con + appendFlag(&state, specVersionV03Flag) + default: + return ValidationError{"specversion": errors.New("unknown value: " + sv)} + } + + // Apply all extensions to the context object. + for key, val := range extensions { + if err := out.Context.SetExtension(key, val); err != nil { + return err + } + } + continue + } + + // If no specversion ... 
+ if !checkFlag(state, specVersionV03Flag|specVersionV1Flag) { + switch key { + case "id": + id = iterator.ReadString() + case "type": + typ = iterator.ReadString() + case "source": + source = readUriRef(iterator) + case "subject": + subject = readStrPtr(iterator) + case "time": + time = readTimestamp(iterator) + case "datacontenttype": + datacontenttype = readStrPtr(iterator) + appendFlag(&state, dataContentTypeFlag) + case "data": + cachedData = iterator.SkipAndReturnBytes() + case "data_base64": + dataBase64 = iterator.ReadAny() + case "dataschema": + dataschema = iterator.ReadAny() + case "schemaurl": + schemaurl = iterator.ReadAny() + case "datacontentencoding": + datacontentencoding = iterator.ReadAny() + default: + extensions[key] = iterator.Read() + } + continue + } + + // From this point downward -> we can assume the event has a context pointer non nil + + // If it's a datacontenttype, trigger state change + if key == "datacontenttype" { + if checkFlag(state, dataContentTypeFlag) { + return fmt.Errorf("datacontenttype was already provided") + } + + dct := iterator.ReadString() + + switch ctx := out.Context.(type) { + case *EventContextV03: + ctx.DataContentType = &dct + case *EventContextV1: + ctx.DataContentType = &dct + } + appendFlag(&state, dataContentTypeFlag) + continue + } + + // If it's a datacontentencoding and it's v0.3, trigger state change + if checkFlag(state, specVersionV03Flag) && key == "datacontentencoding" { + if checkFlag(state, dataBase64Flag) { + return ValidationError{"datacontentencoding": errors.New("datacontentencoding was specified twice")} + } + + dce := iterator.ReadString() + + if dce != Base64 { + return ValidationError{"datacontentencoding": errors.New("invalid datacontentencoding value, the only allowed value is 'base64'")} + } + + out.Context.(*EventContextV03).DataContentEncoding = &dce + appendFlag(&state, dataBase64Flag) + continue + } + + // We can parse all attributes, except data. 
+ // If it's data or data_base64 and we don't have the attributes to process it, then we cache it + // The expanded form of this condition is: + // (checkFlag(state, specVersionV1Flag) && !checkFlag(state, dataContentTypeFlag) && (key == "data" || key == "data_base64")) || + // (checkFlag(state, specVersionV03Flag) && !(checkFlag(state, dataContentTypeFlag) && checkFlag(state, dataBase64Flag)) && key == "data") + if (state&(specVersionV1Flag|dataContentTypeFlag) == specVersionV1Flag && (key == "data" || key == "data_base64")) || + ((state&specVersionV03Flag == specVersionV03Flag) && (state&(dataContentTypeFlag|dataBase64Flag) != (dataContentTypeFlag | dataBase64Flag)) && key == "data") { + if key == "data_base64" { + appendFlag(&state, dataBase64Flag) + } + cachedData = iterator.SkipAndReturnBytes() + continue + } + + // At this point or this value is an attribute (excluding datacontenttype and datacontentencoding), or this value is data and this condition is valid: + // (specVersionV1Flag & dataContentTypeFlag) || (specVersionV03Flag & dataContentTypeFlag & dataBase64Flag) + switch eventContext := out.Context.(type) { + case *EventContextV03: + switch key { + case "id": + eventContext.ID = iterator.ReadString() + case "type": + eventContext.Type = iterator.ReadString() + case "source": + eventContext.Source = readUriRef(iterator) + case "subject": + eventContext.Subject = readStrPtr(iterator) + case "time": + eventContext.Time = readTimestamp(iterator) + case "schemaurl": + eventContext.SchemaURL = readUriRefPtr(iterator) + case "data": + iterator.Error = consumeData(out, checkFlag(state, dataBase64Flag), iterator) + default: + if eventContext.Extensions == nil { + eventContext.Extensions = make(map[string]interface{}, 1) + } + iterator.Error = eventContext.SetExtension(key, iterator.Read()) + } + case *EventContextV1: + switch key { + case "id": + eventContext.ID = iterator.ReadString() + case "type": + eventContext.Type = iterator.ReadString() + case "source": + eventContext.Source = readUriRef(iterator) + case "subject": + eventContext.Subject = readStrPtr(iterator) + case "time": + eventContext.Time = readTimestamp(iterator) + case "dataschema": + eventContext.DataSchema = readUriPtr(iterator) + case "data": + iterator.Error = consumeData(out, false, iterator) + case "data_base64": + iterator.Error = consumeData(out, true, iterator) + default: + if eventContext.Extensions == nil { + eventContext.Extensions = make(map[string]interface{}, 1) + } + iterator.Error = eventContext.SetExtension(key, iterator.Read()) + } + } + } + + if state&(specVersionV03Flag|specVersionV1Flag) == 0 { + return ValidationError{"specversion": errors.New("no specversion")} + } + + if iterator.Error != nil { + return iterator.Error + } + + // If there is a dataToken cached, we always defer at the end the processing + // because nor datacontenttype or datacontentencoding are mandatory. 
+ if cachedData != nil { + return consumeDataAsBytes(out, checkFlag(state, dataBase64Flag), cachedData) + } + return nil +} + +func consumeDataAsBytes(e *Event, isBase64 bool, b []byte) error { + if isBase64 { + e.DataBase64 = true + + // Allocate payload byte buffer + base64Encoded := b[1 : len(b)-1] // remove quotes + e.DataEncoded = make([]byte, base64.StdEncoding.DecodedLen(len(base64Encoded))) + length, err := base64.StdEncoding.Decode(e.DataEncoded, base64Encoded) + if err != nil { + return err + } + e.DataEncoded = e.DataEncoded[0:length] + return nil + } + + mt, _ := e.Context.GetDataMediaType() + // Empty content type assumes json + if mt != "" && mt != ApplicationJSON && mt != TextJSON { + // If not json, then data is encoded as string + iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, b) + src := iter.ReadString() // handles escaping + e.DataEncoded = []byte(src) + if iter.Error != nil { + return fmt.Errorf("unexpected data payload for media type %q, expected a string: %w", mt, iter.Error) + } + return nil + } + + e.DataEncoded = b + return nil +} + +func consumeData(e *Event, isBase64 bool, iter *jsoniter.Iterator) error { + if isBase64 { + e.DataBase64 = true + + // Allocate payload byte buffer + base64Encoded := iter.ReadStringAsSlice() + e.DataEncoded = make([]byte, base64.StdEncoding.DecodedLen(len(base64Encoded))) + length, err := base64.StdEncoding.Decode(e.DataEncoded, base64Encoded) + if err != nil { + return err + } + e.DataEncoded = e.DataEncoded[0:length] + return nil + } + + mt, _ := e.Context.GetDataMediaType() + if mt != ApplicationJSON && mt != TextJSON { + // If not json, then data is encoded as string + src := iter.ReadString() // handles escaping + e.DataEncoded = []byte(src) + if iter.Error != nil { + return fmt.Errorf("unexpected data payload for media type %q, expected a string: %w", mt, iter.Error) + } + return nil + } + + e.DataEncoded = iter.SkipAndReturnBytes() + return nil +} + +func readUriRef(iter *jsoniter.Iterator) types.URIRef { + str := iter.ReadString() + uriRef := types.ParseURIRef(str) + if uriRef == nil { + iter.Error = fmt.Errorf("cannot parse uri ref: %v", str) + return types.URIRef{} + } + return *uriRef +} + +func readStrPtr(iter *jsoniter.Iterator) *string { + str := iter.ReadString() + if str == "" { + return nil + } + return &str +} + +func readUriRefPtr(iter *jsoniter.Iterator) *types.URIRef { + return types.ParseURIRef(iter.ReadString()) +} + +func readUriPtr(iter *jsoniter.Iterator) *types.URI { + return types.ParseURI(iter.ReadString()) +} + +func readTimestamp(iter *jsoniter.Iterator) *types.Timestamp { + t, err := types.ParseTimestamp(iter.ReadString()) + if err != nil { + iter.Error = err + } + return t +} + +func toStrPtr(val jsoniter.Any) (*string, error) { + str := val.ToString() + if val.LastError() != nil { + return nil, val.LastError() + } + if str == "" { + return nil, nil + } + return &str, nil +} + +func toUriRefPtr(val jsoniter.Any) (*types.URIRef, error) { + str := val.ToString() + if val.LastError() != nil { + return nil, val.LastError() + } + return types.ParseURIRef(str), nil +} + +func toUriPtr(val jsoniter.Any) (*types.URI, error) { + str := val.ToString() + if val.LastError() != nil { + return nil, val.LastError() + } + return types.ParseURI(str), nil +} + +// UnmarshalJSON implements the json unmarshal method used when this type is +// unmarshaled using json.Unmarshal. 
+func (e *Event) UnmarshalJSON(b []byte) error { + iterator := jsoniter.ConfigFastest.BorrowIterator(b) + defer jsoniter.ConfigFastest.ReturnIterator(iterator) + return readJsonFromIterator(e, iterator) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go new file mode 100644 index 000000000..958ecc47d --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go @@ -0,0 +1,50 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "fmt" + "strings" +) + +type ValidationError map[string]error + +func (e ValidationError) Error() string { + b := strings.Builder{} + for k, v := range e { + b.WriteString(k) + b.WriteString(": ") + b.WriteString(v.Error()) + b.WriteRune('\n') + } + return b.String() +} + +// Validate performs a spec based validation on this event. +// Validation is dependent on the spec version specified in the event context. +func (e Event) Validate() error { + if e.Context == nil { + return ValidationError{"specversion": fmt.Errorf("missing Event.Context")} + } + + errs := map[string]error{} + if e.FieldErrors != nil { + for k, v := range e.FieldErrors { + errs[k] = v + } + } + + if fieldErrors := e.Context.Validate(); fieldErrors != nil { + for k, v := range fieldErrors { + errs[k] = v + } + } + + if len(errs) > 0 { + return ValidationError(errs) + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go new file mode 100644 index 000000000..ddfb1be38 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go @@ -0,0 +1,117 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "fmt" + "time" +) + +var _ EventWriter = (*Event)(nil) + +// SetSpecVersion implements EventWriter.SetSpecVersion +func (e *Event) SetSpecVersion(v string) { + switch v { + case CloudEventsVersionV03: + if e.Context == nil { + e.Context = &EventContextV03{} + } else { + e.Context = e.Context.AsV03() + } + case CloudEventsVersionV1: + if e.Context == nil { + e.Context = &EventContextV1{} + } else { + e.Context = e.Context.AsV1() + } + default: + e.fieldError("specversion", fmt.Errorf("a valid spec version is required: [%s, %s]", + CloudEventsVersionV03, CloudEventsVersionV1)) + return + } + e.fieldOK("specversion") +} + +// SetType implements EventWriter.SetType +func (e *Event) SetType(t string) { + if err := e.Context.SetType(t); err != nil { + e.fieldError("type", err) + } else { + e.fieldOK("type") + } +} + +// SetSource implements EventWriter.SetSource +func (e *Event) SetSource(s string) { + if err := e.Context.SetSource(s); err != nil { + e.fieldError("source", err) + } else { + e.fieldOK("source") + } +} + +// SetSubject implements EventWriter.SetSubject +func (e *Event) SetSubject(s string) { + if err := e.Context.SetSubject(s); err != nil { + e.fieldError("subject", err) + } else { + e.fieldOK("subject") + } +} + +// SetID implements EventWriter.SetID +func (e *Event) SetID(id string) { + if err := e.Context.SetID(id); err != nil { + e.fieldError("id", err) + } else { + e.fieldOK("id") + } +} + +// SetTime implements EventWriter.SetTime +func (e *Event) SetTime(t time.Time) { + if err := e.Context.SetTime(t); err != nil { + e.fieldError("time", err) + } else { + e.fieldOK("time") + } +} + +// SetDataSchema implements 
EventWriter.SetDataSchema +func (e *Event) SetDataSchema(s string) { + if err := e.Context.SetDataSchema(s); err != nil { + e.fieldError("dataschema", err) + } else { + e.fieldOK("dataschema") + } +} + +// SetDataContentType implements EventWriter.SetDataContentType +func (e *Event) SetDataContentType(ct string) { + if err := e.Context.SetDataContentType(ct); err != nil { + e.fieldError("datacontenttype", err) + } else { + e.fieldOK("datacontenttype") + } +} + +// SetDataContentEncoding is deprecated. Implements EventWriter.SetDataContentEncoding. +func (e *Event) SetDataContentEncoding(enc string) { + if err := e.Context.DeprecatedSetDataContentEncoding(enc); err != nil { + e.fieldError("datacontentencoding", err) + } else { + e.fieldOK("datacontentencoding") + } +} + +// SetExtension implements EventWriter.SetExtension +func (e *Event) SetExtension(name string, obj interface{}) { + if err := e.Context.SetExtension(name, obj); err != nil { + e.fieldError("extension:"+name, err) + } else { + e.fieldOK("extension:" + name) + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go new file mode 100644 index 000000000..a39565afa --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go @@ -0,0 +1,125 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import "time" + +// EventContextReader are the methods required to be a reader of context +// attributes. +type EventContextReader interface { + // GetSpecVersion returns the native CloudEvents Spec version of the event + // context. + GetSpecVersion() string + // GetType returns the CloudEvents type from the context. + GetType() string + // GetSource returns the CloudEvents source from the context. + GetSource() string + // GetSubject returns the CloudEvents subject from the context. + GetSubject() string + // GetID returns the CloudEvents ID from the context. + GetID() string + // GetTime returns the CloudEvents creation time from the context. + GetTime() time.Time + // GetDataSchema returns the CloudEvents schema URL (if any) from the + // context. + GetDataSchema() string + // GetDataContentType returns content type on the context. + GetDataContentType() string + // DeprecatedGetDataContentEncoding returns content encoding on the context. + DeprecatedGetDataContentEncoding() string + + // GetDataMediaType returns the MIME media type for encoded data, which is + // needed by both encoding and decoding. This is a processed form of + // GetDataContentType and it may return an error. + GetDataMediaType() (string, error) + + // DEPRECATED: Access extensions directly via the GetExtensions() + // For example replace this: + // + // var i int + // err := ec.ExtensionAs("foo", &i) + // + // With this: + // + // i, err := types.ToInteger(ec.GetExtensions["foo"]) + // + ExtensionAs(string, interface{}) error + + // GetExtensions returns the full extensions map. + // + // Extensions use the CloudEvents type system, details in package cloudevents/types. + GetExtensions() map[string]interface{} + + // GetExtension returns the extension associated with with the given key. + // The given key is case insensitive. If the extension can not be found, + // an error will be returned. + GetExtension(string) (interface{}, error) +} + +// EventContextWriter are the methods required to be a writer of context +// attributes. 
+type EventContextWriter interface { + // SetType sets the type of the context. + SetType(string) error + // SetSource sets the source of the context. + SetSource(string) error + // SetSubject sets the subject of the context. + SetSubject(string) error + // SetID sets the ID of the context. + SetID(string) error + // SetTime sets the time of the context. + SetTime(time time.Time) error + // SetDataSchema sets the schema url of the context. + SetDataSchema(string) error + // SetDataContentType sets the data content type of the context. + SetDataContentType(string) error + // DeprecatedSetDataContentEncoding sets the data context encoding of the context. + DeprecatedSetDataContentEncoding(string) error + + // SetExtension sets the given interface onto the extension attributes + // determined by the provided name. + // + // This function fails in V1 if the name doesn't respect the regex ^[a-zA-Z0-9]+$ + // + // Package ./types documents the types that are allowed as extension values. + SetExtension(string, interface{}) error +} + +// EventContextConverter are the methods that allow for event version +// conversion. +type EventContextConverter interface { + // AsV03 provides a translation from whatever the "native" encoding of the + // CloudEvent was to the equivalent in v0.3 field names, moving fields to or + // from extensions as necessary. + AsV03() *EventContextV03 + + // AsV1 provides a translation from whatever the "native" encoding of the + // CloudEvent was to the equivalent in v1.0 field names, moving fields to or + // from extensions as necessary. + AsV1() *EventContextV1 +} + +// EventContext is conical interface for a CloudEvents Context. +type EventContext interface { + // EventContextConverter allows for conversion between versions. + EventContextConverter + + // EventContextReader adds methods for reading context. + EventContextReader + + // EventContextWriter adds methods for writing to context. + EventContextWriter + + // Validate the event based on the specifics of the CloudEvents spec version + // represented by this event context. + Validate() ValidationError + + // Clone clones the event context. + Clone() EventContext + + // String returns a pretty-printed representation of the EventContext. + String() string +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go new file mode 100644 index 000000000..c511c81c4 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go @@ -0,0 +1,329 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "encoding/json" + "fmt" + "mime" + "sort" + "strings" + + "github.com/cloudevents/sdk-go/v2/types" +) + +const ( + // CloudEventsVersionV03 represents the version 0.3 of the CloudEvents spec. + CloudEventsVersionV03 = "0.3" +) + +var specV03Attributes = map[string]struct{}{ + "type": {}, + "source": {}, + "subject": {}, + "id": {}, + "time": {}, + "schemaurl": {}, + "datacontenttype": {}, + "datacontentencoding": {}, +} + +// EventContextV03 represents the non-data attributes of a CloudEvents v0.3 +// event. +type EventContextV03 struct { + // Type - The type of the occurrence which has happened. + Type string `json:"type"` + // Source - A URI describing the event producer. + Source types.URIRef `json:"source"` + // Subject - The subject of the event in the context of the event producer + // (identified by `source`). 
+ Subject *string `json:"subject,omitempty"` + // ID of the event; must be non-empty and unique within the scope of the producer. + ID string `json:"id"` + // Time - A Timestamp when the event happened. + Time *types.Timestamp `json:"time,omitempty"` + // DataSchema - A link to the schema that the `data` attribute adheres to. + SchemaURL *types.URIRef `json:"schemaurl,omitempty"` + // GetDataMediaType - A MIME (RFC2046) string describing the media type of `data`. + DataContentType *string `json:"datacontenttype,omitempty"` + // DeprecatedDataContentEncoding describes the content encoding for the `data` attribute. Valid: nil, `Base64`. + DataContentEncoding *string `json:"datacontentencoding,omitempty"` + // Extensions - Additional extension metadata beyond the base spec. + Extensions map[string]interface{} `json:"-"` +} + +// Adhere to EventContext +var _ EventContext = (*EventContextV03)(nil) + +// ExtensionAs implements EventContext.ExtensionAs +func (ec EventContextV03) ExtensionAs(name string, obj interface{}) error { + value, ok := ec.Extensions[name] + if !ok { + return fmt.Errorf("extension %q does not exist", name) + } + + // Try to unmarshal extension if we find it as a RawMessage. + switch v := value.(type) { + case json.RawMessage: + if err := json.Unmarshal(v, obj); err == nil { + // if that worked, return with obj set. + return nil + } + } + // else try as a string ptr. + + // Only support *string for now. + switch v := obj.(type) { + case *string: + if valueAsString, ok := value.(string); ok { + *v = valueAsString + return nil + } else { + return fmt.Errorf("invalid type for extension %q", name) + } + default: + return fmt.Errorf("unknown extension type %T", obj) + } +} + +// SetExtension adds the extension 'name' with value 'value' to the CloudEvents +// context. This function fails if the name uses a reserved event context key. 
+func (ec *EventContextV03) SetExtension(name string, value interface{}) error { + if ec.Extensions == nil { + ec.Extensions = make(map[string]interface{}) + } + + if _, ok := specV03Attributes[strings.ToLower(name)]; ok { + return fmt.Errorf("bad key %q: CloudEvents spec attribute MUST NOT be overwritten by extension", name) + } + + if value == nil { + delete(ec.Extensions, name) + if len(ec.Extensions) == 0 { + ec.Extensions = nil + } + return nil + } else { + v, err := types.Validate(value) + if err == nil { + ec.Extensions[name] = v + } + return err + } +} + +// Clone implements EventContextConverter.Clone +func (ec EventContextV03) Clone() EventContext { + ec03 := ec.AsV03() + ec03.Source = types.Clone(ec.Source).(types.URIRef) + if ec.Time != nil { + ec03.Time = types.Clone(ec.Time).(*types.Timestamp) + } + if ec.SchemaURL != nil { + ec03.SchemaURL = types.Clone(ec.SchemaURL).(*types.URIRef) + } + ec03.Extensions = ec.cloneExtensions() + return ec03 +} + +func (ec *EventContextV03) cloneExtensions() map[string]interface{} { + old := ec.Extensions + if old == nil { + return nil + } + new := make(map[string]interface{}, len(ec.Extensions)) + for k, v := range old { + new[k] = types.Clone(v) + } + return new +} + +// AsV03 implements EventContextConverter.AsV03 +func (ec EventContextV03) AsV03() *EventContextV03 { + return &ec +} + +// AsV1 implements EventContextConverter.AsV1 +func (ec EventContextV03) AsV1() *EventContextV1 { + ret := EventContextV1{ + ID: ec.ID, + Time: ec.Time, + Type: ec.Type, + DataContentType: ec.DataContentType, + Source: types.URIRef{URL: ec.Source.URL}, + Subject: ec.Subject, + Extensions: make(map[string]interface{}), + } + if ec.SchemaURL != nil { + ret.DataSchema = &types.URI{URL: ec.SchemaURL.URL} + } + + // DataContentEncoding was removed in 1.0, so put it in an extension for 1.0. + if ec.DataContentEncoding != nil { + _ = ret.SetExtension(DataContentEncodingKey, *ec.DataContentEncoding) + } + + if ec.Extensions != nil { + for k, v := range ec.Extensions { + k = strings.ToLower(k) + ret.Extensions[k] = v + } + } + if len(ret.Extensions) == 0 { + ret.Extensions = nil + } + return &ret +} + +// Validate returns errors based on requirements from the CloudEvents spec. +// For more details, see https://github.com/cloudevents/spec/blob/master/spec.md +// As of Feb 26, 2019, commit 17c32ea26baf7714ad027d9917d03d2fff79fc7e +// + https://github.com/cloudevents/spec/pull/387 -> datacontentencoding +// + https://github.com/cloudevents/spec/pull/406 -> subject +func (ec EventContextV03) Validate() ValidationError { + errors := map[string]error{} + + // type + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // SHOULD be prefixed with a reverse-DNS name. The prefixed domain dictates the organization which defines the semantics of this event type. 
+ eventType := strings.TrimSpace(ec.Type) + if eventType == "" { + errors["type"] = fmt.Errorf("MUST be a non-empty string") + } + + // source + // Type: URI-reference + // Constraints: + // REQUIRED + source := strings.TrimSpace(ec.Source.String()) + if source == "" { + errors["source"] = fmt.Errorf("REQUIRED") + } + + // subject + // Type: String + // Constraints: + // OPTIONAL + // MUST be a non-empty string + if ec.Subject != nil { + subject := strings.TrimSpace(*ec.Subject) + if subject == "" { + errors["subject"] = fmt.Errorf("if present, MUST be a non-empty string") + } + } + + // id + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // MUST be unique within the scope of the producer + id := strings.TrimSpace(ec.ID) + if id == "" { + errors["id"] = fmt.Errorf("MUST be a non-empty string") + + // no way to test "MUST be unique within the scope of the producer" + } + + // time + // Type: Timestamp + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3339 + // --> no need to test this, no way to set the time without it being valid. + + // schemaurl + // Type: URI + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3986 + if ec.SchemaURL != nil { + schemaURL := strings.TrimSpace(ec.SchemaURL.String()) + // empty string is not RFC 3986 compatible. + if schemaURL == "" { + errors["schemaurl"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 3986") + } + } + + // datacontenttype + // Type: String per RFC 2046 + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 2046 + if ec.DataContentType != nil { + dataContentType := strings.TrimSpace(*ec.DataContentType) + if dataContentType == "" { + errors["datacontenttype"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 2046") + } else { + _, _, err := mime.ParseMediaType(dataContentType) + if err != nil { + errors["datacontenttype"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 2046") + } + } + } + + // datacontentencoding + // Type: String per RFC 2045 Section 6.1 + // Constraints: + // The attribute MUST be set if the data attribute contains string-encoded binary data. + // Otherwise the attribute MUST NOT be set. + // If present, MUST adhere to RFC 2045 Section 6.1 + if ec.DataContentEncoding != nil { + dataContentEncoding := strings.ToLower(strings.TrimSpace(*ec.DataContentEncoding)) + if dataContentEncoding != Base64 { + errors["datacontentencoding"] = fmt.Errorf("if present, MUST adhere to RFC 2045 Section 6.1") + } + } + + if len(errors) > 0 { + return errors + } + return nil +} + +// String returns a pretty-printed representation of the EventContext. 
+func (ec EventContextV03) String() string { + b := strings.Builder{} + + b.WriteString("Context Attributes,\n") + + b.WriteString(" specversion: " + CloudEventsVersionV03 + "\n") + b.WriteString(" type: " + ec.Type + "\n") + b.WriteString(" source: " + ec.Source.String() + "\n") + if ec.Subject != nil { + b.WriteString(" subject: " + *ec.Subject + "\n") + } + b.WriteString(" id: " + ec.ID + "\n") + if ec.Time != nil { + b.WriteString(" time: " + ec.Time.String() + "\n") + } + if ec.SchemaURL != nil { + b.WriteString(" schemaurl: " + ec.SchemaURL.String() + "\n") + } + if ec.DataContentType != nil { + b.WriteString(" datacontenttype: " + *ec.DataContentType + "\n") + } + if ec.DataContentEncoding != nil { + b.WriteString(" datacontentencoding: " + *ec.DataContentEncoding + "\n") + } + + if ec.Extensions != nil && len(ec.Extensions) > 0 { + b.WriteString("Extensions,\n") + keys := make([]string, 0, len(ec.Extensions)) + for k := range ec.Extensions { + keys = append(keys, k) + } + sort.Strings(keys) + for _, key := range keys { + b.WriteString(fmt.Sprintf(" %s: %v\n", key, ec.Extensions[key])) + } + } + + return b.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go new file mode 100644 index 000000000..2cd27a705 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go @@ -0,0 +1,99 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "fmt" + "strings" + "time" +) + +// GetSpecVersion implements EventContextReader.GetSpecVersion +func (ec EventContextV03) GetSpecVersion() string { + return CloudEventsVersionV03 +} + +// GetDataContentType implements EventContextReader.GetDataContentType +func (ec EventContextV03) GetDataContentType() string { + if ec.DataContentType != nil { + return *ec.DataContentType + } + return "" +} + +// GetDataMediaType implements EventContextReader.GetDataMediaType +func (ec EventContextV03) GetDataMediaType() (string, error) { + if ec.DataContentType != nil { + dct := *ec.DataContentType + i := strings.IndexRune(dct, ';') + if i == -1 { + return dct, nil + } + return strings.TrimSpace(dct[0:i]), nil + } + return "", nil +} + +// GetType implements EventContextReader.GetType +func (ec EventContextV03) GetType() string { + return ec.Type +} + +// GetSource implements EventContextReader.GetSource +func (ec EventContextV03) GetSource() string { + return ec.Source.String() +} + +// GetSubject implements EventContextReader.GetSubject +func (ec EventContextV03) GetSubject() string { + if ec.Subject != nil { + return *ec.Subject + } + return "" +} + +// GetTime implements EventContextReader.GetTime +func (ec EventContextV03) GetTime() time.Time { + if ec.Time != nil { + return ec.Time.Time + } + return time.Time{} +} + +// GetID implements EventContextReader.GetID +func (ec EventContextV03) GetID() string { + return ec.ID +} + +// GetDataSchema implements EventContextReader.GetDataSchema +func (ec EventContextV03) GetDataSchema() string { + if ec.SchemaURL != nil { + return ec.SchemaURL.String() + } + return "" +} + +// DeprecatedGetDataContentEncoding implements EventContextReader.DeprecatedGetDataContentEncoding +func (ec EventContextV03) DeprecatedGetDataContentEncoding() string { + if ec.DataContentEncoding != nil { + return *ec.DataContentEncoding + } + return "" +} + +// GetExtensions implements EventContextReader.GetExtensions +func (ec 
EventContextV03) GetExtensions() map[string]interface{} { + return ec.Extensions +} + +// GetExtension implements EventContextReader.GetExtension +func (ec EventContextV03) GetExtension(key string) (interface{}, error) { + v, ok := caseInsensitiveSearch(key, ec.Extensions) + if !ok { + return "", fmt.Errorf("%q not found", key) + } + return v, nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go new file mode 100644 index 000000000..5d664635e --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go @@ -0,0 +1,103 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "errors" + "net/url" + "strings" + "time" + + "github.com/cloudevents/sdk-go/v2/types" +) + +// Adhere to EventContextWriter +var _ EventContextWriter = (*EventContextV03)(nil) + +// SetDataContentType implements EventContextWriter.SetDataContentType +func (ec *EventContextV03) SetDataContentType(ct string) error { + ct = strings.TrimSpace(ct) + if ct == "" { + ec.DataContentType = nil + } else { + ec.DataContentType = &ct + } + return nil +} + +// SetType implements EventContextWriter.SetType +func (ec *EventContextV03) SetType(t string) error { + t = strings.TrimSpace(t) + ec.Type = t + return nil +} + +// SetSource implements EventContextWriter.SetSource +func (ec *EventContextV03) SetSource(u string) error { + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.Source = types.URIRef{URL: *pu} + return nil +} + +// SetSubject implements EventContextWriter.SetSubject +func (ec *EventContextV03) SetSubject(s string) error { + s = strings.TrimSpace(s) + if s == "" { + ec.Subject = nil + } else { + ec.Subject = &s + } + return nil +} + +// SetID implements EventContextWriter.SetID +func (ec *EventContextV03) SetID(id string) error { + id = strings.TrimSpace(id) + if id == "" { + return errors.New("id is required to be a non-empty string") + } + ec.ID = id + return nil +} + +// SetTime implements EventContextWriter.SetTime +func (ec *EventContextV03) SetTime(t time.Time) error { + if t.IsZero() { + ec.Time = nil + } else { + ec.Time = &types.Timestamp{Time: t} + } + return nil +} + +// SetDataSchema implements EventContextWriter.SetDataSchema +func (ec *EventContextV03) SetDataSchema(u string) error { + u = strings.TrimSpace(u) + if u == "" { + ec.SchemaURL = nil + return nil + } + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.SchemaURL = &types.URIRef{URL: *pu} + return nil +} + +// DeprecatedSetDataContentEncoding implements EventContextWriter.DeprecatedSetDataContentEncoding +func (ec *EventContextV03) DeprecatedSetDataContentEncoding(e string) error { + e = strings.ToLower(strings.TrimSpace(e)) + if e == "" { + ec.DataContentEncoding = nil + } else { + ec.DataContentEncoding = &e + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go new file mode 100644 index 000000000..8f164502b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go @@ -0,0 +1,315 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "fmt" + "mime" + "sort" + "strings" + + "github.com/cloudevents/sdk-go/v2/types" +) + +// WIP: AS OF SEP 20, 2019 + +const ( + // CloudEventsVersionV1 represents the version 1.0 of 
the CloudEvents spec. + CloudEventsVersionV1 = "1.0" +) + +var specV1Attributes = map[string]struct{}{ + "id": {}, + "source": {}, + "type": {}, + "datacontenttype": {}, + "subject": {}, + "time": {}, + "specversion": {}, + "dataschema": {}, +} + +// EventContextV1 represents the non-data attributes of a CloudEvents v1.0 +// event. +type EventContextV1 struct { + // ID of the event; must be non-empty and unique within the scope of the producer. + // +required + ID string `json:"id"` + // Source - A URI describing the event producer. + // +required + Source types.URIRef `json:"source"` + // Type - The type of the occurrence which has happened. + // +required + Type string `json:"type"` + + // DataContentType - A MIME (RFC2046) string describing the media type of `data`. + // +optional + DataContentType *string `json:"datacontenttype,omitempty"` + // Subject - The subject of the event in the context of the event producer + // (identified by `source`). + // +optional + Subject *string `json:"subject,omitempty"` + // Time - A Timestamp when the event happened. + // +optional + Time *types.Timestamp `json:"time,omitempty"` + // DataSchema - A link to the schema that the `data` attribute adheres to. + // +optional + DataSchema *types.URI `json:"dataschema,omitempty"` + + // Extensions - Additional extension metadata beyond the base spec. + // +optional + Extensions map[string]interface{} `json:"-"` +} + +// Adhere to EventContext +var _ EventContext = (*EventContextV1)(nil) + +// ExtensionAs implements EventContext.ExtensionAs +func (ec EventContextV1) ExtensionAs(name string, obj interface{}) error { + name = strings.ToLower(name) + value, ok := ec.Extensions[name] + if !ok { + return fmt.Errorf("extension %q does not exist", name) + } + + // Only support *string for now. + if v, ok := obj.(*string); ok { + if *v, ok = value.(string); ok { + return nil + } + } + return fmt.Errorf("unknown extension type %T", obj) +} + +// SetExtension adds the extension 'name' with value 'value' to the CloudEvents +// context. This function fails if the name doesn't respect the regex +// ^[a-zA-Z0-9]+$ or if the name uses a reserved event context key. 
+func (ec *EventContextV1) SetExtension(name string, value interface{}) error { + if err := validateExtensionName(name); err != nil { + return err + } + + if _, ok := specV1Attributes[strings.ToLower(name)]; ok { + return fmt.Errorf("bad key %q: CloudEvents spec attribute MUST NOT be overwritten by extension", name) + } + + name = strings.ToLower(name) + if ec.Extensions == nil { + ec.Extensions = make(map[string]interface{}) + } + if value == nil { + delete(ec.Extensions, name) + if len(ec.Extensions) == 0 { + ec.Extensions = nil + } + return nil + } else { + v, err := types.Validate(value) // Ensure it's a legal CE attribute value + if err == nil { + ec.Extensions[name] = v + } + return err + } +} + +// Clone implements EventContextConverter.Clone +func (ec EventContextV1) Clone() EventContext { + ec1 := ec.AsV1() + ec1.Source = types.Clone(ec.Source).(types.URIRef) + if ec.Time != nil { + ec1.Time = types.Clone(ec.Time).(*types.Timestamp) + } + if ec.DataSchema != nil { + ec1.DataSchema = types.Clone(ec.DataSchema).(*types.URI) + } + ec1.Extensions = ec.cloneExtensions() + return ec1 +} + +func (ec *EventContextV1) cloneExtensions() map[string]interface{} { + old := ec.Extensions + if old == nil { + return nil + } + new := make(map[string]interface{}, len(ec.Extensions)) + for k, v := range old { + new[k] = types.Clone(v) + } + return new +} + +// AsV03 implements EventContextConverter.AsV03 +func (ec EventContextV1) AsV03() *EventContextV03 { + ret := EventContextV03{ + ID: ec.ID, + Time: ec.Time, + Type: ec.Type, + DataContentType: ec.DataContentType, + Source: types.URIRef{URL: ec.Source.URL}, + Subject: ec.Subject, + Extensions: make(map[string]interface{}), + } + + if ec.DataSchema != nil { + ret.SchemaURL = &types.URIRef{URL: ec.DataSchema.URL} + } + + if ec.Extensions != nil { + for k, v := range ec.Extensions { + k = strings.ToLower(k) + // DeprecatedDataContentEncoding was introduced in 0.3, removed in 1.0 + if strings.EqualFold(k, DataContentEncodingKey) { + etv, ok := v.(string) + if ok && etv != "" { + ret.DataContentEncoding = &etv + } + continue + } + ret.Extensions[k] = v + } + } + if len(ret.Extensions) == 0 { + ret.Extensions = nil + } + return &ret +} + +// AsV1 implements EventContextConverter.AsV1 +func (ec EventContextV1) AsV1() *EventContextV1 { + return &ec +} + +// Validate returns errors based on requirements from the CloudEvents spec. +// For more details, see https://github.com/cloudevents/spec/blob/v1.0/spec.md. +func (ec EventContextV1) Validate() ValidationError { + errors := map[string]error{} + + // id + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // MUST be unique within the scope of the producer + id := strings.TrimSpace(ec.ID) + if id == "" { + errors["id"] = fmt.Errorf("MUST be a non-empty string") + // no way to test "MUST be unique within the scope of the producer" + } + + // source + // Type: URI-reference + // Constraints: + // REQUIRED + // MUST be a non-empty URI-reference + // An absolute URI is RECOMMENDED + source := strings.TrimSpace(ec.Source.String()) + if source == "" { + errors["source"] = fmt.Errorf("REQUIRED") + } + + // type + // Type: String + // Constraints: + // REQUIRED + // MUST be a non-empty string + // SHOULD be prefixed with a reverse-DNS name. The prefixed domain dictates the organization which defines the semantics of this event type. 
+ eventType := strings.TrimSpace(ec.Type) + if eventType == "" { + errors["type"] = fmt.Errorf("MUST be a non-empty string") + } + + // The following attributes are optional but still have validation. + + // datacontenttype + // Type: String per RFC 2046 + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 2046 + if ec.DataContentType != nil { + dataContentType := strings.TrimSpace(*ec.DataContentType) + if dataContentType == "" { + errors["datacontenttype"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 2046") + } else { + _, _, err := mime.ParseMediaType(dataContentType) + if err != nil { + errors["datacontenttype"] = fmt.Errorf("failed to parse RFC 2046 media type %w", err) + } + } + } + + // dataschema + // Type: URI + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3986 + if ec.DataSchema != nil { + if !ec.DataSchema.Validate() { + errors["dataschema"] = fmt.Errorf("if present, MUST adhere to the format specified in RFC 3986, Section 4.3. Absolute URI") + } + } + + // subject + // Type: String + // Constraints: + // OPTIONAL + // MUST be a non-empty string + if ec.Subject != nil { + subject := strings.TrimSpace(*ec.Subject) + if subject == "" { + errors["subject"] = fmt.Errorf("if present, MUST be a non-empty string") + } + } + + // time + // Type: Timestamp + // Constraints: + // OPTIONAL + // If present, MUST adhere to the format specified in RFC 3339 + // --> no need to test this, no way to set the time without it being valid. + + if len(errors) > 0 { + return errors + } + return nil +} + +// String returns a pretty-printed representation of the EventContext. +func (ec EventContextV1) String() string { + b := strings.Builder{} + + b.WriteString("Context Attributes,\n") + + b.WriteString(" specversion: " + CloudEventsVersionV1 + "\n") + b.WriteString(" type: " + ec.Type + "\n") + b.WriteString(" source: " + ec.Source.String() + "\n") + if ec.Subject != nil { + b.WriteString(" subject: " + *ec.Subject + "\n") + } + b.WriteString(" id: " + ec.ID + "\n") + if ec.Time != nil { + b.WriteString(" time: " + ec.Time.String() + "\n") + } + if ec.DataSchema != nil { + b.WriteString(" dataschema: " + ec.DataSchema.String() + "\n") + } + if ec.DataContentType != nil { + b.WriteString(" datacontenttype: " + *ec.DataContentType + "\n") + } + + if ec.Extensions != nil && len(ec.Extensions) > 0 { + b.WriteString("Extensions,\n") + keys := make([]string, 0, len(ec.Extensions)) + for k := range ec.Extensions { + keys = append(keys, k) + } + sort.Strings(keys) + for _, key := range keys { + b.WriteString(fmt.Sprintf(" %s: %v\n", key, ec.Extensions[key])) + } + } + + return b.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go new file mode 100644 index 000000000..74f73b029 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go @@ -0,0 +1,104 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "fmt" + "strings" + "time" +) + +// GetSpecVersion implements EventContextReader.GetSpecVersion +func (ec EventContextV1) GetSpecVersion() string { + return CloudEventsVersionV1 +} + +// GetDataContentType implements EventContextReader.GetDataContentType +func (ec EventContextV1) GetDataContentType() string { + if ec.DataContentType != nil { + return *ec.DataContentType + } + return "" +} + 
+// GetDataMediaType implements EventContextReader.GetDataMediaType +func (ec EventContextV1) GetDataMediaType() (string, error) { + if ec.DataContentType != nil { + dct := *ec.DataContentType + i := strings.IndexRune(dct, ';') + if i == -1 { + return dct, nil + } + return strings.TrimSpace(dct[0:i]), nil + } + return "", nil +} + +// GetType implements EventContextReader.GetType +func (ec EventContextV1) GetType() string { + return ec.Type +} + +// GetSource implements EventContextReader.GetSource +func (ec EventContextV1) GetSource() string { + return ec.Source.String() +} + +// GetSubject implements EventContextReader.GetSubject +func (ec EventContextV1) GetSubject() string { + if ec.Subject != nil { + return *ec.Subject + } + return "" +} + +// GetTime implements EventContextReader.GetTime +func (ec EventContextV1) GetTime() time.Time { + if ec.Time != nil { + return ec.Time.Time + } + return time.Time{} +} + +// GetID implements EventContextReader.GetID +func (ec EventContextV1) GetID() string { + return ec.ID +} + +// GetDataSchema implements EventContextReader.GetDataSchema +func (ec EventContextV1) GetDataSchema() string { + if ec.DataSchema != nil { + return ec.DataSchema.String() + } + return "" +} + +// DeprecatedGetDataContentEncoding implements EventContextReader.DeprecatedGetDataContentEncoding +func (ec EventContextV1) DeprecatedGetDataContentEncoding() string { + return "" +} + +// GetExtensions implements EventContextReader.GetExtensions +func (ec EventContextV1) GetExtensions() map[string]interface{} { + if len(ec.Extensions) == 0 { + return nil + } + // For now, convert the extensions of v1.0 to the pre-v1.0 style. + ext := make(map[string]interface{}, len(ec.Extensions)) + for k, v := range ec.Extensions { + ext[k] = v + } + return ext +} + +// GetExtension implements EventContextReader.GetExtension +func (ec EventContextV1) GetExtension(key string) (interface{}, error) { + v, ok := caseInsensitiveSearch(key, ec.Extensions) + if !ok { + return "", fmt.Errorf("%q not found", key) + } + return v, nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go new file mode 100644 index 000000000..5f2aca763 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go @@ -0,0 +1,97 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "errors" + "net/url" + "strings" + "time" + + "github.com/cloudevents/sdk-go/v2/types" +) + +// Adhere to EventContextWriter +var _ EventContextWriter = (*EventContextV1)(nil) + +// SetDataContentType implements EventContextWriter.SetDataContentType +func (ec *EventContextV1) SetDataContentType(ct string) error { + ct = strings.TrimSpace(ct) + if ct == "" { + ec.DataContentType = nil + } else { + ec.DataContentType = &ct + } + return nil +} + +// SetType implements EventContextWriter.SetType +func (ec *EventContextV1) SetType(t string) error { + t = strings.TrimSpace(t) + ec.Type = t + return nil +} + +// SetSource implements EventContextWriter.SetSource +func (ec *EventContextV1) SetSource(u string) error { + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.Source = types.URIRef{URL: *pu} + return nil +} + +// SetSubject implements EventContextWriter.SetSubject +func (ec *EventContextV1) SetSubject(s string) error { + s = strings.TrimSpace(s) + if s == "" { + ec.Subject = nil + } else { + ec.Subject = &s + } + return nil +} + +// 
SetID implements EventContextWriter.SetID +func (ec *EventContextV1) SetID(id string) error { + id = strings.TrimSpace(id) + if id == "" { + return errors.New("id is required to be a non-empty string") + } + ec.ID = id + return nil +} + +// SetTime implements EventContextWriter.SetTime +func (ec *EventContextV1) SetTime(t time.Time) error { + if t.IsZero() { + ec.Time = nil + } else { + ec.Time = &types.Timestamp{Time: t} + } + return nil +} + +// SetDataSchema implements EventContextWriter.SetDataSchema +func (ec *EventContextV1) SetDataSchema(u string) error { + u = strings.TrimSpace(u) + if u == "" { + ec.DataSchema = nil + return nil + } + pu, err := url.Parse(u) + if err != nil { + return err + } + ec.DataSchema = &types.URI{URL: *pu} + return nil +} + +// DeprecatedSetDataContentEncoding implements EventContextWriter.DeprecatedSetDataContentEncoding +func (ec *EventContextV1) DeprecatedSetDataContentEncoding(e string) error { + return errors.New("deprecated: SetDataContentEncoding is not supported in v1.0 of CloudEvents") +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go b/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go new file mode 100644 index 000000000..72d0e757a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go @@ -0,0 +1,57 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package event + +import ( + "errors" + "fmt" + "strings" +) + +const ( + // DataContentEncodingKey is the key to DeprecatedDataContentEncoding for versions that do not support data content encoding + // directly. + DataContentEncodingKey = "datacontentencoding" +) + +var ( + // This determines the behavior of validateExtensionName(). For MaxExtensionNameLength > 0, an error will be returned, + // if len(key) > MaxExtensionNameLength + MaxExtensionNameLength = 0 +) + +func caseInsensitiveSearch(key string, space map[string]interface{}) (interface{}, bool) { + lkey := strings.ToLower(key) + for k, v := range space { + if strings.EqualFold(lkey, strings.ToLower(k)) { + return v, true + } + } + return nil, false +} + +func IsExtensionNameValid(key string) bool { + if err := validateExtensionName(key); err != nil { + return false + } + return true +} + +func validateExtensionName(key string) error { + if len(key) < 1 { + return errors.New("bad key, CloudEvents attribute names MUST NOT be empty") + } + if MaxExtensionNameLength > 0 && len(key) > MaxExtensionNameLength { + return fmt.Errorf("bad key, CloudEvents attribute name '%s' is longer than %d characters", key, MaxExtensionNameLength) + } + + for _, c := range key { + if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9')) { + return errors.New("bad key, CloudEvents attribute names MUST consist of lower-case letters ('a' to 'z'), upper-case letters ('A' to 'Z') or digits ('0' to '9') from the ASCII character set") + } + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go new file mode 100644 index 000000000..f826a1841 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go @@ -0,0 +1,26 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package protocol defines interfaces to decouple the client package +from protocol implementations. + +Most event sender and receiver applications should not use this +package, they should use the client package. 
This package is for +infrastructure developers implementing new transports, or intermediary +components like importers, channels or brokers. + +Available protocols: + +* HTTP (using net/http) +* Kafka (using github.com/Shopify/sarama) +* AMQP (using pack.ag/amqp) +* Go Channels +* Nats +* Nats Streaming (stan) +* Google PubSub + +*/ +package protocol diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go new file mode 100644 index 000000000..a3f335261 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go @@ -0,0 +1,42 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import "fmt" + +// ErrTransportMessageConversion is an error produced when the transport +// message can not be converted. +type ErrTransportMessageConversion struct { + fatal bool + handled bool + transport string + message string +} + +// NewErrTransportMessageConversion makes a new ErrTransportMessageConversion. +func NewErrTransportMessageConversion(transport, message string, handled, fatal bool) *ErrTransportMessageConversion { + return &ErrTransportMessageConversion{ + transport: transport, + message: message, + handled: handled, + fatal: fatal, + } +} + +// IsFatal reports if this error should be considered fatal. +func (e *ErrTransportMessageConversion) IsFatal() bool { + return e.fatal +} + +// Handled reports if this error should be considered accepted and no further action. +func (e *ErrTransportMessageConversion) Handled() bool { + return e.handled +} + +// Error implements error.Error +func (e *ErrTransportMessageConversion) Error() string { + return fmt.Sprintf("transport %s failed to convert message: %s", e.transport, e.message) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go new file mode 100644 index 000000000..48f03fb6c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go @@ -0,0 +1,128 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "go.uber.org/zap" + "net/http" + "strconv" + "strings" + "time" +) + +type WebhookConfig struct { + AllowedMethods []string // defaults to POST + AllowedRate *int + AutoACKCallback bool + AllowedOrigins []string +} + +const ( + DefaultAllowedRate = 1000 + DefaultTimeout = time.Second * 600 +) + +// TODO: implement rate limiting. +// Throttling is indicated by requests being rejected using HTTP status code 429 Too Many Requests. +// TODO: use this if Webhook Request Origin has been turned on. +// Inbound requests should be rejected if Allowed Origins is required by SDK. + +func (p *Protocol) OptionsHandler(rw http.ResponseWriter, req *http.Request) { + if req.Method != http.MethodOptions || p.WebhookConfig == nil { + rw.WriteHeader(http.StatusMethodNotAllowed) + return + } + + headers := make(http.Header) + + // The spec does not say we need to validate the origin, just the request origin. + // After the handshake, we will validate the origin. 
+ if origin, ok := p.ValidateRequestOrigin(req); !ok { + rw.WriteHeader(http.StatusBadRequest) + return + } else { + headers.Set("WebHook-Allowed-Origin", origin) + } + + allowedRateRequired := false + if _, ok := req.Header[http.CanonicalHeaderKey("WebHook-Request-Rate")]; ok { + // must send WebHook-Allowed-Rate + allowedRateRequired = true + } + + if p.WebhookConfig.AllowedRate != nil { + headers.Set("WebHook-Allowed-Rate", strconv.Itoa(*p.WebhookConfig.AllowedRate)) + } else if allowedRateRequired { + headers.Set("WebHook-Allowed-Rate", strconv.Itoa(DefaultAllowedRate)) + } + + if len(p.WebhookConfig.AllowedMethods) > 0 { + headers.Set("Allow", strings.Join(p.WebhookConfig.AllowedMethods, ", ")) + } else { + headers.Set("Allow", http.MethodPost) + } + + cb := req.Header.Get("WebHook-Request-Callback") + if cb != "" { + if p.WebhookConfig.AutoACKCallback { + go func() { + reqAck, err := http.NewRequest(http.MethodPost, cb, nil) + if err != nil { + cecontext.LoggerFrom(req.Context()).Errorw("OPTIONS handler failed to create http request attempting to ack callback.", zap.Error(err), zap.String("callback", cb)) + return + } + + // Write out the headers. + for k := range headers { + reqAck.Header.Set(k, headers.Get(k)) + } + + _, err = http.DefaultClient.Do(reqAck) + if err != nil { + cecontext.LoggerFrom(req.Context()).Errorw("OPTIONS handler failed to ack callback.", zap.Error(err), zap.String("callback", cb)) + return + } + }() + return + } else { + cecontext.LoggerFrom(req.Context()).Infof("ACTION REQUIRED: Please validate web hook request callback: %q", cb) + // TODO: what to do pending https://github.com/cloudevents/spec/issues/617 + return + } + } + + // Write out the headers. + for k := range headers { + rw.Header().Set(k, headers.Get(k)) + } +} + +func (p *Protocol) ValidateRequestOrigin(req *http.Request) (string, bool) { + return p.validateOrigin(req.Header.Get("WebHook-Request-Origin")) +} + +func (p *Protocol) ValidateOrigin(req *http.Request) (string, bool) { + return p.validateOrigin(req.Header.Get("Origin")) +} + +func (p *Protocol) validateOrigin(ro string) (string, bool) { + cecontext.LoggerFrom(context.TODO()).Infow("Validating origin.", zap.String("origin", ro)) + + for _, ao := range p.WebhookConfig.AllowedOrigins { + if ao == "*" { + return ao, true + } + // TODO: it is not clear what the rules for allowed hosts are. + // Need to find docs for this. For now, test for prefix. + if strings.HasPrefix(ro, ao) { + return ao, true + } + } + + return ro, false +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go new file mode 100644 index 000000000..0eec396a1 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go @@ -0,0 +1,48 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + + nethttp "net/http" + "net/url" +) + +type requestKey struct{} + +// RequestData holds the http.Request information subset that can be +// used to retrieve HTTP information for an incoming CloudEvent. +type RequestData struct { + URL *url.URL + Header nethttp.Header + RemoteAddr string + Host string +} + +// WithRequestDataAtContext uses the http.Request to add RequestData +// information to the Context. 
+func WithRequestDataAtContext(ctx context.Context, r *nethttp.Request) context.Context { + if r == nil { + return ctx + } + + return context.WithValue(ctx, requestKey{}, &RequestData{ + URL: r.URL, + Header: r.Header, + RemoteAddr: r.RemoteAddr, + Host: r.Host, + }) +} + +// RequestDataFromContext retrieves RequestData from the Context. +// If not set nil is returned. +func RequestDataFromContext(ctx context.Context) *RequestData { + if req := ctx.Value(requestKey{}); req != nil { + return req.(*RequestData) + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go new file mode 100644 index 000000000..3428ea387 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go @@ -0,0 +1,9 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package http implements an HTTP binding using net/http module +*/ +package http diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go new file mode 100644 index 000000000..055a5c4dd --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go @@ -0,0 +1,55 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + "github.com/cloudevents/sdk-go/v2/binding" + "net/http" + "net/textproto" + "strings" + "unicode" + + "github.com/cloudevents/sdk-go/v2/binding/spec" +) + +var attributeHeadersMapping map[string]string + +type customHeaderKey int + +const ( + headerKey customHeaderKey = iota +) + +func init() { + attributeHeadersMapping = make(map[string]string) + for _, v := range specs.Versions() { + for _, a := range v.Attributes() { + if a.Kind() == spec.DataContentType { + attributeHeadersMapping[a.Name()] = ContentType + } else { + attributeHeadersMapping[a.Name()] = textproto.CanonicalMIMEHeaderKey(prefix + a.Name()) + } + } + } +} + +func extNameToHeaderName(name string) string { + var b strings.Builder + b.Grow(len(name) + len(prefix)) + b.WriteString(prefix) + b.WriteRune(unicode.ToUpper(rune(name[0]))) + b.WriteString(name[1:]) + return b.String() +} + +func HeaderFrom(ctx context.Context) http.Header { + return binding.GetOrDefaultFromCtx(ctx, headerKey, make(http.Header)).(http.Header) +} + +func WithCustomHeader(ctx context.Context, header http.Header) context.Context { + return context.WithValue(ctx, headerKey, header) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go new file mode 100644 index 000000000..7a7c36f9b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go @@ -0,0 +1,175 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + "io" + nethttp "net/http" + "net/textproto" + "strings" + "unicode" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" +) + +const prefix = "Ce-" + +var specs = spec.WithPrefixMatchExact( + func(s string) string { + if s == "datacontenttype" { + return "Content-Type" + } else { + return textproto.CanonicalMIMEHeaderKey("Ce-" + s) + } + }, + "Ce-", +) + +const ContentType = "Content-Type" +const ContentLength = "Content-Length" + +// Message holds the Header and Body of a 
HTTP Request or Response. +// The Message instance *must* be constructed from NewMessage function. +// This message *cannot* be read several times. In order to read it more times, buffer it using binding/buffering methods +type Message struct { + Header nethttp.Header + BodyReader io.ReadCloser + OnFinish func(error) error + + ctx context.Context + + format format.Format + version spec.Version +} + +// Check if http.Message implements binding.Message +var _ binding.Message = (*Message)(nil) +var _ binding.MessageContext = (*Message)(nil) +var _ binding.MessageMetadataReader = (*Message)(nil) + +// NewMessage returns a binding.Message with header and data. +// The returned binding.Message *cannot* be read several times. In order to read it more times, buffer it using binding/buffering methods +func NewMessage(header nethttp.Header, body io.ReadCloser) *Message { + m := Message{Header: header} + if body != nil { + m.BodyReader = body + } + if m.format = format.Lookup(header.Get(ContentType)); m.format == nil { + m.version = specs.Version(m.Header.Get(specs.PrefixedSpecVersionName())) + } + return &m +} + +// NewMessageFromHttpRequest returns a binding.Message with header and data. +// The returned binding.Message *cannot* be read several times. In order to read it more times, buffer it using binding/buffering methods +func NewMessageFromHttpRequest(req *nethttp.Request) *Message { + if req == nil { + return nil + } + message := NewMessage(req.Header, req.Body) + message.ctx = req.Context() + return message +} + +// NewMessageFromHttpResponse returns a binding.Message with header and data. +// The returned binding.Message *cannot* be read several times. In order to read it more times, buffer it using binding/buffering methods +func NewMessageFromHttpResponse(resp *nethttp.Response) *Message { + if resp == nil { + return nil + } + msg := NewMessage(resp.Header, resp.Body) + return msg +} + +func (m *Message) ReadEncoding() binding.Encoding { + if m.version != nil { + return binding.EncodingBinary + } + if m.format != nil { + if m.format == format.JSONBatch { + return binding.EncodingBatch + } + return binding.EncodingStructured + } + return binding.EncodingUnknown +} + +func (m *Message) ReadStructured(ctx context.Context, encoder binding.StructuredWriter) error { + if m.format == nil { + return binding.ErrNotStructured + } else { + return encoder.SetStructuredEvent(ctx, m.format, m.BodyReader) + } +} + +func (m *Message) ReadBinary(ctx context.Context, encoder binding.BinaryWriter) (err error) { + if m.version == nil { + return binding.ErrNotBinary + } + + for k, v := range m.Header { + attr := m.version.Attribute(k) + if attr != nil { + err = encoder.SetAttribute(attr, v[0]) + } else if strings.HasPrefix(k, prefix) { + // Trim Prefix + To lower + var b strings.Builder + b.Grow(len(k) - len(prefix)) + b.WriteRune(unicode.ToLower(rune(k[len(prefix)]))) + b.WriteString(k[len(prefix)+1:]) + err = encoder.SetExtension(b.String(), v[0]) + } + if err != nil { + return err + } + } + + if m.BodyReader != nil { + err = encoder.SetData(m.BodyReader) + if err != nil { + return err + } + } + + return +} + +func (m *Message) GetAttribute(k spec.Kind) (spec.Attribute, interface{}) { + attr := m.version.AttributeFromKind(k) + if attr != nil { + h := m.Header[attributeHeadersMapping[attr.Name()]] + if h != nil { + return attr, h[0] + } + return attr, nil + } + return nil, nil +} + +func (m *Message) GetExtension(name string) interface{} { + h := m.Header[extNameToHeaderName(name)] + if h != nil { + return 
h[0] + } + return nil +} + +func (m *Message) Context() context.Context { + return m.ctx +} + +func (m *Message) Finish(err error) error { + if m.BodyReader != nil { + _ = m.BodyReader.Close() + } + if m.OnFinish != nil { + return m.OnFinish(err) + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go new file mode 100644 index 000000000..5e400905a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go @@ -0,0 +1,301 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "fmt" + "net" + nethttp "net/http" + "net/url" + "strings" + "time" +) + +// Option is the function signature required to be considered an http.Option. +type Option func(*Protocol) error + +// WithTarget sets the outbound recipient of cloudevents when using an HTTP +// request. +func WithTarget(targetUrl string) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http target option can not set nil protocol") + } + targetUrl = strings.TrimSpace(targetUrl) + if targetUrl != "" { + var err error + var target *url.URL + target, err = url.Parse(targetUrl) + if err != nil { + return fmt.Errorf("http target option failed to parse target url: %s", err.Error()) + } + + p.Target = target + + if p.RequestTemplate == nil { + p.RequestTemplate = &nethttp.Request{ + Method: nethttp.MethodPost, + } + } + p.RequestTemplate.URL = target + + return nil + } + return fmt.Errorf("http target option was empty string") + } +} + +// WithHeader sets an additional default outbound header for all cloudevents +// when using an HTTP request. +func WithHeader(key, value string) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http header option can not set nil protocol") + } + key = strings.TrimSpace(key) + if key != "" { + if p.RequestTemplate == nil { + p.RequestTemplate = &nethttp.Request{ + Method: nethttp.MethodPost, + } + } + if p.RequestTemplate.Header == nil { + p.RequestTemplate.Header = nethttp.Header{} + } + p.RequestTemplate.Header.Add(key, value) + return nil + } + return fmt.Errorf("http header option was empty string") + } +} + +// WithShutdownTimeout sets the shutdown timeout when the http server is being shutdown. +func WithShutdownTimeout(timeout time.Duration) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http shutdown timeout option can not set nil protocol") + } + p.ShutdownTimeout = timeout + return nil + } +} + +func checkListen(p *Protocol, prefix string) error { + switch { + case p.listener.Load() != nil: + return fmt.Errorf("error setting %v: listener already set", prefix) + } + return nil +} + +// WithPort sets the listening port for StartReceiver. +// Only one of WithListener or WithPort is allowed. +func WithPort(port int) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http port option can not set nil protocol") + } + if port < 0 || port > 65535 { + return fmt.Errorf("http port option was given an invalid port: %d", port) + } + if err := checkListen(p, "http port option"); err != nil { + return err + } + p.Port = port + return nil + } +} + +// WithListener sets the listener for StartReceiver. +// Only one of WithListener or WithPort is allowed. 
+func WithListener(l net.Listener) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http listener option can not set nil protocol") + } + if err := checkListen(p, "http listener"); err != nil { + return err + } + p.listener.Store(l) + return nil + } +} + +// WithPath sets the path to receive cloudevents on for HTTP transports. +func WithPath(path string) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http path option can not set nil protocol") + } + path = strings.TrimSpace(path) + if len(path) == 0 { + return fmt.Errorf("http path option was given an invalid path: %q", path) + } + p.Path = path + return nil + } +} + +// WithMethod sets the HTTP verb (GET, POST, PUT, etc.) to use +// when using an HTTP request. +func WithMethod(method string) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http method option can not set nil protocol") + } + method = strings.TrimSpace(method) + if method != "" { + if p.RequestTemplate == nil { + p.RequestTemplate = &nethttp.Request{} + } + p.RequestTemplate.Method = method + return nil + } + return fmt.Errorf("http method option was empty string") + } +} + +// +// Middleware is a function that takes an existing http.Handler and wraps it in middleware, +// returning the wrapped http.Handler. +type Middleware func(next nethttp.Handler) nethttp.Handler + +// WithMiddleware adds an HTTP middleware to the transport. It may be specified multiple times. +// Middleware is applied to everything before it. For example +// `NewClient(WithMiddleware(foo), WithMiddleware(bar))` would result in `bar(foo(original))`. +func WithMiddleware(middleware Middleware) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http middleware option can not set nil protocol") + } + p.middleware = append(p.middleware, middleware) + return nil + } +} + +// WithRoundTripper sets the HTTP RoundTripper. +func WithRoundTripper(roundTripper nethttp.RoundTripper) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http round tripper option can not set nil protocol") + } + p.roundTripper = roundTripper + return nil + } +} + +// WithRoundTripperDecorator decorates the default HTTP RoundTripper chosen. 
+func WithRoundTripperDecorator(decorator func(roundTripper nethttp.RoundTripper) nethttp.RoundTripper) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http round tripper option can not set nil protocol") + } + if p.roundTripper == nil { + if p.Client == nil { + p.roundTripper = nethttp.DefaultTransport + } else { + p.roundTripper = p.Client.Transport + } + } + p.roundTripper = decorator(p.roundTripper) + return nil + } +} + +// WithClient sets the protocol client +func WithClient(client nethttp.Client) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("client option can not set nil protocol") + } + p.Client = &client + return nil + } +} + +// WithGetHandlerFunc sets the http GET handler func +func WithGetHandlerFunc(fn nethttp.HandlerFunc) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http GET handler func can not set nil protocol") + } + p.GetHandlerFn = fn + return nil + } +} + +// WithOptionsHandlerFunc sets the http OPTIONS handler func +func WithOptionsHandlerFunc(fn nethttp.HandlerFunc) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http OPTIONS handler func can not set nil protocol") + } + p.OptionsHandlerFn = fn + return nil + } +} + +// WithDefaultOptionsHandlerFunc sets the options handler to be the built in handler and configures the options. +// methods: the supported methods reported to OPTIONS caller. +// rate: the rate limit reported to OPTIONS caller. +// origins: the prefix of the accepted origins, or "*". +// callback: preform the callback to ACK the OPTIONS request. +func WithDefaultOptionsHandlerFunc(methods []string, rate int, origins []string, callback bool) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http OPTIONS handler func can not set nil protocol") + } + p.OptionsHandlerFn = p.OptionsHandler + p.WebhookConfig = &WebhookConfig{ + AllowedMethods: methods, + AllowedRate: &rate, + AllowedOrigins: origins, + AutoACKCallback: callback, + } + return nil + } +} + +// IsRetriable is a custom function that can be used to override the +// default retriable status codes. +type IsRetriable func(statusCode int) bool + +// WithIsRetriableFunc sets the function that gets called to determine if an +// error should be retried. If not set, the defaultIsRetriableFunc is used. +func WithIsRetriableFunc(isRetriable IsRetriable) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("isRetriable handler func can not set nil protocol") + } + if isRetriable == nil { + return fmt.Errorf("isRetriable handler can not be nil") + } + p.isRetriableFunc = isRetriable + return nil + } +} + +func WithRateLimiter(rl RateLimiter) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http OPTIONS handler func can not set nil protocol") + } + p.limiter = rl + return nil + } +} + +// WithRequestDataAtContextMiddleware adds to the Context RequestData. +// This enables a user's dispatch handler to inspect HTTP request information by +// retrieving it from the Context. 
+func WithRequestDataAtContextMiddleware() Option { + return WithMiddleware(func(next nethttp.Handler) nethttp.Handler { + return nethttp.HandlerFunc(func(w nethttp.ResponseWriter, r *nethttp.Request) { + ctx := WithRequestDataAtContext(r.Context(), r) + next.ServeHTTP(w, r.WithContext(ctx)) + }) + }) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go new file mode 100644 index 000000000..dba6fd7ba --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go @@ -0,0 +1,408 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +const ( + // DefaultShutdownTimeout defines the default timeout given to the http.Server when calling Shutdown. + DefaultShutdownTimeout = time.Minute * 1 +) + +type msgErr struct { + msg *Message + respFn protocol.ResponseFn + err error +} + +// Default error codes that we retry on - string isn't used, it's just there so +// people know what each error code's title is. +// To modify this use Option +var defaultRetriableErrors = map[int]string{ + 404: "Not Found", + 413: "Payload Too Large", + 425: "Too Early", + 429: "Too Many Requests", + 502: "Bad Gateway", + 503: "Service Unavailable", + 504: "Gateway Timeout", +} + +// Protocol acts as both a http client and a http handler. +type Protocol struct { + Target *url.URL + RequestTemplate *http.Request + Client *http.Client + incoming chan msgErr + + // OptionsHandlerFn handles the OPTIONS method requests and is intended to + // implement the abuse protection spec: + // https://github.com/cloudevents/spec/blob/v1.0/http-webhook.md#4-abuse-protection + OptionsHandlerFn http.HandlerFunc + WebhookConfig *WebhookConfig + + GetHandlerFn http.HandlerFunc + DeleteHandlerFn http.HandlerFunc + + // To support Opener: + + // ShutdownTimeout defines the timeout given to the http.Server when calling Shutdown. + // If 0, DefaultShutdownTimeout is used. + ShutdownTimeout time.Duration + + // Port is the port configured to bind the receiver to. Defaults to 8080. + // If you want to know the effective port you're listening to, use GetListeningPort() + Port int + // Path is the path to bind the receiver to. Defaults to "/". + Path string + + // Receive Mutex + reMu sync.Mutex + // Handler is the handler the http Server will use. Use this to reuse the + // http server. If nil, the Protocol will create a one. 
+ Handler *http.ServeMux + + listener atomic.Value + roundTripper http.RoundTripper + server *http.Server + handlerRegistered bool + middleware []Middleware + limiter RateLimiter + + isRetriableFunc IsRetriable +} + +func New(opts ...Option) (*Protocol, error) { + p := &Protocol{ + incoming: make(chan msgErr), + Port: -1, + } + if err := p.applyOptions(opts...); err != nil { + return nil, err + } + + if p.Client == nil { + p.Client = http.DefaultClient + } + + if p.roundTripper != nil { + p.Client.Transport = p.roundTripper + } + + if p.ShutdownTimeout == 0 { + p.ShutdownTimeout = DefaultShutdownTimeout + } + + if p.isRetriableFunc == nil { + p.isRetriableFunc = defaultIsRetriableFunc + } + + if p.limiter == nil { + p.limiter = noOpLimiter{} + } + + return p, nil +} + +// NewObserved creates an HTTP protocol with trace propagating middleware. +// Deprecated: now this behaves like New and it will be removed in future releases, +// setup the http observed protocol using the opencensus separate module NewObservedHttp +var NewObserved = New + +func (p *Protocol) applyOptions(opts ...Option) error { + for _, fn := range opts { + if err := fn(p); err != nil { + return err + } + } + return nil +} + +// Send implements binding.Sender +func (p *Protocol) Send(ctx context.Context, m binding.Message, transformers ...binding.Transformer) error { + if ctx == nil { + return fmt.Errorf("nil Context") + } else if m == nil { + return fmt.Errorf("nil Message") + } + + msg, err := p.Request(ctx, m, transformers...) + if msg != nil { + defer func() { _ = msg.Finish(err) }() + } + if err != nil && !protocol.IsACK(err) { + var res *Result + if protocol.ResultAs(err, &res) { + if message, ok := msg.(*Message); ok { + buf := new(bytes.Buffer) + buf.ReadFrom(message.BodyReader) + errorStr := buf.String() + // If the error is not wrapped, then append the original error string. + if og, ok := err.(*Result); ok { + og.Format = og.Format + "%s" + og.Args = append(og.Args, errorStr) + err = og + } else { + err = NewResult(res.StatusCode, "%w: %s", err, errorStr) + } + } + } + } + return err +} + +// Request implements binding.Requester +func (p *Protocol) Request(ctx context.Context, m binding.Message, transformers ...binding.Transformer) (binding.Message, error) { + if ctx == nil { + return nil, fmt.Errorf("nil Context") + } else if m == nil { + return nil, fmt.Errorf("nil Message") + } + + var err error + defer func() { _ = m.Finish(err) }() + + req := p.makeRequest(ctx) + + if p.Client == nil || req == nil || req.URL == nil { + return nil, fmt.Errorf("not initialized: %#v", p) + } + + if err = WriteRequest(ctx, m, req, transformers...); err != nil { + return nil, err + } + + return p.do(ctx, req) +} + +func (p *Protocol) makeRequest(ctx context.Context) *http.Request { + req := &http.Request{ + Method: http.MethodPost, + Header: HeaderFrom(ctx), + } + + if p.RequestTemplate != nil { + req.Method = p.RequestTemplate.Method + req.URL = p.RequestTemplate.URL + req.Close = p.RequestTemplate.Close + req.Host = p.RequestTemplate.Host + copyHeadersEnsure(p.RequestTemplate.Header, &req.Header) + } + + if p.Target != nil { + req.URL = p.Target + } + + // Override the default request with target from context. 
+ if target := cecontext.TargetFrom(ctx); target != nil { + req.URL = target + } + return req.WithContext(ctx) +} + +// Ensure to is a non-nil map before copying +func copyHeadersEnsure(from http.Header, to *http.Header) { + if len(from) > 0 { + if *to == nil { + *to = http.Header{} + } + copyHeaders(from, *to) + } +} + +func copyHeaders(from, to http.Header) { + if from == nil || to == nil { + return + } + for header, values := range from { + for _, value := range values { + to.Add(header, value) + } + } +} + +// Receive the next incoming HTTP request as a CloudEvent. +// Returns non-nil error if the incoming HTTP request fails to parse as a CloudEvent +// Returns io.EOF if the receiver is closed. +func (p *Protocol) Receive(ctx context.Context) (binding.Message, error) { + if ctx == nil { + return nil, fmt.Errorf("nil Context") + } + + msg, fn, err := p.Respond(ctx) + // No-op the response when finish is invoked. + if msg != nil { + return binding.WithFinish(msg, func(err error) { + if fn != nil { + _ = fn(ctx, nil, nil) + } + }), err + } else { + return nil, err + } +} + +// Respond receives the next incoming HTTP request as a CloudEvent and waits +// for the response callback to invoked before continuing. +// Returns non-nil error if the incoming HTTP request fails to parse as a CloudEvent +// Returns io.EOF if the receiver is closed. +func (p *Protocol) Respond(ctx context.Context) (binding.Message, protocol.ResponseFn, error) { + if ctx == nil { + return nil, nil, fmt.Errorf("nil Context") + } + + select { + case in, ok := <-p.incoming: + if !ok { + return nil, nil, io.EOF + } + + if in.msg == nil { + return nil, in.respFn, in.err + } + return in.msg, in.respFn, in.err + + case <-ctx.Done(): + return nil, nil, io.EOF + } +} + +// ServeHTTP implements http.Handler. +// Blocks until ResponseFn is invoked. +func (p *Protocol) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + // always apply limiter first using req context + ok, reset, err := p.limiter.Allow(req.Context(), req) + if err != nil { + p.incoming <- msgErr{msg: nil, err: fmt.Errorf("unable to acquire rate limit token: %w", err)} + rw.WriteHeader(http.StatusInternalServerError) + return + } + + if !ok { + rw.Header().Add("Retry-After", strconv.Itoa(int(reset))) + http.Error(rw, "limit exceeded", 429) + return + } + + // Filter the GET style methods: + switch req.Method { + case http.MethodOptions: + if p.OptionsHandlerFn == nil { + rw.WriteHeader(http.StatusMethodNotAllowed) + return + } + p.OptionsHandlerFn(rw, req) + return + + case http.MethodGet: + if p.GetHandlerFn == nil { + rw.WriteHeader(http.StatusMethodNotAllowed) + return + } + p.GetHandlerFn(rw, req) + return + + case http.MethodDelete: + if p.DeleteHandlerFn == nil { + rw.WriteHeader(http.StatusMethodNotAllowed) + return + } + p.DeleteHandlerFn(rw, req) + return + } + + m := NewMessageFromHttpRequest(req) + if m == nil { + // Should never get here unless ServeHTTP is called directly. + p.incoming <- msgErr{msg: nil, err: binding.ErrUnknownEncoding} + rw.WriteHeader(http.StatusBadRequest) + return // if there was no message, return. 
+ } + + var finishErr error + m.OnFinish = func(err error) error { + finishErr = err + return nil + } + + wg := sync.WaitGroup{} + wg.Add(1) + var fn protocol.ResponseFn = func(ctx context.Context, respMsg binding.Message, res protocol.Result, transformers ...binding.Transformer) error { + // Unblock the ServeHTTP after the reply is written + defer func() { + wg.Done() + }() + + if finishErr != nil { + http.Error(rw, fmt.Sprintf("Cannot forward CloudEvent: %s", finishErr), http.StatusInternalServerError) + return finishErr + } + + status := http.StatusOK + var errMsg string + if res != nil { + var result *Result + switch { + case protocol.ResultAs(res, &result): + if result.StatusCode > 100 && result.StatusCode < 600 { + status = result.StatusCode + } + errMsg = fmt.Errorf(result.Format, result.Args...).Error() + case !protocol.IsACK(res): + // Map client errors to http status code + validationError := event.ValidationError{} + if errors.As(res, &validationError) { + status = http.StatusBadRequest + rw.Header().Set("content-type", "text/plain") + rw.WriteHeader(status) + _, _ = rw.Write([]byte(validationError.Error())) + return validationError + } else if errors.Is(res, binding.ErrUnknownEncoding) { + status = http.StatusUnsupportedMediaType + } else { + status = http.StatusInternalServerError + } + } + } + + if respMsg != nil { + err := WriteResponseWriter(ctx, respMsg, status, rw, transformers...) + return respMsg.Finish(err) + } + + rw.WriteHeader(status) + if _, err := rw.Write([]byte(errMsg)); err != nil { + return err + } + return nil + } + + p.incoming <- msgErr{msg: m, respFn: fn} // Send to Request + // Block until ResponseFn is invoked + wg.Wait() +} + +func defaultIsRetriableFunc(sc int) bool { + _, ok := defaultRetriableErrors[sc] + return ok +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go new file mode 100644 index 000000000..04ef96915 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go @@ -0,0 +1,143 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + "fmt" + "net" + "net/http" + "strings" + + "github.com/cloudevents/sdk-go/v2/protocol" +) + +var _ protocol.Opener = (*Protocol)(nil) + +func (p *Protocol) OpenInbound(ctx context.Context) error { + p.reMu.Lock() + defer p.reMu.Unlock() + + if p.Handler == nil { + p.Handler = http.NewServeMux() + } + + if !p.handlerRegistered { + // handler.Handle might panic if the user tries to use the same path as the sdk. + p.Handler.Handle(p.GetPath(), p) + p.handlerRegistered = true + } + + // After listener is invok + listener, err := p.listen() + if err != nil { + return err + } + + p.server = &http.Server{ + Addr: listener.Addr().String(), + Handler: attachMiddleware(p.Handler, p.middleware), + ReadTimeout: DefaultTimeout, + WriteTimeout: DefaultTimeout, + } + + // Shutdown + defer func() { + _ = p.server.Close() + p.server = nil + }() + + errChan := make(chan error) + go func() { + errChan <- p.server.Serve(listener) + }() + + // wait for the server to return or ctx.Done(). + select { + case <-ctx.Done(): + // Try a graceful shutdown. 
+ ctx, cancel := context.WithTimeout(context.Background(), p.ShutdownTimeout) + defer cancel() + + shdwnErr := p.server.Shutdown(ctx) + if shdwnErr != nil { + shdwnErr = fmt.Errorf("shutting down HTTP server: %w", shdwnErr) + } + + // Wait for server goroutine to exit + rntmErr := <-errChan + if rntmErr != nil && rntmErr != http.ErrServerClosed { + rntmErr = fmt.Errorf("server failed during shutdown: %w", rntmErr) + + if shdwnErr != nil { + return fmt.Errorf("combined error during shutdown of HTTP server: %w, %v", + shdwnErr, rntmErr) + } + + return rntmErr + } + + return shdwnErr + + case err := <-errChan: + if err != nil { + return fmt.Errorf("during runtime of HTTP server: %w", err) + } + return nil + } +} + +// GetListeningPort returns the listening port. +// Returns -1 if it's not listening. +func (p *Protocol) GetListeningPort() int { + if listener := p.listener.Load(); listener != nil { + if tcpAddr, ok := listener.(net.Listener).Addr().(*net.TCPAddr); ok { + return tcpAddr.Port + } + } + return -1 +} + +// listen if not already listening, update t.Port +func (p *Protocol) listen() (net.Listener, error) { + if p.listener.Load() == nil { + port := 8080 + if p.Port != -1 { + port = p.Port + if port < 0 || port > 65535 { + return nil, fmt.Errorf("invalid port %d", port) + } + } + var err error + var listener net.Listener + if listener, err = net.Listen("tcp", fmt.Sprintf(":%d", port)); err != nil { + return nil, err + } + p.listener.Store(listener) + return listener, nil + } + return p.listener.Load().(net.Listener), nil +} + +// GetPath returns the path the transport is hosted on. If the path is '/', +// the transport will handle requests on any URI. To discover the true path +// a request was received on, inspect the context from Receive(cxt, ...) with +// TransportContextFrom(ctx). +func (p *Protocol) GetPath() string { + path := strings.TrimSpace(p.Path) + if len(path) > 0 { + return path + } + return "/" // default +} + +// attachMiddleware attaches the HTTP middleware to the specified handler. +func attachMiddleware(h http.Handler, middleware []Middleware) http.Handler { + for _, m := range middleware { + h = m(h) + } + return h +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go new file mode 100644 index 000000000..9c4c10a29 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go @@ -0,0 +1,34 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + "net/http" +) + +type RateLimiter interface { + // Allow attempts to take one token from the rate limiter for the specified + // request. It returns ok when this operation was successful. In case ok is + // false, reset will indicate the time in seconds when it is safe to perform + // another attempt. An error is returned when this operation failed, e.g. due to + // a backend error. + Allow(ctx context.Context, r *http.Request) (ok bool, reset uint64, err error) + // Close terminates rate limiter and cleans up any data structures or + // connections that may remain open. After a store is stopped, Take() should + // always return zero values. 
+ Close(ctx context.Context) error +} + +type noOpLimiter struct{} + +func (n noOpLimiter) Allow(ctx context.Context, r *http.Request) (bool, uint64, error) { + return true, 0, nil +} + +func (n noOpLimiter) Close(ctx context.Context) error { + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go new file mode 100644 index 000000000..71e7346f3 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go @@ -0,0 +1,145 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "bytes" + "context" + "errors" + "io" + "io/ioutil" + "net/http" + "net/url" + "time" + + "go.uber.org/zap" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +func (p *Protocol) do(ctx context.Context, req *http.Request) (binding.Message, error) { + params := cecontext.RetriesFrom(ctx) + + switch params.Strategy { + case cecontext.BackoffStrategyConstant, cecontext.BackoffStrategyLinear, cecontext.BackoffStrategyExponential: + return p.doWithRetry(ctx, params, req) + case cecontext.BackoffStrategyNone: + fallthrough + default: + return p.doOnce(req) + } +} + +func (p *Protocol) doOnce(req *http.Request) (binding.Message, protocol.Result) { + resp, err := p.Client.Do(req) + if err != nil { + return nil, protocol.NewReceipt(false, "%w", err) + } + + var result protocol.Result + if resp.StatusCode/100 == 2 { + result = protocol.ResultACK + } else { + result = protocol.ResultNACK + } + + return NewMessage(resp.Header, resp.Body), NewResult(resp.StatusCode, "%w", result) +} + +func (p *Protocol) doWithRetry(ctx context.Context, params *cecontext.RetryParams, req *http.Request) (binding.Message, error) { + then := time.Now() + retry := 0 + results := make([]protocol.Result, 0) + + var ( + body []byte + err error + ) + + if req != nil && req.Body != nil { + defer func() { + if err = req.Body.Close(); err != nil { + cecontext.LoggerFrom(ctx).Warnw("could not close request body", zap.Error(err)) + } + }() + body, err = ioutil.ReadAll(req.Body) + if err != nil { + panic(err) + } + resetBody(req, body) + } + + for { + msg, result := p.doOnce(req) + + // Fast track common case. + if protocol.IsACK(result) { + return msg, NewRetriesResult(result, retry, then, results) + } + + // Try again? + // + // Make sure the error was something we should retry. + + { + var uErr *url.Error + if errors.As(result, &uErr) { + goto DoBackoff + } + } + + { + var httpResult *Result + if errors.As(result, &httpResult) { + sc := httpResult.StatusCode + if p.isRetriableFunc(sc) { + // retry! + goto DoBackoff + } else { + // Permanent error + cecontext.LoggerFrom(ctx).Debugw("status code not retryable, will not try again", + zap.Error(httpResult), + zap.Int("statusCode", sc)) + return msg, NewRetriesResult(result, retry, then, results) + } + } + } + + DoBackoff: + resetBody(req, body) + + // Wait for the correct amount of backoff time. + + // total tries = retry + 1 + if err := params.Backoff(ctx, retry+1); err != nil { + // do not try again. + cecontext.LoggerFrom(ctx).Debugw("backoff error, will not try again", zap.Error(err)) + return msg, NewRetriesResult(result, retry, then, results) + } + + retry++ + results = append(results, result) + } +} + +// reset body to allow it to be read multiple times, e.g. 
when retrying http +// requests +func resetBody(req *http.Request, body []byte) { + if req == nil || req.Body == nil { + return + } + + req.Body = ioutil.NopCloser(bytes.NewReader(body)) + + // do not modify existing GetBody function + if req.GetBody == nil { + req.GetBody = func() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(body)), nil + } + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go new file mode 100644 index 000000000..7a0b2626c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go @@ -0,0 +1,60 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "errors" + "fmt" + + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// NewResult returns a fully populated http Result that should be used as +// a transport.Result. +func NewResult(statusCode int, messageFmt string, args ...interface{}) protocol.Result { + return &Result{ + StatusCode: statusCode, + Format: messageFmt, + Args: args, + } +} + +// Result wraps the fields required to make adjustments for http Responses. +type Result struct { + StatusCode int + Format string + Args []interface{} +} + +// make sure Result implements error. +var _ error = (*Result)(nil) + +// Is returns if the target error is a Result type checking target. +func (e *Result) Is(target error) bool { + if o, ok := target.(*Result); ok { + return e.StatusCode == o.StatusCode + } + + // Special case for nil == ACK + if o, ok := target.(*protocol.Receipt); ok { + if e == nil && o.ACK { + return true + } + } + + // Allow for wrapped errors. + if e != nil { + err := fmt.Errorf(e.Format, e.Args...) + return errors.Is(err, target) + } + return false +} + +// Error returns the string that is formed by using the format string with the +// provided args. +func (e *Result) Error() string { + return fmt.Sprintf("%d: %v", e.StatusCode, fmt.Errorf(e.Format, e.Args...)) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go new file mode 100644 index 000000000..f4046d522 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go @@ -0,0 +1,59 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "fmt" + "time" + + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// NewRetriesResult returns a http RetriesResult that should be used as +// a transport.Result without retries +func NewRetriesResult(result protocol.Result, retries int, startTime time.Time, attempts []protocol.Result) protocol.Result { + rr := &RetriesResult{ + Result: result, + Retries: retries, + Duration: time.Since(startTime), + } + if len(attempts) > 0 { + rr.Attempts = attempts + } + return rr +} + +// RetriesResult wraps the fields required to make adjustments for http Responses. +type RetriesResult struct { + // The last result + protocol.Result + + // Retries is the number of times the request was tried + Retries int + + // Duration records the time spent retrying. Exclude the successful request (if any) + Duration time.Duration + + // Attempts of all failed requests. Exclude last result. + Attempts []protocol.Result +} + +// make sure RetriesResult implements error. +var _ error = (*RetriesResult)(nil) + +// Is returns if the target error is a RetriesResult type checking target. 
+func (e *RetriesResult) Is(target error) bool { + return protocol.ResultIs(e.Result, target) +} + +// Error returns the string that is formed by using the format string with the +// provided args. +func (e *RetriesResult) Error() string { + if e.Retries == 0 { + return e.Result.Error() + } + return fmt.Sprintf("%s (%dx)", e.Result.Error(), e.Retries) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go new file mode 100644 index 000000000..350fc1cf6 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/utility.go @@ -0,0 +1,89 @@ +/* + Copyright 2022 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "bytes" + "context" + "encoding/json" + nethttp "net/http" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/event" +) + +// NewEventFromHTTPRequest returns an Event. +func NewEventFromHTTPRequest(req *nethttp.Request) (*event.Event, error) { + msg := NewMessageFromHttpRequest(req) + return binding.ToEvent(context.Background(), msg) +} + +// NewEventFromHTTPResponse returns an Event. +func NewEventFromHTTPResponse(resp *nethttp.Response) (*event.Event, error) { + msg := NewMessageFromHttpResponse(resp) + return binding.ToEvent(context.Background(), msg) +} + +// NewEventsFromHTTPRequest returns a batched set of Events from a HTTP Request +func NewEventsFromHTTPRequest(req *nethttp.Request) ([]event.Event, error) { + msg := NewMessageFromHttpRequest(req) + return binding.ToEvents(context.Background(), msg, msg.BodyReader) +} + +// NewEventsFromHTTPResponse returns a batched set of Events from a HTTP Response +func NewEventsFromHTTPResponse(resp *nethttp.Response) ([]event.Event, error) { + msg := NewMessageFromHttpResponse(resp) + return binding.ToEvents(context.Background(), msg, msg.BodyReader) +} + +// NewHTTPRequestFromEvent creates a http.Request object that can be used with any http.Client for a singular event. +// This is an HTTP POST action to the provided url. +func NewHTTPRequestFromEvent(ctx context.Context, url string, event event.Event) (*nethttp.Request, error) { + if err := event.Validate(); err != nil { + return nil, err + } + + req, err := nethttp.NewRequestWithContext(ctx, nethttp.MethodPost, url, nil) + if err != nil { + return nil, err + } + if err := WriteRequest(ctx, (*binding.EventMessage)(&event), req); err != nil { + return nil, err + } + + return req, nil +} + +// NewHTTPRequestFromEvents creates a http.Request object that can be used with any http.Client for sending +// a batched set of events. This is an HTTP POST action to the provided url. +func NewHTTPRequestFromEvents(ctx context.Context, url string, events []event.Event) (*nethttp.Request, error) { + // Sending batch events is quite straightforward, as there is only JSON format, so a simple implementation. + for _, e := range events { + if err := e.Validate(); err != nil { + return nil, err + } + } + var buffer bytes.Buffer + err := json.NewEncoder(&buffer).Encode(events) + if err != nil { + return nil, err + } + + request, err := nethttp.NewRequestWithContext(ctx, nethttp.MethodPost, url, &buffer) + if err != nil { + return nil, err + } + + request.Header.Set(ContentType, event.ApplicationCloudEventsBatchJSON) + + return request, nil +} + +// IsHTTPBatch returns if the current http.Request or http.Response is a batch event operation, by checking the +// header `Content-Type` value. 
+func IsHTTPBatch(header nethttp.Header) bool { + return header.Get(ContentType) == event.ApplicationCloudEventsBatchJSON +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go new file mode 100644 index 000000000..43ad36180 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go @@ -0,0 +1,141 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "bytes" + "context" + "io" + "io/ioutil" + "net/http" + "strings" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/types" +) + +// WriteRequest fills the provided httpRequest with the message m. +// Using context you can tweak the encoding processing (more details on binding.Write documentation). +func WriteRequest(ctx context.Context, m binding.Message, httpRequest *http.Request, transformers ...binding.Transformer) error { + structuredWriter := (*httpRequestWriter)(httpRequest) + binaryWriter := (*httpRequestWriter)(httpRequest) + + _, err := binding.Write( + ctx, + m, + structuredWriter, + binaryWriter, + transformers..., + ) + return err +} + +type httpRequestWriter http.Request + +func (b *httpRequestWriter) SetStructuredEvent(ctx context.Context, format format.Format, event io.Reader) error { + b.Header.Set(ContentType, format.MediaType()) + return b.setBody(event) +} + +func (b *httpRequestWriter) Start(ctx context.Context) error { + return nil +} + +func (b *httpRequestWriter) End(ctx context.Context) error { + return nil +} + +func (b *httpRequestWriter) SetData(data io.Reader) error { + return b.setBody(data) +} + +// setBody is a cherry-pick of the implementation in http.NewRequestWithContext +func (b *httpRequestWriter) setBody(body io.Reader) error { + rc, ok := body.(io.ReadCloser) + if !ok && body != nil { + rc = ioutil.NopCloser(body) + } + b.Body = rc + if body != nil { + switch v := body.(type) { + case *bytes.Buffer: + b.ContentLength = int64(v.Len()) + buf := v.Bytes() + b.GetBody = func() (io.ReadCloser, error) { + r := bytes.NewReader(buf) + return ioutil.NopCloser(r), nil + } + case *bytes.Reader: + b.ContentLength = int64(v.Len()) + snapshot := *v + b.GetBody = func() (io.ReadCloser, error) { + r := snapshot + return ioutil.NopCloser(&r), nil + } + case *strings.Reader: + b.ContentLength = int64(v.Len()) + snapshot := *v + b.GetBody = func() (io.ReadCloser, error) { + r := snapshot + return ioutil.NopCloser(&r), nil + } + default: + // This is where we'd set it to -1 (at least + // if body != NoBody) to mean unknown, but + // that broke people during the Go 1.8 testing + // period. People depend on it being 0 I + // guess. Maybe retry later. See Issue 18117. + } + // For client requests, Request.ContentLength of 0 + // means either actually 0, or unknown. The only way + // to explicitly say that the ContentLength is zero is + // to set the Body to nil. But turns out too much code + // depends on NewRequest returning a non-nil Body, + // so we use a well-known ReadCloser variable instead + // and have the http package also treat that sentinel + // variable to mean explicitly zero. 
+ if b.GetBody != nil && b.ContentLength == 0 { + b.Body = http.NoBody + b.GetBody = func() (io.ReadCloser, error) { return http.NoBody, nil } + } + } + return nil +} + +func (b *httpRequestWriter) SetAttribute(attribute spec.Attribute, value interface{}) error { + mapping := attributeHeadersMapping[attribute.Name()] + if value == nil { + delete(b.Header, mapping) + return nil + } + + // Http headers, everything is a string! + s, err := types.Format(value) + if err != nil { + return err + } + b.Header[mapping] = append(b.Header[mapping], s) + return nil +} + +func (b *httpRequestWriter) SetExtension(name string, value interface{}) error { + if value == nil { + delete(b.Header, extNameToHeaderName(name)) + return nil + } + // Http headers, everything is a string! + s, err := types.Format(value) + if err != nil { + return err + } + b.Header[extNameToHeaderName(name)] = []string{s} + return nil +} + +var _ binding.StructuredWriter = (*httpRequestWriter)(nil) // Test it conforms to the interface +var _ binding.BinaryWriter = (*httpRequestWriter)(nil) // Test it conforms to the interface diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go new file mode 100644 index 000000000..41385dab1 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go @@ -0,0 +1,126 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "bytes" + "context" + "io" + "net/http" + "strconv" + "strings" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/types" +) + +// WriteResponseWriter writes out to the the provided httpResponseWriter with the message m. +// Using context you can tweak the encoding processing (more details on binding.Write documentation). +func WriteResponseWriter(ctx context.Context, m binding.Message, status int, rw http.ResponseWriter, transformers ...binding.Transformer) error { + if status < 200 || status >= 600 { + status = http.StatusOK + } + writer := &httpResponseWriter{rw: rw, status: status} + + _, err := binding.Write( + ctx, + m, + writer, + writer, + transformers..., + ) + return err +} + +type httpResponseWriter struct { + rw http.ResponseWriter + status int + body io.Reader +} + +func (b *httpResponseWriter) SetStructuredEvent(ctx context.Context, format format.Format, event io.Reader) error { + b.rw.Header().Set(ContentType, format.MediaType()) + b.body = event + return b.finalizeWriter() +} + +func (b *httpResponseWriter) Start(ctx context.Context) error { + return nil +} + +func (b *httpResponseWriter) SetAttribute(attribute spec.Attribute, value interface{}) error { + mapping := attributeHeadersMapping[attribute.Name()] + if value == nil { + delete(b.rw.Header(), mapping) + } + + // Http headers, everything is a string! + s, err := types.Format(value) + if err != nil { + return err + } + b.rw.Header()[mapping] = append(b.rw.Header()[mapping], s) + return nil +} + +func (b *httpResponseWriter) SetExtension(name string, value interface{}) error { + if value == nil { + delete(b.rw.Header(), extNameToHeaderName(name)) + } + // Http headers, everything is a string! 
+ s, err := types.Format(value) + if err != nil { + return err + } + b.rw.Header()[extNameToHeaderName(name)] = []string{s} + return nil +} + +func (b *httpResponseWriter) SetData(reader io.Reader) error { + b.body = reader + return nil +} + +func (b *httpResponseWriter) finalizeWriter() error { + if b.body != nil { + // Try to figure it out if we have a content-length + contentLength := -1 + switch v := b.body.(type) { + case *bytes.Buffer: + contentLength = v.Len() + case *bytes.Reader: + contentLength = v.Len() + case *strings.Reader: + contentLength = v.Len() + } + + if contentLength != -1 { + b.rw.Header().Add("Content-length", strconv.Itoa(contentLength)) + } + + // Finalize the headers. + b.rw.WriteHeader(b.status) + + // Write body. + _, err := io.Copy(b.rw, b.body) + if err != nil { + return err + } + } else { + // Finalize the headers. + b.rw.WriteHeader(b.status) + } + return nil +} + +func (b *httpResponseWriter) End(ctx context.Context) error { + return b.finalizeWriter() +} + +var _ binding.StructuredWriter = (*httpResponseWriter)(nil) // Test it conforms to the interface +var _ binding.BinaryWriter = (*httpResponseWriter)(nil) // Test it conforms to the interface diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go new file mode 100644 index 000000000..e7a74294d --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go @@ -0,0 +1,54 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/binding" +) + +// Receiver receives messages. +type Receiver interface { + // Receive blocks till a message is received or ctx expires. + // Receive can be invoked safely from different goroutines. + // + // A non-nil error means the receiver is closed. + // io.EOF means it closed cleanly, any other value indicates an error. + // The caller is responsible for `Finish()` the returned message + Receive(ctx context.Context) (binding.Message, error) +} + +// ReceiveCloser is a Receiver that can be closed. +type ReceiveCloser interface { + Receiver + Closer +} + +// ResponseFn is the function callback provided from Responder.Respond to allow +// for a receiver to "reply" to a message it receives. +// transformers are applied when the message is written on the wire. +type ResponseFn func(ctx context.Context, m binding.Message, r Result, transformers ...binding.Transformer) error + +// Responder receives messages and is given a callback to respond. +type Responder interface { + // Respond blocks till a message is received or ctx expires. + // Respond can be invoked safely from different goroutines. + // + // A non-nil error means the receiver is closed. + // io.EOF means it closed cleanly, any other value indicates an error. + // The caller is responsible for `Finish()` the returned message, + // while the protocol implementation is responsible for `Finish()` the response message. + // The caller MUST invoke ResponseFn, in order to avoid leaks. + // The correct flow for the caller is to finish the received message and then invoke the ResponseFn + Respond(ctx context.Context) (binding.Message, ResponseFn, error) +} + +// ResponderCloser is a Responder that can be closed. 
+type ResponderCloser interface { + Responder + Closer +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go new file mode 100644 index 000000000..4a058c962 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go @@ -0,0 +1,23 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import ( + "context" +) + +// Opener is the common interface for things that need to be opened. +type Opener interface { + // OpenInbound is a blocking call and ctx is used to stop the Inbound message Receiver/Responder. + // Closing the context won't close the Receiver/Responder, aka it won't invoke Close(ctx). + OpenInbound(ctx context.Context) error +} + +// Closer is the common interface for things that can be closed. +// After invoking Close(ctx), you cannot reuse the object you closed. +type Closer interface { + Close(ctx context.Context) error +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go new file mode 100644 index 000000000..e44fa432a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go @@ -0,0 +1,49 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/binding" +) + +// Sender sends messages. +type Sender interface { + // Send a message. + // + // Send returns when the "outbound" message has been sent. The Sender may + // still be expecting acknowledgment or holding other state for the message. + // + // m.Finish() is called when sending is finished (both succeeded or failed): + // expected acknowledgments (or errors) have been received, the Sender is + // no longer holding any state for the message. + // m.Finish() may be called during or after Send(). + // + // transformers are applied when the message is written on the wire. + Send(ctx context.Context, m binding.Message, transformers ...binding.Transformer) error +} + +// SendCloser is a Sender that can be closed. +type SendCloser interface { + Sender + Closer +} + +// Requester sends a message and receives a response +// +// Optional interface that may be implemented by protocols that support +// request/response correlation. +type Requester interface { + // Request sends m like Sender.Send() but also arranges to receive a response. + Request(ctx context.Context, m binding.Message, transformers ...binding.Transformer) (binding.Message, error) +} + +// RequesterCloser is a Requester that can be closed. +type RequesterCloser interface { + Requester + Closer +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go new file mode 100644 index 000000000..eae64e018 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go @@ -0,0 +1,127 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package protocol + +import ( + "errors" + "fmt" +) + +// Result leverages go's error wrapping. +type Result error + +// ResultIs reports whether any error in err's chain matches target. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. 
+// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +// (text from errors/wrap.go) +var ResultIs = errors.Is + +// ResultAs finds the first error in err's chain that matches target, and if so, sets +// target to that error value and returns true. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error matches target if the error's concrete value is assignable to the value +// pointed to by target, or if the error has a method As(interface{}) bool such that +// As(target) returns true. In the latter case, the As method is responsible for +// setting target. +// +// As will panic if target is not a non-nil pointer to either a type that implements +// error, or to any interface type. As returns false if err is nil. +// (text from errors/wrap.go) +var ResultAs = errors.As + +func NewResult(messageFmt string, args ...interface{}) Result { + return fmt.Errorf(messageFmt, args...) +} + +// IsACK true means the recipient acknowledged the event. +func IsACK(target Result) bool { + // special case, nil target also means ACK. + if target == nil { + return true + } + + return ResultIs(target, ResultACK) +} + +// IsNACK true means the recipient did not acknowledge the event. +func IsNACK(target Result) bool { + return ResultIs(target, ResultNACK) +} + +// IsUndelivered true means the target result is not an ACK/NACK, but some other +// error unrelated to delivery not from the intended recipient. Likely target +// is an error that represents some part of the protocol is misconfigured or +// the event that was attempting to be sent was invalid. +func IsUndelivered(target Result) bool { + if target == nil { + // Short-circuit nil result is ACK. + return false + } + return !ResultIs(target, ResultACK) && !ResultIs(target, ResultNACK) +} + +var ( + ResultACK = NewReceipt(true, "") + ResultNACK = NewReceipt(false, "") +) + +// NewReceipt returns a fully populated protocol Receipt that should be used as +// a transport.Result. This type holds the base ACK/NACK results. +func NewReceipt(ack bool, messageFmt string, args ...interface{}) Result { + return &Receipt{ + Err: fmt.Errorf(messageFmt, args...), + ACK: ack, + } +} + +// Receipt wraps the fields required to understand if a protocol event is acknowledged. +type Receipt struct { + Err error + ACK bool +} + +// make sure Result implements error. +var _ error = (*Receipt)(nil) + +// Is returns if the target error is a Result type checking target. +func (e *Receipt) Is(target error) bool { + if o, ok := target.(*Receipt); ok { + if e == nil { + // Special case nil e as ACK. + return o.ACK + } + return e.ACK == o.ACK + } + // Allow for wrapped errors. + if e != nil { + return errors.Is(e.Err, target) + } + return false +} + +// Error returns the string that is formed by using the format string with the +// provided args. 
+func (e *Receipt) Error() string { + if e != nil { + return e.Err.Error() + } + return "" +} + +// Unwrap returns the wrapped error if exist or nil +func (e *Receipt) Unwrap() error { + if e != nil { + return errors.Unwrap(e.Err) + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/staticcheck.conf b/vendor/github.com/cloudevents/sdk-go/v2/staticcheck.conf new file mode 100644 index 000000000..d6f269556 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/staticcheck.conf @@ -0,0 +1,3 @@ +checks = [ + "all", "-ST1003", +] \ No newline at end of file diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go b/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go new file mode 100644 index 000000000..814626874 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go @@ -0,0 +1,41 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import "reflect" + +// Allocate allocates a new instance of type t and returns: +// asPtr is of type t if t is a pointer type and of type &t otherwise +// asValue is a Value of type t pointing to the same data as asPtr +func Allocate(obj interface{}) (asPtr interface{}, asValue reflect.Value) { + if obj == nil { + return nil, reflect.Value{} + } + + switch t := reflect.TypeOf(obj); t.Kind() { + case reflect.Ptr: + reflectPtr := reflect.New(t.Elem()) + asPtr = reflectPtr.Interface() + asValue = reflectPtr + case reflect.Map: + reflectPtr := reflect.MakeMap(t) + asPtr = reflectPtr.Interface() + asValue = reflectPtr + case reflect.String: + reflectPtr := reflect.New(t) + asPtr = "" + asValue = reflectPtr.Elem() + case reflect.Slice: + reflectPtr := reflect.MakeSlice(t, 0, 0) + asPtr = reflectPtr.Interface() + asValue = reflectPtr + default: + reflectPtr := reflect.New(t) + asPtr = reflectPtr.Interface() + asValue = reflectPtr.Elem() + } + return +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go new file mode 100644 index 000000000..cf7a94f35 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go @@ -0,0 +1,46 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +/* +Package types implements the CloudEvents type system. + +CloudEvents defines a set of abstract types for event context attributes. Each +type has a corresponding native Go type and a canonical string encoding. The +native Go types used to represent the CloudEvents types are: +bool, int32, string, []byte, *url.URL, time.Time + + +----------------+----------------+-----------------------------------+ + |CloudEvents Type|Native Type |Convertible From | + +================+================+===================================+ + |Bool |bool |bool | + +----------------+----------------+-----------------------------------+ + |Integer |int32 |Any numeric type with value in | + | | |range of int32 | + +----------------+----------------+-----------------------------------+ + |String |string |string | + +----------------+----------------+-----------------------------------+ + |Binary |[]byte |[]byte | + +----------------+----------------+-----------------------------------+ + |URI-Reference |*url.URL |url.URL, types.URIRef, types.URI | + +----------------+----------------+-----------------------------------+ + |URI |*url.URL |url.URL, types.URIRef, types.URI | + | | |Must be an absolute URI. 
| + +----------------+----------------+-----------------------------------+ + |Timestamp |time.Time |time.Time, types.Timestamp | + +----------------+----------------+-----------------------------------+ + +Extension attributes may be stored as a native type or a canonical string. The +To functions will convert to the desired from any convertible type +or from the canonical string form. + +The Parse and Format functions convert native types to/from +canonical strings. + +Note are no Parse or Format functions for URL or string. For URL use the +standard url.Parse() and url.URL.String(). The canonical string format of a +string is the string itself. + +*/ +package types diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go b/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go new file mode 100644 index 000000000..ff049727d --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go @@ -0,0 +1,75 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "time" +) + +// Timestamp wraps time.Time to normalize the time layout to RFC3339. It is +// intended to enforce compliance with the CloudEvents spec for their +// definition of Timestamp. Custom marshal methods are implemented to ensure +// the outbound Timestamp is a string in the RFC3339 layout. +type Timestamp struct { + time.Time +} + +// ParseTimestamp attempts to parse the given time assuming RFC3339 layout +func ParseTimestamp(s string) (*Timestamp, error) { + if s == "" { + return nil, nil + } + tt, err := ParseTime(s) + return &Timestamp{Time: tt}, err +} + +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. +func (t *Timestamp) MarshalJSON() ([]byte, error) { + if t == nil || t.IsZero() { + return []byte(`""`), nil + } + return []byte(fmt.Sprintf("%q", t)), nil +} + +// UnmarshalJSON implements the json unmarshal method used when this type is +// unmarshaled using json.Unmarshal. +func (t *Timestamp) UnmarshalJSON(b []byte) error { + var timestamp string + if err := json.Unmarshal(b, &timestamp); err != nil { + return err + } + var err error + t.Time, err = ParseTime(timestamp) + return err +} + +// MarshalXML implements a custom xml marshal method used when this type is +// marshaled using xml.Marshal. +func (t *Timestamp) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if t == nil || t.IsZero() { + return e.EncodeElement(nil, start) + } + return e.EncodeElement(t.String(), start) +} + +// UnmarshalXML implements the xml unmarshal method used when this type is +// unmarshaled using xml.Unmarshal. +func (t *Timestamp) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var timestamp string + if err := d.DecodeElement(&timestamp, &start); err != nil { + return err + } + var err error + t.Time, err = ParseTime(timestamp) + return err +} + +// String outputs the time using RFC3339 format. +func (t Timestamp) String() string { return FormatTime(t.Time) } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go b/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go new file mode 100644 index 000000000..bed608094 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go @@ -0,0 +1,86 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "net/url" +) + +// URI is a wrapper to url.URL.
It is intended to enforce compliance with +// the CloudEvents spec for their definition of URI. Custom +// marshal methods are implemented to ensure the outbound URI object +// is a flat string. +type URI struct { + url.URL +} + +// ParseURI attempts to parse the given string as a URI. +func ParseURI(u string) *URI { + if u == "" { + return nil + } + pu, err := url.Parse(u) + if err != nil { + return nil + } + return &URI{URL: *pu} +} + +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. +func (u URI) MarshalJSON() ([]byte, error) { + b := fmt.Sprintf("%q", u.String()) + return []byte(b), nil +} + +// UnmarshalJSON implements the json unmarshal method used when this type is +// unmarshaled using json.Unmarshal. +func (u *URI) UnmarshalJSON(b []byte) error { + var ref string + if err := json.Unmarshal(b, &ref); err != nil { + return err + } + r := ParseURI(ref) + if r != nil { + *u = *r + } + return nil +} + +// MarshalXML implements a custom xml marshal method used when this type is +// marshaled using xml.Marshal. +func (u URI) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + return e.EncodeElement(u.String(), start) +} + +// UnmarshalXML implements the xml unmarshal method used when this type is +// unmarshaled using xml.Unmarshal. +func (u *URI) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var ref string + if err := d.DecodeElement(&ref, &start); err != nil { + return err + } + r := ParseURI(ref) + if r != nil { + *u = *r + } + return nil +} + +func (u URI) Validate() bool { + return u.IsAbs() +} + +// String returns the full string representation of the URI-Reference. +func (u *URI) String() string { + if u == nil { + return "" + } + return u.URL.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go b/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go new file mode 100644 index 000000000..22fa12314 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go @@ -0,0 +1,82 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "net/url" +) + +// URIRef is a wrapper to url.URL. It is intended to enforce compliance with +// the CloudEvents spec for their definition of URI-Reference. Custom +// marshal methods are implemented to ensure the outbound URIRef object is +// is a flat string. +type URIRef struct { + url.URL +} + +// ParseURIRef attempts to parse the given string as a URI-Reference. +func ParseURIRef(u string) *URIRef { + if u == "" { + return nil + } + pu, err := url.Parse(u) + if err != nil { + return nil + } + return &URIRef{URL: *pu} +} + +// MarshalJSON implements a custom json marshal method used when this type is +// marshaled using json.Marshal. +func (u URIRef) MarshalJSON() ([]byte, error) { + b := fmt.Sprintf("%q", u.String()) + return []byte(b), nil +} + +// UnmarshalJSON implements the json unmarshal method used when this type is +// unmarshaled using json.Unmarshal. +func (u *URIRef) UnmarshalJSON(b []byte) error { + var ref string + if err := json.Unmarshal(b, &ref); err != nil { + return err + } + r := ParseURIRef(ref) + if r != nil { + *u = *r + } + return nil +} + +// MarshalXML implements a custom xml marshal method used when this type is +// marshaled using xml.Marshal. 
+func (u URIRef) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + return e.EncodeElement(u.String(), start) +} + +// UnmarshalXML implements the xml unmarshal method used when this type is +// unmarshaled using xml.Unmarshal. +func (u *URIRef) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var ref string + if err := d.DecodeElement(&ref, &start); err != nil { + return err + } + r := ParseURIRef(ref) + if r != nil { + *u = *r + } + return nil +} + +// String returns the full string representation of the URI-Reference. +func (u *URIRef) String() string { + if u == nil { + return "" + } + return u.URL.String() +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/value.go b/vendor/github.com/cloudevents/sdk-go/v2/types/value.go new file mode 100644 index 000000000..f643d0aa5 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/value.go @@ -0,0 +1,335 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package types + +import ( + "encoding/base64" + "fmt" + "math" + "net/url" + "reflect" + "strconv" + "time" +) + +// FormatBool returns canonical string format: "true" or "false" +func FormatBool(v bool) string { return strconv.FormatBool(v) } + +// FormatInteger returns canonical string format: decimal notation. +func FormatInteger(v int32) string { return strconv.Itoa(int(v)) } + +// FormatBinary returns canonical string format: standard base64 encoding +func FormatBinary(v []byte) string { return base64.StdEncoding.EncodeToString(v) } + +// FormatTime returns canonical string format: RFC3339 with nanoseconds +func FormatTime(v time.Time) string { return v.UTC().Format(time.RFC3339Nano) } + +// ParseBool parse canonical string format: "true" or "false" +func ParseBool(v string) (bool, error) { return strconv.ParseBool(v) } + +// ParseInteger parse canonical string format: decimal notation. +func ParseInteger(v string) (int32, error) { + // Accept floating-point but truncate to int32 as per CE spec. + f, err := strconv.ParseFloat(v, 64) + if err != nil { + return 0, err + } + if f > math.MaxInt32 || f < math.MinInt32 { + return 0, rangeErr(v) + } + return int32(f), nil +} + +// ParseBinary parse canonical string format: standard base64 encoding +func ParseBinary(v string) ([]byte, error) { return base64.StdEncoding.DecodeString(v) } + +// ParseTime parse canonical string format: RFC3339 with nanoseconds +func ParseTime(v string) (time.Time, error) { + t, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + err := convertErr(time.Time{}, v) + err.extra = ": not in RFC3339 format" + return time.Time{}, err + } + return t, nil +} + +// Format returns the canonical string format of v, where v can be +// any type that is convertible to a CloudEvents type. 
+func Format(v interface{}) (string, error) { + v, err := Validate(v) + if err != nil { + return "", err + } + switch v := v.(type) { + case bool: + return FormatBool(v), nil + case int32: + return FormatInteger(v), nil + case string: + return v, nil + case []byte: + return FormatBinary(v), nil + case URI: + return v.String(), nil + case URIRef: + // url.URL is often passed by pointer so allow both + return v.String(), nil + case Timestamp: + return FormatTime(v.Time), nil + default: + return "", fmt.Errorf("%T is not a CloudEvents type", v) + } +} + +// Validate v is a valid CloudEvents attribute value, convert it to one of: +// bool, int32, string, []byte, types.URI, types.URIRef, types.Timestamp +func Validate(v interface{}) (interface{}, error) { + switch v := v.(type) { + case bool, int32, string, []byte: + return v, nil // Already a CloudEvents type, no validation needed. + + case uint, uintptr, uint8, uint16, uint32, uint64: + u := reflect.ValueOf(v).Uint() + if u > math.MaxInt32 { + return nil, rangeErr(v) + } + return int32(u), nil + case int, int8, int16, int64: + i := reflect.ValueOf(v).Int() + if i > math.MaxInt32 || i < math.MinInt32 { + return nil, rangeErr(v) + } + return int32(i), nil + case float32, float64: + f := reflect.ValueOf(v).Float() + if f > math.MaxInt32 || f < math.MinInt32 { + return nil, rangeErr(v) + } + return int32(f), nil + + case *url.URL: + if v == nil { + break + } + return URI{URL: *v}, nil + case url.URL: + return URI{URL: v}, nil + case *URIRef: + if v != nil { + return *v, nil + } + return nil, nil + case URIRef: + return v, nil + case *URI: + if v != nil { + return *v, nil + } + return nil, nil + case URI: + return v, nil + case time.Time: + return Timestamp{Time: v}, nil + case *time.Time: + if v == nil { + break + } + return Timestamp{Time: *v}, nil + case Timestamp: + return v, nil + } + rx := reflect.ValueOf(v) + if rx.Kind() == reflect.Ptr && !rx.IsNil() { + // Allow pointers-to convertible types + return Validate(rx.Elem().Interface()) + } + return nil, fmt.Errorf("invalid CloudEvents value: %#v", v) +} + +// Clone v clones a CloudEvents attribute value, which is one of the valid types: +// bool, int32, string, []byte, types.URI, types.URIRef, types.Timestamp +// Returns the same type +// Panics if the type is not valid +func Clone(v interface{}) interface{} { + if v == nil { + return nil + } + switch v := v.(type) { + case bool, int32, string, nil: + return v // Already a CloudEvents type, no validation needed. + case []byte: + clone := make([]byte, len(v)) + copy(clone, v) + return v + case url.URL: + return URI{v} + case *url.URL: + return &URI{*v} + case URIRef: + return v + case *URIRef: + return &URIRef{v.URL} + case URI: + return v + case *URI: + return &URI{v.URL} + case time.Time: + return Timestamp{v} + case *time.Time: + return &Timestamp{*v} + case Timestamp: + return v + case *Timestamp: + return &Timestamp{v.Time} + } + panic(fmt.Errorf("invalid CloudEvents value: %#v", v)) +} + +// ToBool accepts a bool value or canonical "true"/"false" string. +func ToBool(v interface{}) (bool, error) { + v, err := Validate(v) + if err != nil { + return false, err + } + switch v := v.(type) { + case bool: + return v, nil + case string: + return ParseBool(v) + default: + return false, convertErr(true, v) + } +} + +// ToInteger accepts any numeric value in int32 range, or canonical string. 
+func ToInteger(v interface{}) (int32, error) { + v, err := Validate(v) + if err != nil { + return 0, err + } + switch v := v.(type) { + case int32: + return v, nil + case string: + return ParseInteger(v) + default: + return 0, convertErr(int32(0), v) + } +} + +// ToString returns a string value unaltered. +// +// This function does not perform canonical string encoding, use one of the +// Format functions for that. +func ToString(v interface{}) (string, error) { + v, err := Validate(v) + if err != nil { + return "", err + } + switch v := v.(type) { + case string: + return v, nil + default: + return "", convertErr("", v) + } +} + +// ToBinary returns a []byte value, decoding from base64 string if necessary. +func ToBinary(v interface{}) ([]byte, error) { + v, err := Validate(v) + if err != nil { + return nil, err + } + switch v := v.(type) { + case []byte: + return v, nil + case string: + return base64.StdEncoding.DecodeString(v) + default: + return nil, convertErr([]byte(nil), v) + } +} + +// ToURL returns a *url.URL value, parsing from string if necessary. +func ToURL(v interface{}) (*url.URL, error) { + v, err := Validate(v) + if err != nil { + return nil, err + } + switch v := v.(type) { + case *URI: + return &v.URL, nil + case URI: + return &v.URL, nil + case *URIRef: + return &v.URL, nil + case URIRef: + return &v.URL, nil + case string: + u, err := url.Parse(v) + if err != nil { + return nil, err + } + return u, nil + default: + return nil, convertErr((*url.URL)(nil), v) + } +} + +// ToTime returns a time.Time value, parsing from RFC3339 string if necessary. +func ToTime(v interface{}) (time.Time, error) { + v, err := Validate(v) + if err != nil { + return time.Time{}, err + } + switch v := v.(type) { + case Timestamp: + return v.Time, nil + case string: + ts, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + return time.Time{}, err + } + return ts, nil + default: + return time.Time{}, convertErr(time.Time{}, v) + } +} + +func IsZero(v interface{}) bool { + // Fast path + if v == nil { + return true + } + if s, ok := v.(string); ok && s == "" { + return true + } + return reflect.ValueOf(v).IsZero() +} + +type ConvertErr struct { + // Value being converted + Value interface{} + // Type of attempted conversion + Type reflect.Type + + extra string +} + +func (e *ConvertErr) Error() string { + return fmt.Sprintf("cannot convert %#v to %s%s", e.Value, e.Type, e.extra) +} + +func convertErr(target, v interface{}) *ConvertErr { + return &ConvertErr{Value: v, Type: reflect.TypeOf(target)} +} + +func rangeErr(v interface{}) error { + e := convertErr(int32(0), v) + e.extra = ": out of range" + return e +} diff --git a/vendor/github.com/eclipse/paho.golang/LICENSE b/vendor/github.com/eclipse/paho.golang/LICENSE new file mode 100644 index 000000000..d3087e4c5 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/LICENSE @@ -0,0 +1,277 @@ +Eclipse Public License - v 2.0 + + THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE + PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION + OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. DEFINITIONS + +"Contribution" means: + + a) in the case of the initial Contributor, the initial content + Distributed under this Agreement, and + + b) in the case of each subsequent Contributor: + i) changes to the Program, and + ii) additions to the Program; + where such changes and/or additions to the Program originate from + and are Distributed by that particular Contributor. 
A Contribution + "originates" from a Contributor if it was added to the Program by + such Contributor itself or anyone acting on such Contributor's behalf. + Contributions do not include changes or additions to the Program that + are not Modified Works. + +"Contributor" means any person or entity that Distributes the Program. + +"Licensed Patents" mean patent claims licensable by a Contributor which +are necessarily infringed by the use or sale of its Contribution alone +or when combined with the Program. + +"Program" means the Contributions Distributed in accordance with this +Agreement. + +"Recipient" means anyone who receives the Program under this Agreement +or any Secondary License (as applicable), including Contributors. + +"Derivative Works" shall mean any work, whether in Source Code or other +form, that is based on (or derived from) the Program and for which the +editorial revisions, annotations, elaborations, or other modifications +represent, as a whole, an original work of authorship. + +"Modified Works" shall mean any work in Source Code or other form that +results from an addition to, deletion from, or modification of the +contents of the Program, including, for purposes of clarity any new file +in Source Code form that contains any contents of the Program. Modified +Works shall not include works that contain only declarations, +interfaces, types, classes, structures, or files of the Program solely +in each case in order to link to, bind by name, or subclass the Program +or Modified Works thereof. + +"Distribute" means the acts of a) distributing or b) making available +in any manner that enables the transfer of a copy. + +"Source Code" means the form of a Program preferred for making +modifications, including but not limited to software source code, +documentation source, and configuration files. + +"Secondary License" means either the GNU General Public License, +Version 2.0, or any later versions of that license, including any +exceptions or additional permissions as identified by the initial +Contributor. + +2. GRANT OF RIGHTS + + a) Subject to the terms of this Agreement, each Contributor hereby + grants Recipient a non-exclusive, worldwide, royalty-free copyright + license to reproduce, prepare Derivative Works of, publicly display, + publicly perform, Distribute and sublicense the Contribution of such + Contributor, if any, and such Derivative Works. + + b) Subject to the terms of this Agreement, each Contributor hereby + grants Recipient a non-exclusive, worldwide, royalty-free patent + license under Licensed Patents to make, use, sell, offer to sell, + import and otherwise transfer the Contribution of such Contributor, + if any, in Source Code or other form. This patent license shall + apply to the combination of the Contribution and the Program if, at + the time the Contribution is added by the Contributor, such addition + of the Contribution causes such combination to be covered by the + Licensed Patents. The patent license shall not apply to any other + combinations which include the Contribution. No hardware per se is + licensed hereunder. + + c) Recipient understands that although each Contributor grants the + licenses to its Contributions set forth herein, no assurances are + provided by any Contributor that the Program does not infringe the + patent or other intellectual property rights of any other entity. 
+ Each Contributor disclaims any liability to Recipient for claims + brought by any other entity based on infringement of intellectual + property rights or otherwise. As a condition to exercising the + rights and licenses granted hereunder, each Recipient hereby + assumes sole responsibility to secure any other intellectual + property rights needed, if any. For example, if a third party + patent license is required to allow Recipient to Distribute the + Program, it is Recipient's responsibility to acquire that license + before distributing the Program. + + d) Each Contributor represents that to its knowledge it has + sufficient copyright rights in its Contribution, if any, to grant + the copyright license set forth in this Agreement. + + e) Notwithstanding the terms of any Secondary License, no + Contributor makes additional grants to any Recipient (other than + those set forth in this Agreement) as a result of such Recipient's + receipt of the Program under the terms of a Secondary License + (if permitted under the terms of Section 3). + +3. REQUIREMENTS + +3.1 If a Contributor Distributes the Program in any form, then: + + a) the Program must also be made available as Source Code, in + accordance with section 3.2, and the Contributor must accompany + the Program with a statement that the Source Code for the Program + is available under this Agreement, and informs Recipients how to + obtain it in a reasonable manner on or through a medium customarily + used for software exchange; and + + b) the Contributor may Distribute the Program under a license + different than this Agreement, provided that such license: + i) effectively disclaims on behalf of all other Contributors all + warranties and conditions, express and implied, including + warranties or conditions of title and non-infringement, and + implied warranties or conditions of merchantability and fitness + for a particular purpose; + + ii) effectively excludes on behalf of all other Contributors all + liability for damages, including direct, indirect, special, + incidental and consequential damages, such as lost profits; + + iii) does not attempt to limit or alter the recipients' rights + in the Source Code under section 3.2; and + + iv) requires any subsequent distribution of the Program by any + party to be under a license that satisfies the requirements + of this section 3. + +3.2 When the Program is Distributed as Source Code: + + a) it must be made available under this Agreement, or if the + Program (i) is combined with other material in a separate file or + files made available under a Secondary License, and (ii) the initial + Contributor attached to the Source Code the notice described in + Exhibit A of this Agreement, then the Program may be made available + under the terms of such Secondary Licenses, and + + b) a copy of this Agreement must be included with each copy of + the Program. + +3.3 Contributors may not remove or alter any copyright, patent, +trademark, attribution notices, disclaimers of warranty, or limitations +of liability ("notices") contained within the Program from any copy of +the Program which they Distribute, provided that Contributors may add +their own appropriate notices. + +4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities +with respect to end users, business partners and the like. 
While this +license is intended to facilitate the commercial use of the Program, +the Contributor who includes the Program in a commercial product +offering should do so in a manner which does not create potential +liability for other Contributors. Therefore, if a Contributor includes +the Program in a commercial product offering, such Contributor +("Commercial Contributor") hereby agrees to defend and indemnify every +other Contributor ("Indemnified Contributor") against any losses, +damages and costs (collectively "Losses") arising from claims, lawsuits +and other legal actions brought by a third party against the Indemnified +Contributor to the extent caused by the acts or omissions of such +Commercial Contributor in connection with its distribution of the Program +in a commercial product offering. The obligations in this section do not +apply to any claims or Losses relating to any actual or alleged +intellectual property infringement. In order to qualify, an Indemnified +Contributor must: a) promptly notify the Commercial Contributor in +writing of such claim, and b) allow the Commercial Contributor to control, +and cooperate with the Commercial Contributor in, the defense and any +related settlement negotiations. The Indemnified Contributor may +participate in any such claim at its own expense. + +For example, a Contributor might include the Program in a commercial +product offering, Product X. That Contributor is then a Commercial +Contributor. If that Commercial Contributor then makes performance +claims, or offers warranties related to Product X, those performance +claims and warranties are such Commercial Contributor's responsibility +alone. Under this section, the Commercial Contributor would have to +defend claims against the other Contributors related to those performance +claims and warranties, and if a court requires any other Contributor to +pay any damages as a result, the Commercial Contributor must pay +those damages. + +5. NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT +PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS" +BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR +IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF +TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR +PURPOSE. Each Recipient is solely responsible for determining the +appropriateness of using and distributing the Program and assumes all +risks associated with its exercise of rights under this Agreement, +including but not limited to the risks and costs of program errors, +compliance with applicable laws, damage to or loss of data, programs +or equipment, and unavailability or interruption of operations. + +6. DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT +PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS +SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST +PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE +EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + +7. 
GENERAL + +If any provision of this Agreement is invalid or unenforceable under +applicable law, it shall not affect the validity or enforceability of +the remainder of the terms of this Agreement, and without further +action by the parties hereto, such provision shall be reformed to the +minimum extent necessary to make such provision valid and enforceable. + +If Recipient institutes patent litigation against any entity +(including a cross-claim or counterclaim in a lawsuit) alleging that the +Program itself (excluding combinations of the Program with other software +or hardware) infringes such Recipient's patent(s), then such Recipient's +rights granted under Section 2(b) shall terminate as of the date such +litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it +fails to comply with any of the material terms or conditions of this +Agreement and does not cure such failure in a reasonable period of +time after becoming aware of such noncompliance. If all Recipient's +rights under this Agreement terminate, Recipient agrees to cease use +and distribution of the Program as soon as reasonably practicable. +However, Recipient's obligations under this Agreement and any licenses +granted by Recipient relating to the Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, +but in order to avoid inconsistency the Agreement is copyrighted and +may only be modified in the following manner. The Agreement Steward +reserves the right to publish new versions (including revisions) of +this Agreement from time to time. No one other than the Agreement +Steward has the right to modify this Agreement. The Eclipse Foundation +is the initial Agreement Steward. The Eclipse Foundation may assign the +responsibility to serve as the Agreement Steward to a suitable separate +entity. Each new version of the Agreement will be given a distinguishing +version number. The Program (including Contributions) may always be +Distributed subject to the version of the Agreement under which it was +received. In addition, after a new version of the Agreement is published, +Contributor may elect to Distribute the Program (including its +Contributions) under the new version. + +Except as expressly stated in Sections 2(a) and 2(b) above, Recipient +receives no rights or licenses to the intellectual property of any +Contributor under this Agreement, whether expressly, by implication, +estoppel or otherwise. All rights in the Program not expressly granted +under this Agreement are reserved. Nothing in this Agreement is intended +to be enforceable by any entity that is not a Contributor or Recipient. +No third-party beneficiary rights are created under this Agreement. + +Exhibit A - Form of Secondary Licenses Notice + +"This Source Code may also be made available under the following +Secondary Licenses when the conditions for such availability set forth +in the Eclipse Public License, v. 2.0 are satisfied: {name license(s), +version(s), and exceptions or additional permissions here}." + + Simply including a copy of this Agreement, including this Exhibit A + is not sufficient to license the Source Code under Secondary Licenses. + + If it is not possible or desirable to put the notice in a particular + file, then You may include the notice in a location (such as a LICENSE + file in a relevant directory) where a recipient would be likely to + look for such a notice. + + You may add additional accurate notices of copyright ownership. 
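For reference, the snippet below is a minimal, illustrative sketch and is not part of the vendored files or of this change. It shows how a caller typically consumes the CloudEvents helpers vendored above: the protocol.Result ACK/NACK receipts and the types attribute conversions. The classify helper and the sample result values are assumptions made up purely for illustration.

package main

import (
	"fmt"
	"time"

	"github.com/cloudevents/sdk-go/v2/protocol"
	"github.com/cloudevents/sdk-go/v2/types"
)

// classify interprets a send result using the ACK/NACK helpers defined in
// vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go.
func classify(res protocol.Result) string {
	switch {
	case protocol.IsACK(res): // a nil result also counts as an ACK
		return "acknowledged"
	case protocol.IsNACK(res):
		return "rejected by recipient"
	default: // equivalent to protocol.IsUndelivered(res)
		return "undelivered: " + res.Error()
	}
}

func main() {
	fmt.Println(classify(nil))                                       // acknowledged
	fmt.Println(classify(protocol.NewReceipt(false, "bad payload"))) // rejected by recipient
	fmt.Println(classify(protocol.NewResult("broker unreachable")))  // undelivered: broker unreachable

	// The vendored types package normalizes attribute values to their canonical
	// string form, e.g. a time.Time becomes an RFC3339 timestamp.
	if s, err := types.Format(time.Now()); err == nil {
		fmt.Println(s)
	}
}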
diff --git a/vendor/github.com/eclipse/paho.golang/packets/auth.go b/vendor/github.com/eclipse/paho.golang/packets/auth.go new file mode 100644 index 000000000..56237e00c --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/auth.go @@ -0,0 +1,77 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Auth is the Variable Header definition for a Auth control packet +type Auth struct { + Properties *Properties + ReasonCode byte +} + +// AuthSuccess is the return code for successful authentication +const ( + AuthSuccess = 0x00 + AuthContinueAuthentication = 0x18 + AuthReauthenticate = 0x19 +) + +func (a *Auth) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "AUTH: ReasonCode:%X", a.ReasonCode) + if a.Properties != nil { + fmt.Fprintf(&b, " Properties:\n%s", a.Properties) + } else { + fmt.Fprint(&b, "\n") + } + + return b.String() +} + +// Unpack is the implementation of the interface required function for a packet +func (a *Auth) Unpack(r *bytes.Buffer) error { + var err error + + success := r.Len() == 0 + noProps := r.Len() == 1 + if !success { + a.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + if !noProps { + err = a.Properties.Unpack(r, AUTH) + if err != nil { + return err + } + } + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (a *Auth) Buffers() net.Buffers { + idvp := a.Properties.Pack(AUTH) + propLen := encodeVBI(len(idvp)) + n := net.Buffers{[]byte{a.ReasonCode}, propLen} + if len(idvp) > 0 { + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (a *Auth) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: AUTH}} + cp.Content = a + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/connack.go b/vendor/github.com/eclipse/paho.golang/packets/connack.go new file mode 100644 index 000000000..3041fbcb5 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/connack.go @@ -0,0 +1,145 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Connack is the Variable Header definition for a connack control packet +type Connack struct { + Properties *Properties + ReasonCode byte + SessionPresent bool +} + +const ( + ConnackSuccess = 0x00 + ConnackUnspecifiedError = 0x80 + ConnackMalformedPacket = 0x81 + ConnackProtocolError = 0x81 + ConnackImplementationSpecificError = 0x83 + ConnackUnsupportedProtocolVersion = 0x84 + ConnackInvalidClientID = 0x85 + ConnackBadUsernameOrPassword = 0x86 + ConnackNotAuthorized = 0x87 + ConnackServerUnavailable = 0x88 + ConnackServerBusy = 0x89 + ConnackBanned = 0x8A + ConnackBadAuthenticationMethod = 0x8C + ConnackTopicNameInvalid = 0x90 + ConnackPacketTooLarge = 0x95 + ConnackQuotaExceeded = 0x97 + ConnackPayloadFormatInvalid = 0x99 + ConnackRetainNotSupported = 0x9A + ConnackQoSNotSupported = 0x9B + ConnackUseAnotherServer = 0x9C + ConnackServerMoved = 0x9D + ConnackConnectionRateExceeded = 0x9F +) + +func (c *Connack) String() string { + return fmt.Sprintf("CONNACK: ReasonCode:%d SessionPresent:%t\nProperties:\n%s", c.ReasonCode, c.SessionPresent, c.Properties) +} + +//Unpack is the implementation of the interface required function for a packet +func (c *Connack) Unpack(r *bytes.Buffer) error { + connackFlags, err := r.ReadByte() + if err != nil { + return err + } + c.SessionPresent = connackFlags&0x01 > 0 + + c.ReasonCode, err = r.ReadByte() + if 
err != nil { + return err + } + + err = c.Properties.Unpack(r, CONNACK) + if err != nil { + return err + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (c *Connack) Buffers() net.Buffers { + var header bytes.Buffer + + if c.SessionPresent { + header.WriteByte(1) + } else { + header.WriteByte(0) + } + header.WriteByte(c.ReasonCode) + + idvp := c.Properties.Pack(CONNACK) + propLen := encodeVBI(len(idvp)) + + n := net.Buffers{header.Bytes(), propLen} + if len(idvp) > 0 { + n = append(n, idvp) + } + + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (c *Connack) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: CONNACK}} + cp.Content = c + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (c *Connack) Reason() string { + switch c.ReasonCode { + case 0: + return "Success - The Connection is accepted." + case 128: + return "Unspecified error - The Server does not wish to reveal the reason for the failure, or none of the other Reason Codes apply." + case 129: + return "Malformed Packet - Data within the CONNECT packet could not be correctly parsed." + case 130: + return "Protocol Error - Data in the CONNECT packet does not conform to this specification." + case 131: + return "Implementation specific error - The CONNECT is valid but is not accepted by this Server." + case 132: + return "Unsupported Protocol Version - The Server does not support the version of the MQTT protocol requested by the Client." + case 133: + return "Client Identifier not valid - The Client Identifier is a valid string but is not allowed by the Server." + case 134: + return "Bad User Name or Password - The Server does not accept the User Name or Password specified by the Client" + case 135: + return "Not authorized - The Client is not authorized to connect." + case 136: + return "Server unavailable - The MQTT Server is not available." + case 137: + return "Server busy - The Server is busy. Try again later." + case 138: + return "Banned - This Client has been banned by administrative action. Contact the server administrator." + case 140: + return "Bad authentication method - The authentication method is not supported or does not match the authentication method currently in use." + case 144: + return "Topic Name invalid - The Will Topic Name is not malformed, but is not accepted by this Server." + case 149: + return "Packet too large - The CONNECT packet exceeded the maximum permissible size." + case 151: + return "Quota exceeded - An implementation or administrative imposed limit has been exceeded." + case 154: + return "Retain not supported - The Server does not support retained messages, and Will Retain was set to 1." + case 155: + return "QoS not supported - The Server does not support the QoS set in Will QoS." + case 156: + return "Use another server - The Client should temporarily use another server." + case 157: + return "Server moved - The Client should permanently use another server." + case 159: + return "Connection rate exceeded - The connection rate limit has been exceeded." 
+ } + + return "" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/connect.go b/vendor/github.com/eclipse/paho.golang/packets/connect.go new file mode 100644 index 000000000..31340f6bd --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/connect.go @@ -0,0 +1,189 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Connect is the Variable Header definition for a connect control packet +type Connect struct { + WillMessage []byte + Password []byte + Username string + ProtocolName string + ClientID string + WillTopic string + Properties *Properties + WillProperties *Properties + KeepAlive uint16 + ProtocolVersion byte + WillQOS byte + PasswordFlag bool + UsernameFlag bool + WillRetain bool + WillFlag bool + CleanStart bool +} + +func (c *Connect) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "CONNECT: ProtocolName:%s ProtocolVersion:%d ClientID:%s KeepAlive:%d CleanStart:%t", c.ProtocolName, c.ProtocolVersion, c.ClientID, c.KeepAlive, c.CleanStart) + if c.UsernameFlag { + fmt.Fprintf(&b, " Username:%s", c.Username) + } + if c.PasswordFlag { + fmt.Fprintf(&b, " Password:%s", c.Password) + } + fmt.Fprint(&b, "\n") + if c.WillFlag { + fmt.Fprintf(&b, " WillTopic:%s WillQOS:%d WillRetain:%t WillMessage:\n%s\n", c.WillTopic, c.WillQOS, c.WillRetain, c.WillMessage) + if c.WillProperties != nil { + fmt.Fprintf(&b, "WillProperties:\n%s", c.WillProperties) + } + } + if c.Properties != nil { + fmt.Fprintf(&b, "Properties:\n%s", c.Properties) + } + + return b.String() +} + +// PackFlags takes the Connect flags and packs them into the single byte +// representation used on the wire by MQTT +func (c *Connect) PackFlags() (f byte) { + if c.UsernameFlag { + f |= 0x01 << 7 + } + if c.PasswordFlag { + f |= 0x01 << 6 + } + if c.WillFlag { + f |= 0x01 << 2 + f |= c.WillQOS << 3 + if c.WillRetain { + f |= 0x01 << 5 + } + } + if c.CleanStart { + f |= 0x01 << 1 + } + return +} + +// UnpackFlags takes the wire byte representing the connect options flags +// and fills out the appropriate variables in the struct +func (c *Connect) UnpackFlags(b byte) { + c.CleanStart = 1&(b>>1) > 0 + c.WillFlag = 1&(b>>2) > 0 + c.WillQOS = 3 & (b >> 3) + c.WillRetain = 1&(b>>5) > 0 + c.PasswordFlag = 1&(b>>6) > 0 + c.UsernameFlag = 1&(b>>7) > 0 +} + +//Unpack is the implementation of the interface required function for a packet +func (c *Connect) Unpack(r *bytes.Buffer) error { + var err error + + if c.ProtocolName, err = readString(r); err != nil { + return err + } + + if c.ProtocolVersion, err = r.ReadByte(); err != nil { + return err + } + + flags, err := r.ReadByte() + if err != nil { + return err + } + c.UnpackFlags(flags) + + if c.KeepAlive, err = readUint16(r); err != nil { + return err + } + + err = c.Properties.Unpack(r, CONNECT) + if err != nil { + return err + } + + c.ClientID, err = readString(r) + if err != nil { + return err + } + + if c.WillFlag { + c.WillProperties = &Properties{} + err = c.WillProperties.Unpack(r, CONNECT) + if err != nil { + return err + } + c.WillTopic, err = readString(r) + if err != nil { + return err + } + c.WillMessage, err = readBinary(r) + if err != nil { + return err + } + } + + if c.UsernameFlag { + c.Username, err = readString(r) + if err != nil { + return err + } + } + + if c.PasswordFlag { + c.Password, err = readBinary(r) + if err != nil { + return err + } + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (c *Connect) Buffers() net.Buffers { + var cp 
bytes.Buffer + + writeString(c.ProtocolName, &cp) + cp.WriteByte(c.ProtocolVersion) + cp.WriteByte(c.PackFlags()) + writeUint16(c.KeepAlive, &cp) + idvp := c.Properties.Pack(CONNECT) + encodeVBIdirect(len(idvp), &cp) + cp.Write(idvp) + + writeString(c.ClientID, &cp) + if c.WillFlag { + willIdvp := c.WillProperties.Pack(CONNECT) + encodeVBIdirect(len(willIdvp), &cp) + cp.Write(willIdvp) + writeString(c.WillTopic, &cp) + writeBinary(c.WillMessage, &cp) + } + if c.UsernameFlag { + writeString(c.Username, &cp) + } + if c.PasswordFlag { + writeBinary(c.Password, &cp) + } + + return net.Buffers{cp.Bytes()} +} + +// WriteTo is the implementation of the interface required function for a packet +func (c *Connect) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: CONNECT}} + cp.Content = c + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/disconnect.go b/vendor/github.com/eclipse/paho.golang/packets/disconnect.go new file mode 100644 index 000000000..9180207a6 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/disconnect.go @@ -0,0 +1,152 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Disconnect is the Variable Header definition for a Disconnect control packet +type Disconnect struct { + Properties *Properties + ReasonCode byte +} + +func (d *Disconnect) String() string { + return fmt.Sprintf("DISCONNECT: ReasonCode:%X Properties\n%s", d.ReasonCode, d.Properties) +} + +// DisconnectNormalDisconnection, etc are the list of valid disconnection reason codes. +const ( + DisconnectNormalDisconnection = 0x00 + DisconnectDisconnectWithWillMessage = 0x04 + DisconnectUnspecifiedError = 0x80 + DisconnectMalformedPacket = 0x81 + DisconnectProtocolError = 0x82 + DisconnectImplementationSpecificError = 0x83 + DisconnectNotAuthorized = 0x87 + DisconnectServerBusy = 0x89 + DisconnectServerShuttingDown = 0x8B + DisconnectKeepAliveTimeout = 0x8D + DisconnectSessionTakenOver = 0x8E + DisconnectTopicFilterInvalid = 0x8F + DisconnectTopicNameInvalid = 0x90 + DisconnectReceiveMaximumExceeded = 0x93 + DisconnectTopicAliasInvalid = 0x94 + DisconnectPacketTooLarge = 0x95 + DisconnectMessageRateTooHigh = 0x96 + DisconnectQuotaExceeded = 0x97 + DisconnectAdministrativeAction = 0x98 + DisconnectPayloadFormatInvalid = 0x99 + DisconnectRetainNotSupported = 0x9A + DisconnectQoSNotSupported = 0x9B + DisconnectUseAnotherServer = 0x9C + DisconnectServerMoved = 0x9D + DisconnectSharedSubscriptionNotSupported = 0x9E + DisconnectConnectionRateExceeded = 0x9F + DisconnectMaximumConnectTime = 0xA0 + DisconnectSubscriptionIdentifiersNotSupported = 0xA1 + DisconnectWildcardSubscriptionsNotSupported = 0xA2 +) + +// Unpack is the implementation of the interface required function for a packet +func (d *Disconnect) Unpack(r *bytes.Buffer) error { + var err error + d.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + err = d.Properties.Unpack(r, DISCONNECT) + if err != nil { + return err + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (d *Disconnect) Buffers() net.Buffers { + idvp := d.Properties.Pack(DISCONNECT) + propLen := encodeVBI(len(idvp)) + n := net.Buffers{[]byte{d.ReasonCode}, propLen} + if len(idvp) > 0 { + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (d *Disconnect) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: 
FixedHeader{Type: DISCONNECT}} + cp.Content = d + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (d *Disconnect) Reason() string { + switch d.ReasonCode { + case 0: + return "Normal disconnection - Close the connection normally. Do not send the Will Message." + case 4: + return "Disconnect with Will Message - The Client wishes to disconnect but requires that the Server also publishes its Will Message." + case 128: + return "Unspecified error - The Connection is closed but the sender either does not wish to reveal the reason, or none of the other Reason Codes apply." + case 129: + return "Malformed Packet - The received packet does not conform to this specification." + case 130: + return "Protocol Error - An unexpected or out of order packet was received." + case 131: + return "Implementation specific error - The packet received is valid but cannot be processed by this implementation." + case 135: + return "Not authorized - The request is not authorized." + case 137: + return "Server busy - The Server is busy and cannot continue processing requests from this Client." + case 139: + return "Server shutting down - The Server is shutting down." + case 141: + return "Keep Alive timeout - The Connection is closed because no packet has been received for 1.5 times the Keepalive time." + case 142: + return "Session taken over - Another Connection using the same ClientID has connected causing this Connection to be closed." + case 143: + return "Topic Filter invalid - The Topic Filter is correctly formed, but is not accepted by this Sever." + case 144: + return "Topic Name invalid - The Topic Name is correctly formed, but is not accepted by this Client or Server." + case 147: + return "Receive Maximum exceeded - The Client or Server has received more than Receive Maximum publication for which it has not sent PUBACK or PUBCOMP." + case 148: + return "Topic Alias invalid - The Client or Server has received a PUBLISH packet containing a Topic Alias which is greater than the Maximum Topic Alias it sent in the CONNECT or CONNACK packet." + case 149: + return "Packet too large - The packet size is greater than Maximum Packet Size for this Client or Server." + case 150: + return "Message rate too high - The received data rate is too high." + case 151: + return "Quota exceeded - An implementation or administrative imposed limit has been exceeded." + case 152: + return "Administrative action - The Connection is closed due to an administrative action." + case 153: + return "Payload format invalid - The payload format does not match the one specified by the Payload Format Indicator." + case 154: + return "Retain not supported - The Server has does not support retained messages." + case 155: + return "QoS not supported - The Client specified a QoS greater than the QoS specified in a Maximum QoS in the CONNACK." + case 156: + return "Use another server - The Client should temporarily change its Server." + case 157: + return "Server moved - The Server is moved and the Client should permanently change its server location." + case 158: + return "Shared Subscription not supported - The Server does not support Shared Subscriptions." + case 159: + return "Connection rate exceeded - This connection is closed because the connection rate is too high." + case 160: + return "Maximum connect time - The maximum connection time authorized for this connection has been exceeded." 
+ case 161: + return "Subscription Identifiers not supported - The Server does not support Subscription Identifiers; the subscription is not accepted." + case 162: + return "Wildcard subscriptions not supported - The Server does not support Wildcard subscription; the subscription is not accepted." + } + + return "" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/packets.go b/vendor/github.com/eclipse/paho.golang/packets/packets.go new file mode 100644 index 000000000..496594012 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/packets.go @@ -0,0 +1,447 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "sync" +) + +// PacketType is a type alias to byte representing the different +// MQTT control packet types +// type PacketType byte + +// The following consts are the packet type number for each of the +// different control packets in MQTT +const ( + _ byte = iota + CONNECT + CONNACK + PUBLISH + PUBACK + PUBREC + PUBREL + PUBCOMP + SUBSCRIBE + SUBACK + UNSUBSCRIBE + UNSUBACK + PINGREQ + PINGRESP + DISCONNECT + AUTH +) + +type ( + // Packet is the interface defining the unique parts of a controlpacket + Packet interface { + Unpack(*bytes.Buffer) error + Buffers() net.Buffers + WriteTo(io.Writer) (int64, error) + } + + // FixedHeader is the definition of a control packet fixed header + FixedHeader struct { + remainingLength int + Type byte + Flags byte + } + + // ControlPacket is the definition of a control packet + ControlPacket struct { + Content Packet + FixedHeader + } +) + +// NewThreadSafeConn wraps net.Conn with a mutex. ControlPacket uses it in +// WriteTo method to ensure parallel writes are thread-Safe. +func NewThreadSafeConn(c net.Conn) net.Conn { + type threadSafeConn struct { + net.Conn + sync.Locker + } + + return &threadSafeConn{ + Conn: c, + Locker: &sync.Mutex{}, + } +} + +// WriteTo operates on a FixedHeader and takes the option values and produces +// the wire format byte that represents these. 
+func (f *FixedHeader) WriteTo(w io.Writer) (int64, error) { + if _, err := w.Write([]byte{byte(f.Type)<<4 | f.Flags}); err != nil { + return 0, err + } + if _, err := w.Write(encodeVBI(f.remainingLength)); err != nil { + return 0, err + } + + return 0, nil +} + +// PacketID is a helper function that returns the value of the PacketID +// field from any kind of mqtt packet in the Content element +func (c *ControlPacket) PacketID() uint16 { + switch r := c.Content.(type) { + case *Publish: + return r.PacketID + case *Puback: + return r.PacketID + case *Pubrec: + return r.PacketID + case *Pubrel: + return r.PacketID + case *Pubcomp: + return r.PacketID + case *Subscribe: + return r.PacketID + case *Suback: + return r.PacketID + case *Unsubscribe: + return r.PacketID + case *Unsuback: + return r.PacketID + default: + return 0 + } +} + +func (c *ControlPacket) PacketType() string { + return [...]string{ + "", + "CONNECT", + "CONNACK", + "PUBLISH", + "PUBACK", + "PUBREC", + "PUBREL", + "PUBCOMP", + "SUBSCRIBE", + "SUBACK", + "UNSUBSCRIBE", + "UNSUBACK", + "PINGREQ", + "PINGRESP", + "DISCONNECT", + "AUTH", + }[c.FixedHeader.Type] +} + +// NewControlPacket takes a packetType and returns a pointer to a +// ControlPacket where the VariableHeader field is a pointer to an +// instance of a VariableHeader definition for that packetType +func NewControlPacket(t byte) *ControlPacket { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: t}} + switch t { + case CONNECT: + cp.Content = &Connect{ + ProtocolName: "MQTT", + ProtocolVersion: 5, + Properties: &Properties{}, + } + case CONNACK: + cp.Content = &Connack{Properties: &Properties{}} + case PUBLISH: + cp.Content = &Publish{Properties: &Properties{}} + case PUBACK: + cp.Content = &Puback{Properties: &Properties{}} + case PUBREC: + cp.Content = &Pubrec{Properties: &Properties{}} + case PUBREL: + cp.Flags = 2 + cp.Content = &Pubrel{Properties: &Properties{}} + case PUBCOMP: + cp.Content = &Pubcomp{Properties: &Properties{}} + case SUBSCRIBE: + cp.Flags = 2 + cp.Content = &Subscribe{ + Subscriptions: make(map[string]SubOptions), + Properties: &Properties{}, + } + case SUBACK: + cp.Content = &Suback{Properties: &Properties{}} + case UNSUBSCRIBE: + cp.Flags = 2 + cp.Content = &Unsubscribe{Properties: &Properties{}} + case UNSUBACK: + cp.Content = &Unsuback{Properties: &Properties{}} + case PINGREQ: + cp.Content = &Pingreq{} + case PINGRESP: + cp.Content = &Pingresp{} + case DISCONNECT: + cp.Content = &Disconnect{Properties: &Properties{}} + case AUTH: + cp.Flags = 1 + cp.Content = &Auth{Properties: &Properties{}} + default: + return nil + } + return cp +} + +// ReadPacket reads a control packet from a io.Reader and returns a completed +// struct with the appropriate data +func ReadPacket(r io.Reader) (*ControlPacket, error) { + t := [1]byte{} + _, err := io.ReadFull(r, t[:]) + if err != nil { + return nil, err + } + // cp := NewControlPacket(PacketType(t[0] >> 4)) + // if cp == nil { + // return nil, fmt.Errorf("invalid packet type requested, %d", t[0]>>4) + // } + + pt := t[0] >> 4 + cp := &ControlPacket{FixedHeader: FixedHeader{Type: pt}} + switch pt { + case CONNECT: + cp.Content = &Connect{ + ProtocolName: "MQTT", + ProtocolVersion: 5, + Properties: &Properties{}, + } + case CONNACK: + cp.Content = &Connack{Properties: &Properties{}} + case PUBLISH: + cp.Content = &Publish{Properties: &Properties{}} + case PUBACK: + cp.Content = &Puback{Properties: &Properties{}} + case PUBREC: + cp.Content = &Pubrec{Properties: &Properties{}} + case PUBREL: + cp.Flags 
= 2 + cp.Content = &Pubrel{Properties: &Properties{}} + case PUBCOMP: + cp.Content = &Pubcomp{Properties: &Properties{}} + case SUBSCRIBE: + cp.Flags = 2 + cp.Content = &Subscribe{ + Subscriptions: make(map[string]SubOptions), + Properties: &Properties{}, + } + case SUBACK: + cp.Content = &Suback{Properties: &Properties{}} + case UNSUBSCRIBE: + cp.Flags = 2 + cp.Content = &Unsubscribe{Properties: &Properties{}} + case UNSUBACK: + cp.Content = &Unsuback{Properties: &Properties{}} + case PINGREQ: + cp.Content = &Pingreq{} + case PINGRESP: + cp.Content = &Pingresp{} + case DISCONNECT: + cp.Content = &Disconnect{Properties: &Properties{}} + case AUTH: + cp.Flags = 1 + cp.Content = &Auth{Properties: &Properties{}} + default: + return nil, fmt.Errorf("unknown packet type %d requested", pt) + } + + cp.Flags = t[0] & 0xF + if cp.Type == PUBLISH { + cp.Content.(*Publish).QoS = (cp.Flags & 0x6) >> 1 + } + vbi, err := getVBI(r) + if err != nil { + return nil, err + } + cp.remainingLength, err = decodeVBI(vbi) + if err != nil { + return nil, err + } + + var content bytes.Buffer + content.Grow(cp.remainingLength) + + n, err := io.CopyN(&content, r, int64(cp.remainingLength)) + if err != nil { + return nil, err + } + + if n != int64(cp.remainingLength) { + return nil, fmt.Errorf("failed to read packet, expected %d bytes, read %d", cp.remainingLength, n) + } + err = cp.Content.Unpack(&content) + if err != nil { + return nil, err + } + return cp, nil +} + +// WriteTo writes a packet to an io.Writer, handling packing all the parts of +// a control packet. +func (c *ControlPacket) WriteTo(w io.Writer) (int64, error) { + buffers := c.Content.Buffers() + for _, b := range buffers { + c.remainingLength += len(b) + } + + var header bytes.Buffer + if _, err := c.FixedHeader.WriteTo(&header); err != nil { + return 0, err + } + + buffers = append(net.Buffers{header.Bytes()}, buffers...) 
+ + if safe, ok := w.(sync.Locker); ok { + safe.Lock() + defer safe.Unlock() + } + return buffers.WriteTo(w) +} + +func encodeVBI(length int) []byte { + var x int + b := [4]byte{} + for { + digit := byte(length % 128) + length /= 128 + if length > 0 { + digit |= 0x80 + } + b[x] = digit + x++ + if length == 0 { + return b[:x] + } + } +} + +func encodeVBIdirect(length int, buf *bytes.Buffer) { + var x int + b := [4]byte{} + for { + digit := byte(length % 128) + length /= 128 + if length > 0 { + digit |= 0x80 + } + b[x] = digit + x++ + if length == 0 { + buf.Write(b[:x]) + return + } + } +} + +func getVBI(r io.Reader) (*bytes.Buffer, error) { + var ret bytes.Buffer + digit := [1]byte{} + for { + _, err := io.ReadFull(r, digit[:]) + if err != nil { + return nil, err + } + ret.WriteByte(digit[0]) + if digit[0] <= 0x7f { + return &ret, nil + } + } +} + +func decodeVBI(r *bytes.Buffer) (int, error) { + var vbi uint32 + var multiplier uint32 + for { + digit, err := r.ReadByte() + if err != nil && err != io.EOF { + return 0, err + } + vbi |= uint32(digit&127) << multiplier + if (digit & 128) == 0 { + break + } + multiplier += 7 + } + return int(vbi), nil +} + +func writeUint16(u uint16, b *bytes.Buffer) error { + if err := b.WriteByte(byte(u >> 8)); err != nil { + return err + } + return b.WriteByte(byte(u)) +} + +func writeUint32(u uint32, b *bytes.Buffer) error { + if err := b.WriteByte(byte(u >> 24)); err != nil { + return err + } + if err := b.WriteByte(byte(u >> 16)); err != nil { + return err + } + if err := b.WriteByte(byte(u >> 8)); err != nil { + return err + } + return b.WriteByte(byte(u)) +} + +func writeString(s string, b *bytes.Buffer) { + writeUint16(uint16(len(s)), b) + b.WriteString(s) +} + +func writeBinary(d []byte, b *bytes.Buffer) { + writeUint16(uint16(len(d)), b) + b.Write(d) +} + +func readUint16(b *bytes.Buffer) (uint16, error) { + b1, err := b.ReadByte() + if err != nil { + return 0, err + } + b2, err := b.ReadByte() + if err != nil { + return 0, err + } + return (uint16(b1) << 8) | uint16(b2), nil +} + +func readUint32(b *bytes.Buffer) (uint32, error) { + b1, err := b.ReadByte() + if err != nil { + return 0, err + } + b2, err := b.ReadByte() + if err != nil { + return 0, err + } + b3, err := b.ReadByte() + if err != nil { + return 0, err + } + b4, err := b.ReadByte() + if err != nil { + return 0, err + } + return (uint32(b1) << 24) | (uint32(b2) << 16) | (uint32(b3) << 8) | uint32(b4), nil +} + +func readBinary(b *bytes.Buffer) ([]byte, error) { + size, err := readUint16(b) + if err != nil { + return nil, err + } + + var s bytes.Buffer + s.Grow(int(size)) + if _, err := io.CopyN(&s, b, int64(size)); err != nil { + return nil, err + } + + return s.Bytes(), nil +} + +func readString(b *bytes.Buffer) (string, error) { + s, err := readBinary(b) + return string(s), err +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/pingreq.go b/vendor/github.com/eclipse/paho.golang/packets/pingreq.go new file mode 100644 index 000000000..85f30c2b5 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/pingreq.go @@ -0,0 +1,34 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Pingreq is the Variable Header definition for a Pingreq control packet +type Pingreq struct { +} + +func (p *Pingreq) String() string { + return fmt.Sprintf("PINGREQ") +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Pingreq) Unpack(r *bytes.Buffer) error { + return nil +} + +// Buffers is the implementation of the interface 
required function for a packet +func (p *Pingreq) Buffers() net.Buffers { + return nil +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Pingreq) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PINGREQ}} + cp.Content = p + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/pingresp.go b/vendor/github.com/eclipse/paho.golang/packets/pingresp.go new file mode 100644 index 000000000..c110fc4dc --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/pingresp.go @@ -0,0 +1,34 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Pingresp is the Variable Header definition for a Pingresp control packet +type Pingresp struct { +} + +func (p *Pingresp) String() string { + return fmt.Sprintf("PINGRESP") +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Pingresp) Unpack(r *bytes.Buffer) error { + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Pingresp) Buffers() net.Buffers { + return nil +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Pingresp) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PINGRESP}} + cp.Content = p + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/properties.go b/vendor/github.com/eclipse/paho.golang/packets/properties.go new file mode 100644 index 000000000..fe1f5e22e --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/properties.go @@ -0,0 +1,804 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "strings" +) + +// PropPayloadFormat, etc are the list of property codes for the +// MQTT packet properties +const ( + PropPayloadFormat byte = 1 + PropMessageExpiry byte = 2 + PropContentType byte = 3 + PropResponseTopic byte = 8 + PropCorrelationData byte = 9 + PropSubscriptionIdentifier byte = 11 + PropSessionExpiryInterval byte = 17 + PropAssignedClientID byte = 18 + PropServerKeepAlive byte = 19 + PropAuthMethod byte = 21 + PropAuthData byte = 22 + PropRequestProblemInfo byte = 23 + PropWillDelayInterval byte = 24 + PropRequestResponseInfo byte = 25 + PropResponseInfo byte = 26 + PropServerReference byte = 28 + PropReasonString byte = 31 + PropReceiveMaximum byte = 33 + PropTopicAliasMaximum byte = 34 + PropTopicAlias byte = 35 + PropMaximumQOS byte = 36 + PropRetainAvailable byte = 37 + PropUser byte = 38 + PropMaximumPacketSize byte = 39 + PropWildcardSubAvailable byte = 40 + PropSubIDAvailable byte = 41 + PropSharedSubAvailable byte = 42 +) + +// User is a struct for the User properties, originally it was a map +// then it was pointed out that user properties are allowed to appear +// more than once +type User struct { + Key, Value string +} + +// Properties is a struct representing the all the described properties +// allowed by the MQTT protocol, determining the validity of a property +// relvative to the packettype it was received in is provided by the +// ValidateID function +type Properties struct { + // PayloadFormat indicates the format of the payload of the message + // 0 is unspecified bytes + // 1 is UTF8 encoded character data + PayloadFormat *byte + // MessageExpiry is the lifetime of the message in seconds + MessageExpiry *uint32 + // ContentType is a UTF8 string describing the content of the message + // for example it could be a MIME type + ContentType 
string + // ResponseTopic is a UTF8 string indicating the topic name to which any + // response to this message should be sent + ResponseTopic string + // CorrelationData is binary data used to associate future response + // messages with the original request message + CorrelationData []byte + // SubscriptionIdentifier is an identifier of the subscription to which + // the Publish matched + SubscriptionIdentifier *int + // SessionExpiryInterval is the time in seconds after a client disconnects + // that the server should retain the session information (subscriptions etc) + SessionExpiryInterval *uint32 + // AssignedClientID is the server assigned client identifier in the case + // that a client connected without specifying a clientID the server + // generates one and returns it in the Connack + AssignedClientID string + // ServerKeepAlive allows the server to specify in the Connack packet + // the time in seconds to be used as the keep alive value + ServerKeepAlive *uint16 + // AuthMethod is a UTF8 string containing the name of the authentication + // method to be used for extended authentication + AuthMethod string + // AuthData is binary data containing authentication data + AuthData []byte + // RequestProblemInfo is used by the Client to indicate to the server to + // include the Reason String and/or User Properties in case of failures + RequestProblemInfo *byte + // WillDelayInterval is the number of seconds the server waits after the + // point at which it would otherwise send the will message before sending + // it. The client reconnecting before that time expires causes the server + // to cancel sending the will + WillDelayInterval *uint32 + // RequestResponseInfo is used by the Client to request the Server provide + // Response Information in the Connack + RequestResponseInfo *byte + // ResponseInfo is a UTF8 encoded string that can be used as the basis for + // createing a Response Topic. The way in which the Client creates a + // Response Topic from the Response Information is not defined. A common + // use of this is to pass a globally unique portion of the topic tree which + // is reserved for this Client for at least the lifetime of its Session. This + // often cannot just be a random name as both the requesting Client and the + // responding Client need to be authorized to use it. It is normal to use this + // as the root of a topic tree for a particular Client. For the Server to + // return this information, it normally needs to be correctly configured. 
+ // Using this mechanism allows this configuration to be done once in the + // Server rather than in each Client + ResponseInfo string + // ServerReference is a UTF8 string indicating another server the client + // can use + ServerReference string + // ReasonString is a UTF8 string representing the reason associated with + // this response, intended to be human readable for diagnostic purposes + ReasonString string + // ReceiveMaximum is the maximum number of QOS1 & 2 messages allowed to be + // 'inflight' (not having received a PUBACK/PUBCOMP response for) + ReceiveMaximum *uint16 + // TopicAliasMaximum is the highest value permitted as a Topic Alias + TopicAliasMaximum *uint16 + // TopicAlias is used in place of the topic string to reduce the size of + // packets for repeated messages on a topic + TopicAlias *uint16 + // MaximumQOS is the highest QOS level permitted for a Publish + MaximumQOS *byte + // RetainAvailable indicates whether the server supports messages with the + // retain flag set + RetainAvailable *byte + // User is a slice of user provided properties (key and value) + User []User + // MaximumPacketSize allows the client or server to specify the maximum packet + // size in bytes that they support + MaximumPacketSize *uint32 + // WildcardSubAvailable indicates whether wildcard subscriptions are permitted + WildcardSubAvailable *byte + // SubIDAvailable indicates whether subscription identifiers are supported + SubIDAvailable *byte + // SharedSubAvailable indicates whether shared subscriptions are supported + SharedSubAvailable *byte +} + +func (p *Properties) String() string { + var b strings.Builder + if p.PayloadFormat != nil { + fmt.Fprintf(&b, "\tPayloadFormat:%d\n", *p.PayloadFormat) + } + if p.MessageExpiry != nil { + fmt.Fprintf(&b, "\tMessageExpiry:%d\n", *p.MessageExpiry) + } + if p.ContentType != "" { + fmt.Fprintf(&b, "\tContentType:%s\n", p.ContentType) + } + if p.ResponseTopic != "" { + fmt.Fprintf(&b, "\tResponseTopic:%s\n", p.ResponseTopic) + } + if len(p.CorrelationData) > 0 { + fmt.Fprintf(&b, "\tCorrelationData:%X\n", p.CorrelationData) + } + if p.SubscriptionIdentifier != nil { + fmt.Fprintf(&b, "\tSubscriptionIdentifier:%d\n", *p.SubscriptionIdentifier) + } + if p.SessionExpiryInterval != nil { + fmt.Fprintf(&b, "\tSessionExpiryInterval:%d\n", *p.SessionExpiryInterval) + } + if p.AssignedClientID != "" { + fmt.Fprintf(&b, "\tAssignedClientID:%s\n", p.AssignedClientID) + } + if p.ServerKeepAlive != nil { + fmt.Fprintf(&b, "\tServerKeepAlive:%d\n", *p.ServerKeepAlive) + } + if p.AuthMethod != "" { + fmt.Fprintf(&b, "\tAuthMethod:%s\n", p.AuthMethod) + } + if len(p.AuthData) > 0 { + fmt.Fprintf(&b, "\tAuthData:%X\n", p.AuthData) + } + if p.RequestProblemInfo != nil { + fmt.Fprintf(&b, "\tRequestProblemInfo:%d\n", *p.RequestProblemInfo) + } + if p.WillDelayInterval != nil { + fmt.Fprintf(&b, "\tWillDelayInterval:%d\n", *p.WillDelayInterval) + } + if p.RequestResponseInfo != nil { + fmt.Fprintf(&b, "\tRequestResponseInfo:%d\n", *p.RequestResponseInfo) + } + if p.ServerReference != "" { + fmt.Fprintf(&b, "\tServerReference:%s\n", p.ServerReference) + } + if p.ReasonString != "" { + fmt.Fprintf(&b, "\tReasonString:%s\n", p.ReasonString) + } + if p.ReceiveMaximum != nil { + fmt.Fprintf(&b, "\tReceiveMaximum:%d\n", *p.ReceiveMaximum) + } + if p.TopicAliasMaximum != nil { + fmt.Fprintf(&b, "\tTopicAliasMaximum:%d\n", *p.TopicAliasMaximum) + } + if p.TopicAlias != nil { + fmt.Fprintf(&b, "\tTopicAlias:%d\n", *p.TopicAlias) + } + if p.MaximumQOS != nil { + 
fmt.Fprintf(&b, "\tMaximumQOS:%d\n", *p.MaximumQOS) + } + if p.RetainAvailable != nil { + fmt.Fprintf(&b, "\tRetainAvailable:%d\n", *p.RetainAvailable) + } + if p.MaximumPacketSize != nil { + fmt.Fprintf(&b, "\tMaximumPacketSize:%d\n", *p.MaximumPacketSize) + } + if p.WildcardSubAvailable != nil { + fmt.Fprintf(&b, "\tWildcardSubAvailable:%d\n", *p.WildcardSubAvailable) + } + if p.SubIDAvailable != nil { + fmt.Fprintf(&b, "\tSubIDAvailable:%d\n", *p.SubIDAvailable) + } + if p.SharedSubAvailable != nil { + fmt.Fprintf(&b, "\tSharedSubAvailable:%d\n", *p.SharedSubAvailable) + } + if len(p.User) > 0 { + fmt.Fprint(&b, "\tUser Properties:\n") + for _, v := range p.User { + fmt.Fprintf(&b, "\t\t%s:%s\n", v.Key, v.Value) + } + } + + return b.String() +} + +// Pack takes all the defined properties for an Properties and produces +// a slice of bytes representing the wire format for the information +func (i *Properties) Pack(p byte) []byte { + var b bytes.Buffer + + if i == nil { + return nil + } + + if p == PUBLISH { + if i.PayloadFormat != nil { + b.WriteByte(PropPayloadFormat) + b.WriteByte(*i.PayloadFormat) + } + + if i.MessageExpiry != nil { + b.WriteByte(PropMessageExpiry) + writeUint32(*i.MessageExpiry, &b) + } + + if i.ContentType != "" { + b.WriteByte(PropContentType) + writeString(i.ContentType, &b) + } + + if i.ResponseTopic != "" { + b.WriteByte(PropResponseTopic) + writeString(i.ResponseTopic, &b) + } + + if len(i.CorrelationData) > 0 { + b.WriteByte(PropCorrelationData) + writeBinary(i.CorrelationData, &b) + } + + if i.TopicAlias != nil { + b.WriteByte(PropTopicAlias) + writeUint16(*i.TopicAlias, &b) + } + } + + if p == PUBLISH || p == SUBSCRIBE { + if i.SubscriptionIdentifier != nil { + b.WriteByte(PropSubscriptionIdentifier) + encodeVBIdirect(*i.SubscriptionIdentifier, &b) + } + } + + if p == CONNECT || p == CONNACK { + if i.ReceiveMaximum != nil { + b.WriteByte(PropReceiveMaximum) + writeUint16(*i.ReceiveMaximum, &b) + } + + if i.TopicAliasMaximum != nil { + b.WriteByte(PropTopicAliasMaximum) + writeUint16(*i.TopicAliasMaximum, &b) + } + + if i.MaximumQOS != nil { + b.WriteByte(PropMaximumQOS) + b.WriteByte(*i.MaximumQOS) + } + + if i.MaximumPacketSize != nil { + b.WriteByte(PropMaximumPacketSize) + writeUint32(*i.MaximumPacketSize, &b) + } + } + + if p == CONNACK { + if i.AssignedClientID != "" { + b.WriteByte(PropAssignedClientID) + writeString(i.AssignedClientID, &b) + } + + if i.ServerKeepAlive != nil { + b.WriteByte(PropServerKeepAlive) + writeUint16(*i.ServerKeepAlive, &b) + } + + if i.WildcardSubAvailable != nil { + b.WriteByte(PropWildcardSubAvailable) + b.WriteByte(*i.WildcardSubAvailable) + } + + if i.SubIDAvailable != nil { + b.WriteByte(PropSubIDAvailable) + b.WriteByte(*i.SubIDAvailable) + } + + if i.SharedSubAvailable != nil { + b.WriteByte(PropSharedSubAvailable) + b.WriteByte(*i.SharedSubAvailable) + } + + if i.RetainAvailable != nil { + b.WriteByte(PropRetainAvailable) + b.WriteByte(*i.RetainAvailable) + } + + if i.ResponseInfo != "" { + b.WriteByte(PropResponseInfo) + writeString(i.ResponseInfo, &b) + } + } + + if p == CONNECT { + if i.RequestProblemInfo != nil { + b.WriteByte(PropRequestProblemInfo) + b.WriteByte(*i.RequestProblemInfo) + } + + if i.WillDelayInterval != nil { + b.WriteByte(PropWillDelayInterval) + writeUint32(*i.WillDelayInterval, &b) + } + + if i.RequestResponseInfo != nil { + b.WriteByte(PropRequestResponseInfo) + b.WriteByte(*i.RequestResponseInfo) + } + } + + if p == CONNECT || p == CONNACK || p == DISCONNECT { + if i.SessionExpiryInterval != 
nil { + b.WriteByte(PropSessionExpiryInterval) + writeUint32(*i.SessionExpiryInterval, &b) + } + } + + if p == CONNECT || p == CONNACK || p == AUTH { + if i.AuthMethod != "" { + b.WriteByte(PropAuthMethod) + writeString(i.AuthMethod, &b) + } + + if i.AuthData != nil && len(i.AuthData) > 0 { + b.WriteByte(PropAuthData) + writeBinary(i.AuthData, &b) + } + } + + if p == CONNACK || p == DISCONNECT { + if i.ServerReference != "" { + b.WriteByte(PropServerReference) + writeString(i.ServerReference, &b) + } + } + + if p != CONNECT { + if i.ReasonString != "" { + b.WriteByte(PropReasonString) + writeString(i.ReasonString, &b) + } + } + + for _, v := range i.User { + b.WriteByte(PropUser) + writeString(v.Key, &b) + writeString(v.Value, &b) + } + + return b.Bytes() +} + +// PackBuf will create a bytes.Buffer of the packed properties, it +// will only pack the properties appropriate to the packet type p +// even though other properties may exist, it will silently ignore +// them +func (i *Properties) PackBuf(p byte) *bytes.Buffer { + var b bytes.Buffer + + if i == nil { + return nil + } + + if p == PUBLISH { + if i.PayloadFormat != nil { + b.WriteByte(PropPayloadFormat) + b.WriteByte(*i.PayloadFormat) + } + + if i.MessageExpiry != nil { + b.WriteByte(PropMessageExpiry) + writeUint32(*i.MessageExpiry, &b) + } + + if i.ContentType != "" { + b.WriteByte(PropContentType) + writeString(i.ContentType, &b) + } + + if i.ResponseTopic != "" { + b.WriteByte(PropResponseTopic) + writeString(i.ResponseTopic, &b) + } + + if i.CorrelationData != nil && len(i.CorrelationData) > 0 { + b.WriteByte(PropCorrelationData) + writeBinary(i.CorrelationData, &b) + } + + if i.TopicAlias != nil { + b.WriteByte(PropTopicAlias) + writeUint16(*i.TopicAlias, &b) + } + } + + if p == PUBLISH || p == SUBSCRIBE { + if i.SubscriptionIdentifier != nil { + b.WriteByte(PropSubscriptionIdentifier) + encodeVBIdirect(*i.SubscriptionIdentifier, &b) + } + } + + if p == CONNECT || p == CONNACK { + if i.ReceiveMaximum != nil { + b.WriteByte(PropReceiveMaximum) + writeUint16(*i.ReceiveMaximum, &b) + } + + if i.TopicAliasMaximum != nil { + b.WriteByte(PropTopicAliasMaximum) + writeUint16(*i.TopicAliasMaximum, &b) + } + + if i.MaximumQOS != nil { + b.WriteByte(PropMaximumQOS) + b.WriteByte(*i.MaximumQOS) + } + + if i.MaximumPacketSize != nil { + b.WriteByte(PropMaximumPacketSize) + writeUint32(*i.MaximumPacketSize, &b) + } + } + + if p == CONNACK { + if i.AssignedClientID != "" { + b.WriteByte(PropAssignedClientID) + writeString(i.AssignedClientID, &b) + } + + if i.ServerKeepAlive != nil { + b.WriteByte(PropServerKeepAlive) + writeUint16(*i.ServerKeepAlive, &b) + } + + if i.WildcardSubAvailable != nil { + b.WriteByte(PropWildcardSubAvailable) + b.WriteByte(*i.WildcardSubAvailable) + } + + if i.SubIDAvailable != nil { + b.WriteByte(PropSubIDAvailable) + b.WriteByte(*i.SubIDAvailable) + } + + if i.SharedSubAvailable != nil { + b.WriteByte(PropSharedSubAvailable) + b.WriteByte(*i.SharedSubAvailable) + } + + if i.RetainAvailable != nil { + b.WriteByte(PropRetainAvailable) + b.WriteByte(*i.RetainAvailable) + } + + if i.ResponseInfo != "" { + b.WriteByte(PropResponseInfo) + writeString(i.ResponseInfo, &b) + } + } + + if p == CONNECT { + if i.RequestProblemInfo != nil { + b.WriteByte(PropRequestProblemInfo) + b.WriteByte(*i.RequestProblemInfo) + } + + if i.WillDelayInterval != nil { + b.WriteByte(PropWillDelayInterval) + writeUint32(*i.WillDelayInterval, &b) + } + + if i.RequestResponseInfo != nil { + b.WriteByte(PropRequestResponseInfo) + 
b.WriteByte(*i.RequestResponseInfo) + } + } + + if p == CONNECT || p == CONNACK || p == DISCONNECT { + if i.SessionExpiryInterval != nil { + b.WriteByte(PropSessionExpiryInterval) + writeUint32(*i.SessionExpiryInterval, &b) + } + } + + if p == CONNECT || p == CONNACK || p == AUTH { + if i.AuthMethod != "" { + b.WriteByte(PropAuthMethod) + writeString(i.AuthMethod, &b) + } + + if i.AuthData != nil && len(i.AuthData) > 0 { + b.WriteByte(PropAuthData) + writeBinary(i.AuthData, &b) + } + } + + if p == CONNACK || p == DISCONNECT { + if i.ServerReference != "" { + b.WriteByte(PropServerReference) + writeString(i.ServerReference, &b) + } + } + + if p != CONNECT { + if i.ReasonString != "" { + b.WriteByte(PropReasonString) + writeString(i.ReasonString, &b) + } + } + + for _, v := range i.User { + b.WriteByte(PropUser) + writeString(v.Key, &b) + writeString(v.Value, &b) + } + + return &b +} + +// Unpack takes a buffer of bytes and reads out the defined properties +// filling in the appropriate entries in the struct, it returns the number +// of bytes used to store the Prop data and any error in decoding them +func (i *Properties) Unpack(r *bytes.Buffer, p byte) error { + vbi, err := getVBI(r) + if err != nil { + return err + } + size, err := decodeVBI(vbi) + if err != nil { + return err + } + if size == 0 { + return nil + } + + buf := bytes.NewBuffer(r.Next(size)) + for { + PropType, err := buf.ReadByte() + if err != nil && err != io.EOF { + return err + } + if err == io.EOF { + break + } + if !ValidateID(p, PropType) { + return fmt.Errorf("invalid Prop type %d for packet %d", PropType, p) + } + switch PropType { + case PropPayloadFormat: + pf, err := buf.ReadByte() + if err != nil { + return err + } + i.PayloadFormat = &pf + case PropMessageExpiry: + pe, err := readUint32(buf) + if err != nil { + return err + } + i.MessageExpiry = &pe + case PropContentType: + ct, err := readString(buf) + if err != nil { + return err + } + i.ContentType = ct + case PropResponseTopic: + tr, err := readString(buf) + if err != nil { + return err + } + i.ResponseTopic = tr + case PropCorrelationData: + cd, err := readBinary(buf) + if err != nil { + return err + } + i.CorrelationData = cd + case PropSubscriptionIdentifier: + si, err := decodeVBI(buf) + if err != nil { + return err + } + i.SubscriptionIdentifier = &si + case PropSessionExpiryInterval: + se, err := readUint32(buf) + if err != nil { + return err + } + i.SessionExpiryInterval = &se + case PropAssignedClientID: + ac, err := readString(buf) + if err != nil { + return err + } + i.AssignedClientID = ac + case PropServerKeepAlive: + sk, err := readUint16(buf) + if err != nil { + return err + } + i.ServerKeepAlive = &sk + case PropAuthMethod: + am, err := readString(buf) + if err != nil { + return err + } + i.AuthMethod = am + case PropAuthData: + ad, err := readBinary(buf) + if err != nil { + return err + } + i.AuthData = ad + case PropRequestProblemInfo: + rp, err := buf.ReadByte() + if err != nil { + return err + } + i.RequestProblemInfo = &rp + case PropWillDelayInterval: + wd, err := readUint32(buf) + if err != nil { + return err + } + i.WillDelayInterval = &wd + case PropRequestResponseInfo: + rp, err := buf.ReadByte() + if err != nil { + return err + } + i.RequestResponseInfo = &rp + case PropResponseInfo: + ri, err := readString(buf) + if err != nil { + return err + } + i.ResponseInfo = ri + case PropServerReference: + sr, err := readString(buf) + if err != nil { + return err + } + i.ServerReference = sr + case PropReasonString: + rs, err := 
readString(buf) + if err != nil { + return err + } + i.ReasonString = rs + case PropReceiveMaximum: + rm, err := readUint16(buf) + if err != nil { + return err + } + i.ReceiveMaximum = &rm + case PropTopicAliasMaximum: + ta, err := readUint16(buf) + if err != nil { + return err + } + i.TopicAliasMaximum = &ta + case PropTopicAlias: + ta, err := readUint16(buf) + if err != nil { + return err + } + i.TopicAlias = &ta + case PropMaximumQOS: + mq, err := buf.ReadByte() + if err != nil { + return err + } + i.MaximumQOS = &mq + case PropRetainAvailable: + ra, err := buf.ReadByte() + if err != nil { + return err + } + i.RetainAvailable = &ra + case PropUser: + k, err := readString(buf) + if err != nil { + return err + } + v, err := readString(buf) + if err != nil { + return err + } + i.User = append(i.User, User{k, v}) + case PropMaximumPacketSize: + mp, err := readUint32(buf) + if err != nil { + return err + } + i.MaximumPacketSize = &mp + case PropWildcardSubAvailable: + ws, err := buf.ReadByte() + if err != nil { + return err + } + i.WildcardSubAvailable = &ws + case PropSubIDAvailable: + si, err := buf.ReadByte() + if err != nil { + return err + } + i.SubIDAvailable = &si + case PropSharedSubAvailable: + ss, err := buf.ReadByte() + if err != nil { + return err + } + i.SharedSubAvailable = &ss + default: + return fmt.Errorf("unknown Prop type %d", PropType) + } + } + + return nil +} + +// ValidProperties is a map of the various properties and the +// PacketTypes that property is valid for. +var ValidProperties = map[byte]map[byte]struct{}{ + PropPayloadFormat: {PUBLISH: {}}, + PropMessageExpiry: {PUBLISH: {}}, + PropContentType: {PUBLISH: {}}, + PropResponseTopic: {PUBLISH: {}}, + PropCorrelationData: {PUBLISH: {}}, + PropTopicAlias: {PUBLISH: {}}, + PropSubscriptionIdentifier: {PUBLISH: {}, SUBSCRIBE: {}}, + PropSessionExpiryInterval: {CONNECT: {}, CONNACK: {}, DISCONNECT: {}}, + PropAssignedClientID: {CONNACK: {}}, + PropServerKeepAlive: {CONNACK: {}}, + PropWildcardSubAvailable: {CONNACK: {}}, + PropSubIDAvailable: {CONNACK: {}}, + PropSharedSubAvailable: {CONNACK: {}}, + PropRetainAvailable: {CONNACK: {}}, + PropResponseInfo: {CONNACK: {}}, + PropAuthMethod: {CONNECT: {}, CONNACK: {}, AUTH: {}}, + PropAuthData: {CONNECT: {}, CONNACK: {}, AUTH: {}}, + PropRequestProblemInfo: {CONNECT: {}}, + PropWillDelayInterval: {CONNECT: {}}, + PropRequestResponseInfo: {CONNECT: {}}, + PropServerReference: {CONNACK: {}, DISCONNECT: {}}, + PropReasonString: {CONNACK: {}, PUBACK: {}, PUBREC: {}, PUBREL: {}, PUBCOMP: {}, SUBACK: {}, UNSUBACK: {}, DISCONNECT: {}, AUTH: {}}, + PropReceiveMaximum: {CONNECT: {}, CONNACK: {}}, + PropTopicAliasMaximum: {CONNECT: {}, CONNACK: {}}, + PropMaximumQOS: {CONNECT: {}, CONNACK: {}}, + PropMaximumPacketSize: {CONNECT: {}, CONNACK: {}}, + PropUser: {CONNECT: {}, CONNACK: {}, PUBLISH: {}, PUBACK: {}, PUBREC: {}, PUBREL: {}, PUBCOMP: {}, SUBSCRIBE: {}, UNSUBSCRIBE: {}, SUBACK: {}, UNSUBACK: {}, DISCONNECT: {}, AUTH: {}}, +} + +// ValidateID takes a PacketType and a property name and returns +// a boolean indicating if that property is valid for that +// PacketType +func ValidateID(p byte, i byte) bool { + _, ok := ValidProperties[i][p] + return ok +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/puback.go b/vendor/github.com/eclipse/paho.golang/packets/puback.go new file mode 100644 index 000000000..67f404ce6 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/puback.go @@ -0,0 +1,115 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" 
+ "strings" +) + +// Puback is the Variable Header definition for a Puback control packet +type Puback struct { + Properties *Properties + PacketID uint16 + ReasonCode byte +} + +// PubackSuccess, etc are the list of valid puback reason codes. +const ( + PubackSuccess = 0x00 + PubackNoMatchingSubscribers = 0x10 + PubackUnspecifiedError = 0x80 + PubackImplementationSpecificError = 0x83 + PubackNotAuthorized = 0x87 + PubackTopicNameInvalid = 0x90 + PubackPacketIdentifierInUse = 0x91 + PubackQuotaExceeded = 0x97 + PubackPayloadFormatInvalid = 0x99 +) + +func (p *Puback) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "PUBACK: PacketID:%d ReasonCode:%X", p.PacketID, p.ReasonCode) + if p.Properties != nil { + fmt.Fprintf(&b, " Properties:\n%s", p.Properties) + } else { + fmt.Fprint(&b, "\n") + } + + return b.String() +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Puback) Unpack(r *bytes.Buffer) error { + var err error + success := r.Len() == 2 + noProps := r.Len() == 3 + p.PacketID, err = readUint16(r) + if err != nil { + return err + } + if !success { + p.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + if !noProps { + err = p.Properties.Unpack(r, PUBACK) + if err != nil { + return err + } + } + } + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Puback) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(p.PacketID, &b) + b.WriteByte(p.ReasonCode) + idvp := p.Properties.Pack(PUBACK) + propLen := encodeVBI(len(idvp)) + n := net.Buffers{b.Bytes(), propLen} + if len(idvp) > 0 { + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Puback) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PUBACK}} + cp.Content = p + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (p *Puback) Reason() string { + switch p.ReasonCode { + case 0: + return "The message is accepted. Publication of the QoS 1 message proceeds." + case 16: + return "The message is accepted but there are no subscribers. This is sent only by the Server. If the Server knows that there are no matching subscribers, it MAY use this Reason Code instead of 0x00 (Success)." + case 128: + return "The receiver does not accept the publish but either does not want to reveal the reason, or it does not match one of the other values." + case 131: + return "The PUBLISH is valid but the receiver is not willing to accept it." + case 135: + return "The PUBLISH is not authorized." + case 144: + return "The Topic Name is not malformed, but is not accepted by this Client or Server." + case 145: + return "The Packet Identifier is already in use. This might indicate a mismatch in the Session State between the Client and Server." + case 151: + return "An implementation or administrative imposed limit has been exceeded." + case 153: + return "The payload format does not match the specified Payload Format Indicator." 
+ } + + return "" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/pubcomp.go b/vendor/github.com/eclipse/paho.golang/packets/pubcomp.go new file mode 100644 index 000000000..1cdfe61e9 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/pubcomp.go @@ -0,0 +1,95 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Pubcomp is the Variable Header definition for a Pubcomp control packet +type Pubcomp struct { + Properties *Properties + PacketID uint16 + ReasonCode byte +} + +// PubcompSuccess, etc are the list of valid pubcomp reason codes. +const ( + PubcompSuccess = 0x00 + PubcompPacketIdentifierNotFound = 0x92 +) + +func (p *Pubcomp) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "PUBCOMP: ReasonCode:%X PacketID:%d", p.ReasonCode, p.PacketID) + if p.Properties != nil { + fmt.Fprintf(&b, " Properties:\n%s", p.Properties) + } else { + fmt.Fprint(&b, "\n") + } + + return b.String() +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Pubcomp) Unpack(r *bytes.Buffer) error { + var err error + success := r.Len() == 2 + noProps := r.Len() == 3 + p.PacketID, err = readUint16(r) + if err != nil { + return err + } + if !success { + p.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + if !noProps { + err = p.Properties.Unpack(r, PUBACK) + if err != nil { + return err + } + } + } + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Pubcomp) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(p.PacketID, &b) + b.WriteByte(p.ReasonCode) + n := net.Buffers{b.Bytes()} + idvp := p.Properties.Pack(PUBCOMP) + propLen := encodeVBI(len(idvp)) + if len(idvp) > 0 { + n = append(n, propLen) + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Pubcomp) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PUBCOMP}} + cp.Content = p + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (p *Pubcomp) Reason() string { + switch p.ReasonCode { + case 0: + return "Success - Packet Identifier released. Publication of QoS 2 message is complete." + case 146: + return "Packet Identifier not found - The Packet Identifier is not known. This is not an error during recovery, but at other times indicates a mismatch between the Session State on the Client and Server." 
+ } + + return "" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/publish.go b/vendor/github.com/eclipse/paho.golang/packets/publish.go new file mode 100644 index 000000000..ef834b7b9 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/publish.go @@ -0,0 +1,80 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net" +) + +// Publish is the Variable Header definition for a publish control packet +type Publish struct { + Payload []byte + Topic string + Properties *Properties + PacketID uint16 + QoS byte + Duplicate bool + Retain bool +} + +func (p *Publish) String() string { + return fmt.Sprintf("PUBLISH: PacketID:%d QOS:%d Topic:%s Duplicate:%t Retain:%t Payload:\n%s\nProperties\n%s", p.PacketID, p.QoS, p.Topic, p.Duplicate, p.Retain, string(p.Payload), p.Properties) +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Publish) Unpack(r *bytes.Buffer) error { + var err error + p.Topic, err = readString(r) + if err != nil { + return err + } + if p.QoS > 0 { + p.PacketID, err = readUint16(r) + if err != nil { + return err + } + } + + err = p.Properties.Unpack(r, PUBLISH) + if err != nil { + return err + } + + p.Payload, err = ioutil.ReadAll(r) + if err != nil { + return err + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Publish) Buffers() net.Buffers { + var b bytes.Buffer + writeString(p.Topic, &b) + if p.QoS > 0 { + _ = writeUint16(p.PacketID, &b) + } + idvp := p.Properties.Pack(PUBLISH) + encodeVBIdirect(len(idvp), &b) + return net.Buffers{b.Bytes(), idvp, p.Payload} + +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Publish) WriteTo(w io.Writer) (int64, error) { + f := p.QoS << 1 + if p.Duplicate { + f |= 1 << 3 + } + if p.Retain { + f |= 1 + } + + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PUBLISH, Flags: f}} + cp.Content = p + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/pubrec.go b/vendor/github.com/eclipse/paho.golang/packets/pubrec.go new file mode 100644 index 000000000..c3820191a --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/pubrec.go @@ -0,0 +1,117 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Pubrec is the Variable Header definition for a Pubrec control packet +type Pubrec struct { + Properties *Properties + PacketID uint16 + ReasonCode byte +} + +// PubrecSuccess, etc are the list of valid Pubrec reason codes +const ( + PubrecSuccess = 0x00 + PubrecNoMatchingSubscribers = 0x10 + PubrecUnspecifiedError = 0x80 + PubrecImplementationSpecificError = 0x83 + PubrecNotAuthorized = 0x87 + PubrecTopicNameInvalid = 0x90 + PubrecPacketIdentifierInUse = 0x91 + PubrecQuotaExceeded = 0x97 + PubrecPayloadFormatInvalid = 0x99 +) + +func (p *Pubrec) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "PUBREC: ReasonCode:%X PacketID:%d", p.ReasonCode, p.PacketID) + if p.Properties != nil { + fmt.Fprintf(&b, " Properties:\n%s", p.Properties) + } else { + fmt.Fprint(&b, "\n") + } + + return b.String() +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Pubrec) Unpack(r *bytes.Buffer) error { + var err error + success := r.Len() == 2 + noProps := r.Len() == 3 + p.PacketID, err = readUint16(r) + if err != nil { + return err + } + if !success { + p.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + if !noProps { + err 
= p.Properties.Unpack(r, PUBACK) + if err != nil { + return err + } + } + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Pubrec) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(p.PacketID, &b) + b.WriteByte(p.ReasonCode) + n := net.Buffers{b.Bytes()} + idvp := p.Properties.Pack(PUBREC) + propLen := encodeVBI(len(idvp)) + if len(idvp) > 0 { + n = append(n, propLen) + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Pubrec) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PUBREC}} + cp.Content = p + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (p *Pubrec) Reason() string { + switch p.ReasonCode { + case 0: + return "Success - The message is accepted. Publication of the QoS 2 message proceeds." + case 16: + return "No matching subscribers. - The message is accepted but there are no subscribers. This is sent only by the Server. If the Server knows that case there are no matching subscribers, it MAY use this Reason Code instead of 0x00 (Success)" + case 128: + return "Unspecified error - The receiver does not accept the publish but either does not want to reveal the reason, or it does not match one of the other values." + case 131: + return "Implementation specific error - The PUBLISH is valid but the receiver is not willing to accept it." + case 135: + return "Not authorized - The PUBLISH is not authorized." + case 144: + return "Topic Name invalid - The Topic Name is not malformed, but is not accepted by this Client or Server." + case 145: + return "Packet Identifier in use - The Packet Identifier is already in use. This might indicate a mismatch in the Session State between the Client and Server." + case 151: + return "Quota exceeded - An implementation or administrative imposed limit has been exceeded." + case 153: + return "Payload format invalid - The payload format does not match the one specified in the Payload Format Indicator." 
+ } + + return "" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/pubrel.go b/vendor/github.com/eclipse/paho.golang/packets/pubrel.go new file mode 100644 index 000000000..27c48c240 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/pubrel.go @@ -0,0 +1,77 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Pubrel is the Variable Header definition for a Pubrel control packet +type Pubrel struct { + Properties *Properties + PacketID uint16 + ReasonCode byte +} + +func (p *Pubrel) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "PUBREL: ReasonCode:%X PacketID:%d", p.ReasonCode, p.PacketID) + if p.Properties != nil { + fmt.Fprintf(&b, " Properties:\n%s", p.Properties) + } else { + fmt.Fprint(&b, "\n") + } + + return b.String() +} + +//Unpack is the implementation of the interface required function for a packet +func (p *Pubrel) Unpack(r *bytes.Buffer) error { + var err error + success := r.Len() == 2 + noProps := r.Len() == 3 + p.PacketID, err = readUint16(r) + if err != nil { + return err + } + if !success { + p.ReasonCode, err = r.ReadByte() + if err != nil { + return err + } + + if !noProps { + err = p.Properties.Unpack(r, PUBACK) + if err != nil { + return err + } + } + } + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (p *Pubrel) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(p.PacketID, &b) + b.WriteByte(p.ReasonCode) + n := net.Buffers{b.Bytes()} + idvp := p.Properties.Pack(PUBREL) + propLen := encodeVBI(len(idvp)) + if len(idvp) > 0 { + n = append(n, propLen) + n = append(n, idvp) + } + return n +} + +// WriteTo is the implementation of the interface required function for a packet +func (p *Pubrel) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: PUBREL, Flags: 2}} + cp.Content = p + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/suback.go b/vendor/github.com/eclipse/paho.golang/packets/suback.go new file mode 100644 index 000000000..2503aaf1a --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/suback.go @@ -0,0 +1,103 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Suback is the Variable Header definition for a Suback control packet +type Suback struct { + Properties *Properties + Reasons []byte + PacketID uint16 +} + +func (s *Suback) String() string { + return fmt.Sprintf("SUBACK: ReasonCode:%v PacketID:%d Properties:\n%s", s.Reasons, s.PacketID, s.Properties) +} + +// SubackGrantedQoS0, etc are the list of valid suback reason codes. 
+const ( + SubackGrantedQoS0 = 0x00 + SubackGrantedQoS1 = 0x01 + SubackGrantedQoS2 = 0x02 + SubackUnspecifiederror = 0x80 + SubackImplementationspecificerror = 0x83 + SubackNotauthorized = 0x87 + SubackTopicFilterinvalid = 0x8F + SubackPacketIdentifierinuse = 0x91 + SubackQuotaexceeded = 0x97 + SubackSharedSubscriptionnotsupported = 0x9E + SubackSubscriptionIdentifiersnotsupported = 0xA1 + SubackWildcardsubscriptionsnotsupported = 0xA2 +) + +//Unpack is the implementation of the interface required function for a packet +func (s *Suback) Unpack(r *bytes.Buffer) error { + var err error + s.PacketID, err = readUint16(r) + if err != nil { + return err + } + + err = s.Properties.Unpack(r, SUBACK) + if err != nil { + return err + } + + s.Reasons = r.Bytes() + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (s *Suback) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(s.PacketID, &b) + idvp := s.Properties.Pack(SUBACK) + propLen := encodeVBI(len(idvp)) + return net.Buffers{b.Bytes(), propLen, idvp, s.Reasons} +} + +// WriteTo is the implementation of the interface required function for a packet +func (s *Suback) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: SUBACK}} + cp.Content = s + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (s *Suback) Reason(index int) string { + if index >= 0 && index < len(s.Reasons) { + switch s.Reasons[index] { + case 0: + return "Granted QoS 0 - The subscription is accepted and the maximum QoS sent will be QoS 0. This might be a lower QoS than was requested." + case 1: + return "Granted QoS 1 - The subscription is accepted and the maximum QoS sent will be QoS 1. This might be a lower QoS than was requested." + case 2: + return "Granted QoS 2 - The subscription is accepted and any received QoS will be sent to this subscription." + case 128: + return "Unspecified error - The subscription is not accepted and the Server either does not wish to reveal the reason or none of the other Reason Codes apply." + case 131: + return "Implementation specific error - The SUBSCRIBE is valid but the Server does not accept it." + case 135: + return "Not authorized - The Client is not authorized to make this subscription." + case 143: + return "Topic Filter invalid - The Topic Filter is correctly formed but is not allowed for this Client." + case 145: + return "Packet Identifier in use - The specified Packet Identifier is already in use." + case 151: + return "Quota exceeded - An implementation or administrative imposed limit has been exceeded." + case 158: + return "Shared Subscription not supported - The Server does not support Shared Subscriptions for this Client." + case 161: + return "Subscription Identifiers not supported - The Server does not support Subscription Identifiers; the subscription is not accepted." + case 162: + return "Wildcard subscriptions not supported - The Server does not support Wildcard subscription; the subscription is not accepted." 
+ } + } + return "Invalid Reason index" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/subscribe.go b/vendor/github.com/eclipse/paho.golang/packets/subscribe.go new file mode 100644 index 000000000..3f457a28a --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/subscribe.go @@ -0,0 +1,116 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" +) + +// Subscribe is the Variable Header definition for a Subscribe control packet +type Subscribe struct { + Properties *Properties + Subscriptions map[string]SubOptions + PacketID uint16 +} + +func (s *Subscribe) String() string { + var b strings.Builder + + fmt.Fprintf(&b, "SUBSCRIBE: PacketID:%d Subscriptions:\n", s.PacketID) + for sub, o := range s.Subscriptions { + fmt.Fprintf(&b, "\t%s: QOS:%d RetainHandling:%X NoLocal:%t RetainAsPublished:%t\n", sub, o.QoS, o.RetainHandling, o.NoLocal, o.RetainAsPublished) + } + fmt.Fprintf(&b, "Properties:\n%s", s.Properties) + + return b.String() +} + +// SubOptions is the struct representing the options for a subscription +type SubOptions struct { + QoS byte + RetainHandling byte + NoLocal bool + RetainAsPublished bool +} + +// Pack is the implementation of the interface required function for a packet +func (s *SubOptions) Pack() byte { + var ret byte + ret |= s.QoS & 0x03 + if s.NoLocal { + ret |= 1 << 2 + } + if s.RetainAsPublished { + ret |= 1 << 3 + } + ret |= s.RetainHandling & 0x30 + + return ret +} + +// Unpack is the implementation of the interface required function for a packet +func (s *SubOptions) Unpack(r *bytes.Buffer) error { + b, err := r.ReadByte() + if err != nil { + return err + } + + s.QoS = b & 0x03 + s.NoLocal = (b & 1 << 2) == 1 + s.RetainAsPublished = (b & 1 << 3) == 1 + s.RetainHandling = b & 0x30 + + return nil +} + +// Unpack is the implementation of the interface required function for a packet +func (s *Subscribe) Unpack(r *bytes.Buffer) error { + var err error + s.PacketID, err = readUint16(r) + if err != nil { + return err + } + + err = s.Properties.Unpack(r, SUBSCRIBE) + if err != nil { + return err + } + + for r.Len() > 0 { + var so SubOptions + t, err := readString(r) + if err != nil { + return err + } + if err = so.Unpack(r); err != nil { + return err + } + s.Subscriptions[t] = so + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (s *Subscribe) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(s.PacketID, &b) + var subs bytes.Buffer + for t, o := range s.Subscriptions { + writeString(t, &subs) + subs.WriteByte(o.Pack()) + } + idvp := s.Properties.Pack(SUBSCRIBE) + propLen := encodeVBI(len(idvp)) + return net.Buffers{b.Bytes(), propLen, idvp, subs.Bytes()} +} + +// WriteTo is the implementation of the interface required function for a packet +func (s *Subscribe) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: SUBSCRIBE, Flags: 2}} + cp.Content = s + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/unsuback.go b/vendor/github.com/eclipse/paho.golang/packets/unsuback.go new file mode 100644 index 000000000..ba5164b9f --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/unsuback.go @@ -0,0 +1,88 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Unsuback is the Variable Header definition for a Unsuback control packet +type Unsuback struct { + Reasons []byte + Properties *Properties + PacketID uint16 +} + +func (u *Unsuback) String() string { + 
return fmt.Sprintf("UNSUBACK: ReasonCode:%v PacketID:%d Properties:\n%s", u.Reasons, u.PacketID, u.Properties) +} + +// UnsubackSuccess, etc are the list of valid unsuback reason codes. +const ( + UnsubackSuccess = 0x00 + UnsubackNoSubscriptionFound = 0x11 + UnsubackUnspecifiedError = 0x80 + UnsubackImplementationSpecificError = 0x83 + UnsubackNotAuthorized = 0x87 + UnsubackTopicFilterInvalid = 0x8F + UnsubackPacketIdentifierInUse = 0x91 +) + +// Unpack is the implementation of the interface required function for a packet +func (u *Unsuback) Unpack(r *bytes.Buffer) error { + var err error + u.PacketID, err = readUint16(r) + if err != nil { + return err + } + + err = u.Properties.Unpack(r, UNSUBACK) + if err != nil { + return err + } + + u.Reasons = r.Bytes() + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (u *Unsuback) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(u.PacketID, &b) + idvp := u.Properties.Pack(UNSUBACK) + propLen := encodeVBI(len(idvp)) + return net.Buffers{b.Bytes(), propLen, idvp, u.Reasons} +} + +// WriteTo is the implementation of the interface required function for a packet +func (u *Unsuback) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: UNSUBACK}} + cp.Content = u + + return cp.WriteTo(w) +} + +// Reason returns a string representation of the meaning of the ReasonCode +func (u *Unsuback) Reason(index int) string { + if index >= 0 && index < len(u.Reasons) { + switch u.Reasons[index] { + case 0x00: + return "Success - The subscription is deleted" + case 0x11: + return "No subscription found - No matching Topic Filter is being used by the Client." + case 0x80: + return "Unspecified error - The unsubscribe could not be completed and the Server either does not wish to reveal the reason or none of the other Reason Codes apply." + case 0x83: + return "Implementation specific error - The UNSUBSCRIBE is valid but the Server does not accept it." + case 0x87: + return "Not authorized - The Client is not authorized to unsubscribe." + case 0x8F: + return "Topic Filter invalid - The Topic Filter is correctly formed but is not allowed for this Client." + case 0x91: + return "Packet Identifier in use - The specified Packet Identifier is already in use." 
+ } + } + return "Invalid Reason index" +} diff --git a/vendor/github.com/eclipse/paho.golang/packets/unsubscribe.go b/vendor/github.com/eclipse/paho.golang/packets/unsubscribe.go new file mode 100644 index 000000000..dc4e2f89e --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/packets/unsubscribe.go @@ -0,0 +1,67 @@ +package packets + +import ( + "bytes" + "fmt" + "io" + "net" +) + +// Unsubscribe is the Variable Header definition for a Unsubscribe control packet +type Unsubscribe struct { + Topics []string + Properties *Properties + PacketID uint16 +} + +func (u *Unsubscribe) String() string { + return fmt.Sprintf("UNSUBSCRIBE: PacketID:%d Topics:%v Properties:\n%s", u.PacketID, u.Topics, u.Properties) +} + +// Unpack is the implementation of the interface required function for a packet +func (u *Unsubscribe) Unpack(r *bytes.Buffer) error { + var err error + u.PacketID, err = readUint16(r) + if err != nil { + return err + } + + err = u.Properties.Unpack(r, UNSUBSCRIBE) + if err != nil { + return err + } + + for { + t, err := readString(r) + if err != nil && err != io.EOF { + return err + } + if err == io.EOF { + break + } + u.Topics = append(u.Topics, t) + } + + return nil +} + +// Buffers is the implementation of the interface required function for a packet +func (u *Unsubscribe) Buffers() net.Buffers { + var b bytes.Buffer + writeUint16(u.PacketID, &b) + var topics bytes.Buffer + for _, t := range u.Topics { + writeString(t, &topics) + } + idvp := u.Properties.Pack(UNSUBSCRIBE) + propLen := encodeVBI(len(idvp)) + return net.Buffers{b.Bytes(), propLen, idvp, topics.Bytes()} +} + +// WriteTo is the implementation of the interface required function for a packet +func (u *Unsubscribe) WriteTo(w io.Writer) (int64, error) { + cp := &ControlPacket{FixedHeader: FixedHeader{Type: UNSUBSCRIBE, Flags: 2}} + cp.Content = u + + return cp.WriteTo(w) +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/acks_tracker.go b/vendor/github.com/eclipse/paho.golang/paho/acks_tracker.go new file mode 100644 index 000000000..47f11cb67 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/acks_tracker.go @@ -0,0 +1,79 @@ +package paho + +import ( + "errors" + "sync" + + "github.com/eclipse/paho.golang/packets" +) + +var ( + ErrPacketNotFound = errors.New("packet not found") +) + +type acksTracker struct { + mx sync.Mutex + order []packet +} + +func (t *acksTracker) add(pb *packets.Publish) { + t.mx.Lock() + defer t.mx.Unlock() + + for _, v := range t.order { + if v.pb.PacketID == pb.PacketID { + return // already added + } + } + + t.order = append(t.order, packet{pb: pb}) +} + +func (t *acksTracker) markAsAcked(pb *packets.Publish) error { + t.mx.Lock() + defer t.mx.Unlock() + + for k, v := range t.order { + if pb.PacketID == v.pb.PacketID { + t.order[k].acknowledged = true + return nil + } + } + + return ErrPacketNotFound +} + +func (t *acksTracker) flush(do func([]*packets.Publish)) { + t.mx.Lock() + defer t.mx.Unlock() + + var ( + buf []*packets.Publish + ) + for _, v := range t.order { + if v.acknowledged { + buf = append(buf, v.pb) + } else { + break + } + } + + if len(buf) == 0 { + return + } + + do(buf) + t.order = t.order[len(buf):] +} + +// reset should be used upon disconnections +func (t *acksTracker) reset() { + t.mx.Lock() + defer t.mx.Unlock() + t.order = nil +} + +type packet struct { + pb *packets.Publish + acknowledged bool +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/auth.go b/vendor/github.com/eclipse/paho.golang/paho/auth.go new file mode 100644 index 
000000000..7d3a3c972 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/auth.go @@ -0,0 +1,8 @@ +package paho + +// Auther is the interface for something that implements the extended authentication +// flows in MQTT v5 +type Auther interface { + Authenticate(*Auth) *Auth + Authenticated() +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/client.go b/vendor/github.com/eclipse/paho.golang/paho/client.go new file mode 100644 index 000000000..f41e3d068 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/client.go @@ -0,0 +1,923 @@ +package paho + +import ( + "context" + "errors" + "fmt" + "math" + "net" + "strings" + "sync" + "time" + + "github.com/eclipse/paho.golang/packets" + "golang.org/x/sync/semaphore" +) + +type MQTTVersion byte + +const ( + MQTTv311 MQTTVersion = 4 + MQTTv5 MQTTVersion = 5 +) + +const defaultSendAckInterval = 50 * time.Millisecond + +var ( + ErrManualAcknowledgmentDisabled = errors.New("manual acknowledgments disabled") +) + +type ( + // ClientConfig are the user configurable options for the client, an + // instance of this struct is passed into NewClient(), not all options + // are required to be set, defaults are provided for Persistence, MIDs, + // PingHandler, PacketTimeout and Router. + ClientConfig struct { + ClientID string + // Conn is the connection to broker. + // BEWARE that most wrapped net.Conn implementations like tls.Conn are + // not thread safe for writing. To fix, use packets.NewThreadSafeConn + // wrapper or extend the custom net.Conn struct with sync.Locker. + Conn net.Conn + MIDs MIDService + AuthHandler Auther + PingHandler Pinger + Router Router + Persistence Persistence + PacketTimeout time.Duration + // OnServerDisconnect is called only when a packets.DISCONNECT is received from server + OnServerDisconnect func(*Disconnect) + // OnClientError is for example called on net.Error + OnClientError func(error) + // PublishHook allows a user provided function to be called before + // a Publish packet is sent allowing it to inspect or modify the + // Publish, an example of the utility of this is provided in the + // Topic Alias Handler extension which will automatically assign + // and use topic alias values rather than topic strings. + PublishHook func(*Publish) + // EnableManualAcknowledgment is used to control the acknowledgment of packets manually. + // BEWARE that the MQTT specs require clients to send acknowledgments in the order in which the corresponding + // PUBLISH packets were received. + // Consider the following scenario: the client receives packets 1,2,3,4 + // If you acknowledge 3 first, no ack is actually sent to the server but it's buffered until also 1 and 2 + // are acknowledged. + EnableManualAcknowledgment bool + // SendAcksInterval is used only when EnableManualAcknowledgment is true + // it determines how often the client tries to send a batch of acknowledgments in the right order to the server. + SendAcksInterval time.Duration + } + // Client is the struct representing an MQTT client + Client struct { + mu sync.Mutex + ClientConfig + // raCtx is used for handling the MQTTv5 authentication exchange. 
+ raCtx *CPContext + stop chan struct{} + publishPackets chan *packets.Publish + acksTracker acksTracker + workers sync.WaitGroup + serverProps CommsProperties + clientProps CommsProperties + serverInflight *semaphore.Weighted + clientInflight *semaphore.Weighted + debug Logger + errors Logger + } + + // CommsProperties is a struct of the communication properties that may + // be set by the server in the Connack and that the client needs to be + // aware of for future subscribes/publishes + CommsProperties struct { + MaximumPacketSize uint32 + ReceiveMaximum uint16 + TopicAliasMaximum uint16 + MaximumQoS byte + RetainAvailable bool + WildcardSubAvailable bool + SubIDAvailable bool + SharedSubAvailable bool + } + + caContext struct { + Context context.Context + Return chan *packets.Connack + } +) + +// NewClient is used to create a new default instance of an MQTT client. +// It returns a pointer to the new client instance. +// The default client uses the provided PingHandler, MessageID and +// StandardRouter implementations, and a noop Persistence. +// These should be replaced if desired before the client is connected. +// client.Conn *MUST* be set to an already connected net.Conn before +// Connect() is called. +func NewClient(conf ClientConfig) *Client { + c := &Client{ + serverProps: CommsProperties{ + ReceiveMaximum: 65535, + MaximumQoS: 2, + MaximumPacketSize: 0, + TopicAliasMaximum: 0, + RetainAvailable: true, + WildcardSubAvailable: true, + SubIDAvailable: true, + SharedSubAvailable: true, + }, + clientProps: CommsProperties{ + ReceiveMaximum: 65535, + MaximumQoS: 2, + MaximumPacketSize: 0, + TopicAliasMaximum: 0, + }, + ClientConfig: conf, + errors: NOOPLogger{}, + debug: NOOPLogger{}, + } + + if c.Persistence == nil { + c.Persistence = &noopPersistence{} + } + if c.MIDs == nil { + c.MIDs = &MIDs{index: make([]*CPContext, int(midMax))} + } + if c.PacketTimeout == 0 { + c.PacketTimeout = 10 * time.Second + } + if c.Router == nil { + c.Router = NewStandardRouter() + } + if c.PingHandler == nil { + c.PingHandler = DefaultPingerWithCustomFailHandler(func(e error) { + go c.error(e) + }) + } + if c.OnClientError == nil { + c.OnClientError = func(e error) {} + } + + return c +} + +// Connect is used to connect the client to a server. It presumes that +// the Client instance already has a working network connection. +// The function takes a pre-prepared Connect packet, and uses that to +// establish an MQTT connection. Assuming the connection completes +// successfully the rest of the client is initiated and the Connack +// returned. Otherwise the failure Connack (if there is one) is returned +// along with an error indicating the reason for the failure to connect. 
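+//
+// A minimal usage sketch (editorial illustration, not part of the upstream
+// paho source; the broker address and client ID are placeholders):
+//
+//	conn, err := net.Dial("tcp", "broker.example.com:1883")
+//	if err != nil {
+//		panic(err)
+//	}
+//	cli := NewClient(ClientConfig{Conn: conn})
+//	ca, err := cli.Connect(context.Background(), &Connect{
+//		ClientID:   "example-client",
+//		CleanStart: true,
+//		KeepAlive:  30,
+//	})
+//	if err != nil {
+//		// ca may still carry the failure Connack and its reason code
+//	}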
+func (c *Client) Connect(ctx context.Context, cp *Connect) (*Connack, error) { + if c.Conn == nil { + return nil, fmt.Errorf("client connection is nil") + } + + cleanup := func() { + close(c.stop) + close(c.publishPackets) + _ = c.Conn.Close() + c.mu.Unlock() + } + + c.mu.Lock() + c.stop = make(chan struct{}) + + var publishPacketsSize uint16 = math.MaxUint16 + if cp.Properties != nil && cp.Properties.ReceiveMaximum != nil { + publishPacketsSize = *cp.Properties.ReceiveMaximum + } + c.publishPackets = make(chan *packets.Publish, publishPacketsSize) + + keepalive := cp.KeepAlive + c.ClientID = cp.ClientID + if cp.Properties != nil { + if cp.Properties.MaximumPacketSize != nil { + c.clientProps.MaximumPacketSize = *cp.Properties.MaximumPacketSize + } + if cp.Properties.MaximumQOS != nil { + c.clientProps.MaximumQoS = *cp.Properties.MaximumQOS + } + if cp.Properties.ReceiveMaximum != nil { + c.clientProps.ReceiveMaximum = *cp.Properties.ReceiveMaximum + } + if cp.Properties.TopicAliasMaximum != nil { + c.clientProps.TopicAliasMaximum = *cp.Properties.TopicAliasMaximum + } + } + + c.debug.Println("connecting") + connCtx, cf := context.WithTimeout(ctx, c.PacketTimeout) + defer cf() + + ccp := cp.Packet() + ccp.ProtocolName = "MQTT" + ccp.ProtocolVersion = 5 + + c.debug.Println("sending CONNECT") + if _, err := ccp.WriteTo(c.Conn); err != nil { + cleanup() + return nil, err + } + + c.debug.Println("waiting for CONNACK/AUTH") + var ( + caPacket *packets.Connack + caPacketCh = make(chan *packets.Connack) + caPacketErr = make(chan error) + ) + go c.expectConnack(caPacketCh, caPacketErr) + select { + case <-connCtx.Done(): + if ctxErr := connCtx.Err(); ctxErr != nil { + c.debug.Println(fmt.Sprintf("terminated due to context: %v", ctxErr)) + } + cleanup() + return nil, connCtx.Err() + case err := <-caPacketErr: + c.debug.Println(err) + cleanup() + return nil, err + case caPacket = <-caPacketCh: + } + + ca := ConnackFromPacketConnack(caPacket) + + if ca.ReasonCode >= 0x80 { + var reason string + c.debug.Println("received an error code in Connack:", ca.ReasonCode) + if ca.Properties != nil { + reason = ca.Properties.ReasonString + } + cleanup() + return ca, fmt.Errorf("failed to connect to server: %s", reason) + } + + // no more possible calls to cleanup(), defer an unlock + defer c.mu.Unlock() + + if ca.Properties != nil { + if ca.Properties.ServerKeepAlive != nil { + keepalive = *ca.Properties.ServerKeepAlive + } + if ca.Properties.AssignedClientID != "" { + c.ClientID = ca.Properties.AssignedClientID + } + if ca.Properties.ReceiveMaximum != nil { + c.serverProps.ReceiveMaximum = *ca.Properties.ReceiveMaximum + } + if ca.Properties.MaximumQoS != nil { + c.serverProps.MaximumQoS = *ca.Properties.MaximumQoS + } + if ca.Properties.MaximumPacketSize != nil { + c.serverProps.MaximumPacketSize = *ca.Properties.MaximumPacketSize + } + if ca.Properties.TopicAliasMaximum != nil { + c.serverProps.TopicAliasMaximum = *ca.Properties.TopicAliasMaximum + } + c.serverProps.RetainAvailable = ca.Properties.RetainAvailable + c.serverProps.WildcardSubAvailable = ca.Properties.WildcardSubAvailable + c.serverProps.SubIDAvailable = ca.Properties.SubIDAvailable + c.serverProps.SharedSubAvailable = ca.Properties.SharedSubAvailable + } + + c.serverInflight = semaphore.NewWeighted(int64(c.serverProps.ReceiveMaximum)) + c.clientInflight = semaphore.NewWeighted(int64(c.clientProps.ReceiveMaximum)) + + c.debug.Println("received CONNACK, starting PingHandler") + c.workers.Add(1) + go func() { + defer c.workers.Done() + defer 
c.debug.Println("returning from ping handler worker") + c.PingHandler.Start(c.Conn, time.Duration(keepalive)*time.Second) + }() + + c.debug.Println("starting publish packets loop") + c.workers.Add(1) + go func() { + defer c.workers.Done() + defer c.debug.Println("returning from publish packets loop worker") + c.routePublishPackets() + }() + + c.debug.Println("starting incoming") + c.workers.Add(1) + go func() { + defer c.workers.Done() + defer c.debug.Println("returning from incoming worker") + c.incoming() + }() + + if c.EnableManualAcknowledgment { + c.debug.Println("starting acking routine") + + c.acksTracker.reset() + sendAcksInterval := defaultSendAckInterval + if c.SendAcksInterval > 0 { + sendAcksInterval = c.SendAcksInterval + } + + c.workers.Add(1) + go func() { + defer c.workers.Done() + defer c.debug.Println("returning from ack tracker routine") + t := time.NewTicker(sendAcksInterval) + for { + select { + case <-c.stop: + return + case <-t.C: + c.acksTracker.flush(func(pbs []*packets.Publish) { + for _, pb := range pbs { + c.ack(pb) + } + }) + } + } + }() + } + + return ca, nil +} + +func (c *Client) Ack(pb *Publish) error { + if !c.EnableManualAcknowledgment { + return ErrManualAcknowledgmentDisabled + } + if pb.QoS == 0 { + return nil + } + return c.acksTracker.markAsAcked(pb.Packet()) +} + +func (c *Client) ack(pb *packets.Publish) { + switch pb.QoS { + case 1: + pa := packets.Puback{ + Properties: &packets.Properties{}, + PacketID: pb.PacketID, + } + c.debug.Println("sending PUBACK") + _, err := pa.WriteTo(c.Conn) + if err != nil { + c.errors.Printf("failed to send PUBACK for %d: %s", pb.PacketID, err) + } + case 2: + pr := packets.Pubrec{ + Properties: &packets.Properties{}, + PacketID: pb.PacketID, + } + c.debug.Printf("sending PUBREC") + _, err := pr.WriteTo(c.Conn) + if err != nil { + c.errors.Printf("failed to send PUBREC for %d: %s", pb.PacketID, err) + } + } +} + +func (c *Client) routePublishPackets() { + for { + select { + case <-c.stop: + return + case pb, open := <-c.publishPackets: + if !open { + return + } + + if !c.ClientConfig.EnableManualAcknowledgment { + c.Router.Route(pb) + c.ack(pb) + continue + } + + if pb.QoS != 0 { + c.acksTracker.add(pb) + } + + c.Router.Route(pb) + } + } +} + +// incoming is the Client function that reads and handles incoming +// packets from the server. 
The function is started as a goroutine +// from Connect(), it exits when it receives a server initiated +// Disconnect, the Stop channel is closed or there is an error reading +// a packet from the network connection +func (c *Client) incoming() { + defer c.debug.Println("client stopping, incoming stopping") + for { + select { + case <-c.stop: + return + default: + recv, err := packets.ReadPacket(c.Conn) + if err != nil { + go c.error(err) + return + } + switch recv.Type { + case packets.CONNACK: + c.debug.Println("received CONNACK") + go c.error(fmt.Errorf("received unexpected CONNACK")) + return + case packets.AUTH: + c.debug.Println("received AUTH") + ap := recv.Content.(*packets.Auth) + switch ap.ReasonCode { + case 0x0: + if c.AuthHandler != nil { + go c.AuthHandler.Authenticated() + } + if c.raCtx != nil { + c.raCtx.Return <- *recv + } + case 0x18: + if c.AuthHandler != nil { + if _, err := c.AuthHandler.Authenticate(AuthFromPacketAuth(ap)).Packet().WriteTo(c.Conn); err != nil { + go c.error(err) + return + } + } + } + case packets.PUBLISH: + pb := recv.Content.(*packets.Publish) + c.debug.Printf("received QoS%d PUBLISH", pb.QoS) + c.mu.Lock() + select { + case <-c.stop: + c.mu.Unlock() + return + default: + c.publishPackets <- pb + c.mu.Unlock() + } + case packets.PUBACK, packets.PUBCOMP, packets.SUBACK, packets.UNSUBACK: + c.debug.Printf("received %s packet with id %d", recv.PacketType(), recv.PacketID()) + if cpCtx := c.MIDs.Get(recv.PacketID()); cpCtx != nil { + cpCtx.Return <- *recv + } else { + c.debug.Println("received a response for a message ID we don't know:", recv.PacketID()) + } + case packets.PUBREC: + c.debug.Println("received PUBREC for", recv.PacketID()) + if cpCtx := c.MIDs.Get(recv.PacketID()); cpCtx == nil { + c.debug.Println("received a PUBREC for a message ID we don't know:", recv.PacketID()) + pl := packets.Pubrel{ + PacketID: recv.Content.(*packets.Pubrec).PacketID, + ReasonCode: 0x92, + } + c.debug.Println("sending PUBREL for", pl.PacketID) + _, err := pl.WriteTo(c.Conn) + if err != nil { + c.errors.Printf("failed to send PUBREL for %d: %s", pl.PacketID, err) + } + } else { + pr := recv.Content.(*packets.Pubrec) + if pr.ReasonCode >= 0x80 { + //Received a failure code, shortcut and return + cpCtx.Return <- *recv + } else { + pl := packets.Pubrel{ + PacketID: pr.PacketID, + } + c.debug.Println("sending PUBREL for", pl.PacketID) + _, err := pl.WriteTo(c.Conn) + if err != nil { + c.errors.Printf("failed to send PUBREL for %d: %s", pl.PacketID, err) + } + } + } + case packets.PUBREL: + c.debug.Println("received PUBREL for", recv.PacketID()) + //Auto respond to pubrels unless failure code + pr := recv.Content.(*packets.Pubrel) + if pr.ReasonCode >= 0x80 { + //Received a failure code, continue + continue + } else { + pc := packets.Pubcomp{ + PacketID: pr.PacketID, + } + c.debug.Println("sending PUBCOMP for", pr.PacketID) + _, err := pc.WriteTo(c.Conn) + if err != nil { + c.errors.Printf("failed to send PUBCOMP for %d: %s", pc.PacketID, err) + } + } + case packets.DISCONNECT: + c.debug.Println("received DISCONNECT") + if c.raCtx != nil { + c.raCtx.Return <- *recv + } + go func() { + if c.OnServerDisconnect != nil { + go c.serverDisconnect(DisconnectFromPacketDisconnect(recv.Content.(*packets.Disconnect))) + } else { + go c.error(fmt.Errorf("server initiated disconnect")) + } + }() + return + case packets.PINGRESP: + c.debug.Println("received PINGRESP") + c.PingHandler.PingResp() + } + } + } +} + +func (c *Client) close() { + c.mu.Lock() + defer c.mu.Unlock() + + 
select { + case <-c.stop: + //already shutting down, do nothing + return + default: + } + + close(c.stop) + close(c.publishPackets) + + c.debug.Println("client stopped") + c.PingHandler.Stop() + c.debug.Println("ping stopped") + _ = c.Conn.Close() + c.debug.Println("conn closed") + c.acksTracker.reset() + c.debug.Println("acks tracker reset") +} + +// error is called to signify that an error situation has occurred, this +// causes the client's Stop channel to be closed (if it hasn't already been) +// which results in the other client goroutines terminating. +// It also closes the client network connection. +func (c *Client) error(e error) { + c.debug.Println("error called:", e) + c.close() + c.workers.Wait() + go c.OnClientError(e) +} + +func (c *Client) serverDisconnect(d *Disconnect) { + c.close() + c.workers.Wait() + c.debug.Println("calling OnServerDisconnect") + go c.OnServerDisconnect(d) +} + +// Authenticate is used to initiate a reauthentication of credentials with the +// server. This function sends the initial Auth packet to start the reauthentication +// then relies on the client AuthHandler managing any further requests from the +// server until either a successful Auth packet is passed back, or a Disconnect +// is received. +func (c *Client) Authenticate(ctx context.Context, a *Auth) (*AuthResponse, error) { + c.debug.Println("client initiated reauthentication") + + c.mu.Lock() + if c.raCtx != nil { + c.mu.Unlock() + return nil, fmt.Errorf("previous authentication is still in progress") + } + c.raCtx = &CPContext{ctx, make(chan packets.ControlPacket, 1)} + c.mu.Unlock() + defer func() { + c.mu.Lock() + c.raCtx = nil + c.mu.Unlock() + }() + + c.debug.Println("sending AUTH") + if _, err := a.Packet().WriteTo(c.Conn); err != nil { + return nil, err + } + + var rp packets.ControlPacket + select { + case <-ctx.Done(): + if ctxErr := ctx.Err(); ctxErr != nil { + c.debug.Println(fmt.Sprintf("terminated due to context: %v", ctxErr)) + return nil, ctxErr + } + case rp = <-c.raCtx.Return: + } + + switch rp.Type { + case packets.AUTH: + //If we've received one here it must be successful, the only way + //to abort a reauth is a server initiated disconnect + return AuthResponseFromPacketAuth(rp.Content.(*packets.Auth)), nil + case packets.DISCONNECT: + return AuthResponseFromPacketDisconnect(rp.Content.(*packets.Disconnect)), nil + } + + return nil, fmt.Errorf("error with Auth, didn't receive Auth or Disconnect") +} + +// Subscribe is used to send a Subscription request to the MQTT server. +// It is passed a pre-prepared Subscribe packet and blocks waiting for +// a response Suback, or for the timeout to fire. Any response Suback +// is returned from the function, along with any errors. 
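+//
+// A minimal usage sketch (editorial illustration, not part of the upstream
+// paho source; "cli" is a connected *Client, the topic filter and QoS are
+// placeholders):
+//
+//	sa, err := cli.Subscribe(context.Background(), &Subscribe{
+//		Subscriptions: map[string]SubscribeOptions{
+//			"some/topic/#": {QoS: 1},
+//		},
+//	})
+//	if err != nil {
+//		// sa may still describe the per-subscription reason codes
+//	}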
+func (c *Client) Subscribe(ctx context.Context, s *Subscribe) (*Suback, error) { + if !c.serverProps.WildcardSubAvailable { + for t := range s.Subscriptions { + if strings.ContainsAny(t, "#+") { + // Using a wildcard in a subscription when not supported + return nil, fmt.Errorf("cannot subscribe to %s, server does not support wildcards", t) + } + } + } + if !c.serverProps.SubIDAvailable && s.Properties != nil && s.Properties.SubscriptionIdentifier != nil { + return nil, fmt.Errorf("cannot send subscribe with subID set, server does not support subID") + } + if !c.serverProps.SharedSubAvailable { + for t := range s.Subscriptions { + if strings.HasPrefix(t, "$share") { + return nil, fmt.Errorf("cannont subscribe to %s, server does not support shared subscriptions", t) + } + } + } + + c.debug.Printf("subscribing to %+v", s.Subscriptions) + + subCtx, cf := context.WithTimeout(ctx, c.PacketTimeout) + defer cf() + cpCtx := &CPContext{subCtx, make(chan packets.ControlPacket, 1)} + + sp := s.Packet() + + mid, err := c.MIDs.Request(cpCtx) + if err != nil { + return nil, err + } + defer c.MIDs.Free(mid) + sp.PacketID = mid + + c.debug.Println("sending SUBSCRIBE") + if _, err := sp.WriteTo(c.Conn); err != nil { + return nil, err + } + c.debug.Println("waiting for SUBACK") + var sap packets.ControlPacket + + select { + case <-subCtx.Done(): + if ctxErr := subCtx.Err(); ctxErr != nil { + c.debug.Println(fmt.Sprintf("terminated due to context: %v", ctxErr)) + return nil, ctxErr + } + case sap = <-cpCtx.Return: + } + + if sap.Type != packets.SUBACK { + return nil, fmt.Errorf("received %d instead of Suback", sap.Type) + } + c.debug.Println("received SUBACK") + + sa := SubackFromPacketSuback(sap.Content.(*packets.Suback)) + switch { + case len(sa.Reasons) == 1: + if sa.Reasons[0] >= 0x80 { + var reason string + c.debug.Println("received an error code in Suback:", sa.Reasons[0]) + if sa.Properties != nil { + reason = sa.Properties.ReasonString + } + return sa, fmt.Errorf("failed to subscribe to topic: %s", reason) + } + default: + for _, code := range sa.Reasons { + if code >= 0x80 { + c.debug.Println("received an error code in Suback:", code) + return sa, fmt.Errorf("at least one requested subscription failed") + } + } + } + + return sa, nil +} + +// Unsubscribe is used to send an Unsubscribe request to the MQTT server. +// It is passed a pre-prepared Unsubscribe packet and blocks waiting for +// a response Unsuback, or for the timeout to fire. Any response Unsuback +// is returned from the function, along with any errors. 
+func (c *Client) Unsubscribe(ctx context.Context, u *Unsubscribe) (*Unsuback, error) { + c.debug.Printf("unsubscribing from %+v", u.Topics) + unsubCtx, cf := context.WithTimeout(ctx, c.PacketTimeout) + defer cf() + cpCtx := &CPContext{unsubCtx, make(chan packets.ControlPacket, 1)} + + up := u.Packet() + + mid, err := c.MIDs.Request(cpCtx) + if err != nil { + return nil, err + } + defer c.MIDs.Free(mid) + up.PacketID = mid + + c.debug.Println("sending UNSUBSCRIBE") + if _, err := up.WriteTo(c.Conn); err != nil { + return nil, err + } + c.debug.Println("waiting for UNSUBACK") + var uap packets.ControlPacket + + select { + case <-unsubCtx.Done(): + if ctxErr := unsubCtx.Err(); ctxErr != nil { + c.debug.Println(fmt.Sprintf("terminated due to context: %v", ctxErr)) + return nil, ctxErr + } + case uap = <-cpCtx.Return: + } + + if uap.Type != packets.UNSUBACK { + return nil, fmt.Errorf("received %d instead of Unsuback", uap.Type) + } + c.debug.Println("received SUBACK") + + ua := UnsubackFromPacketUnsuback(uap.Content.(*packets.Unsuback)) + switch { + case len(ua.Reasons) == 1: + if ua.Reasons[0] >= 0x80 { + var reason string + c.debug.Println("received an error code in Unsuback:", ua.Reasons[0]) + if ua.Properties != nil { + reason = ua.Properties.ReasonString + } + return ua, fmt.Errorf("failed to unsubscribe from topic: %s", reason) + } + default: + for _, code := range ua.Reasons { + if code >= 0x80 { + c.debug.Println("received an error code in Suback:", code) + return ua, fmt.Errorf("at least one requested unsubscribe failed") + } + } + } + + return ua, nil +} + +// Publish is used to send a publication to the MQTT server. +// It is passed a pre-prepared Publish packet and blocks waiting for +// the appropriate response, or for the timeout to fire. +// Any response message is returned from the function, along with any errors. 
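+//
+// A minimal usage sketch (editorial illustration, not part of the upstream
+// paho source; "cli" is a connected *Client and the topic is a placeholder):
+//
+//	_, err := cli.Publish(context.Background(), &Publish{
+//		Topic:   "some/topic",
+//		QoS:     1,
+//		Payload: []byte("hello"),
+//	})
+//	if err != nil {
+//		// handle the publish error
+//	}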
+func (c *Client) Publish(ctx context.Context, p *Publish) (*PublishResponse, error) { + if p.QoS > c.serverProps.MaximumQoS { + return nil, fmt.Errorf("cannot send Publish with QoS %d, server maximum QoS is %d", p.QoS, c.serverProps.MaximumQoS) + } + if p.Properties != nil && p.Properties.TopicAlias != nil { + if c.serverProps.TopicAliasMaximum > 0 && *p.Properties.TopicAlias > c.serverProps.TopicAliasMaximum { + return nil, fmt.Errorf("cannot send publish with TopicAlias %d, server topic alias maximum is %d", *p.Properties.TopicAlias, c.serverProps.TopicAliasMaximum) + } + } + if !c.serverProps.RetainAvailable && p.Retain { + return nil, fmt.Errorf("cannot send Publish with retain flag set, server does not support retained messages") + } + if (p.Properties == nil || p.Properties.TopicAlias == nil) && p.Topic == "" { + return nil, fmt.Errorf("cannot send a publish with no TopicAlias and no Topic set") + } + + if c.ClientConfig.PublishHook != nil { + c.ClientConfig.PublishHook(p) + } + + c.debug.Printf("sending message to %s", p.Topic) + + pb := p.Packet() + + switch p.QoS { + case 0: + c.debug.Println("sending QoS0 message") + if _, err := pb.WriteTo(c.Conn); err != nil { + return nil, err + } + return nil, nil + case 1, 2: + return c.publishQoS12(ctx, pb) + } + + return nil, fmt.Errorf("QoS isn't 0, 1 or 2") +} + +func (c *Client) publishQoS12(ctx context.Context, pb *packets.Publish) (*PublishResponse, error) { + c.debug.Println("sending QoS12 message") + pubCtx, cf := context.WithTimeout(ctx, c.PacketTimeout) + defer cf() + if err := c.serverInflight.Acquire(pubCtx, 1); err != nil { + return nil, err + } + defer c.serverInflight.Release(1) + cpCtx := &CPContext{pubCtx, make(chan packets.ControlPacket, 1)} + + mid, err := c.MIDs.Request(cpCtx) + if err != nil { + return nil, err + } + defer c.MIDs.Free(mid) + pb.PacketID = mid + + if _, err := pb.WriteTo(c.Conn); err != nil { + return nil, err + } + var resp packets.ControlPacket + + select { + case <-pubCtx.Done(): + if ctxErr := pubCtx.Err(); ctxErr != nil { + c.debug.Println(fmt.Sprintf("terminated due to context: %v", ctxErr)) + return nil, ctxErr + } + case resp = <-cpCtx.Return: + } + + switch pb.QoS { + case 1: + if resp.Type != packets.PUBACK { + return nil, fmt.Errorf("received %d instead of PUBACK", resp.Type) + } + + pr := PublishResponseFromPuback(resp.Content.(*packets.Puback)) + if pr.ReasonCode >= 0x80 { + c.debug.Println("received an error code in Puback:", pr.ReasonCode) + return pr, fmt.Errorf("error publishing: %s", resp.Content.(*packets.Puback).Reason()) + } + return pr, nil + case 2: + switch resp.Type { + case packets.PUBCOMP: + pr := PublishResponseFromPubcomp(resp.Content.(*packets.Pubcomp)) + return pr, nil + case packets.PUBREC: + c.debug.Printf("received PUBREC for %s (must have errored)", pb.PacketID) + pr := PublishResponseFromPubrec(resp.Content.(*packets.Pubrec)) + return pr, nil + default: + return nil, fmt.Errorf("received %d instead of PUBCOMP", resp.Type) + } + } + + c.debug.Println("ended up with a non QoS1/2 message:", pb.QoS) + return nil, fmt.Errorf("ended up with a non QoS1/2 message: %d", pb.QoS) +} + +func (c *Client) expectConnack(packet chan<- *packets.Connack, errs chan<- error) { + recv, err := packets.ReadPacket(c.Conn) + if err != nil { + errs <- err + return + } + switch r := recv.Content.(type) { + case *packets.Connack: + c.debug.Println("received CONNACK") + if r.ReasonCode == packets.ConnackSuccess && r.Properties != nil && r.Properties.AuthMethod != "" { + // Successful connack and 
AuthMethod is defined, must have successfully authed during connect + go c.AuthHandler.Authenticated() + } + packet <- r + case *packets.Auth: + c.debug.Println("received AUTH") + if c.AuthHandler == nil { + errs <- fmt.Errorf("enhanced authentication flow started but no AuthHandler configured") + return + } + c.debug.Println("sending AUTH") + _, err := c.AuthHandler.Authenticate(AuthFromPacketAuth(r)).Packet().WriteTo(c.Conn) + if err != nil { + errs <- fmt.Errorf("error sending authentication packet: %w", err) + return + } + // go round again, either another AUTH or CONNACK + go c.expectConnack(packet, errs) + default: + errs <- fmt.Errorf("received unexpected packet %v", recv.Type) + } + +} + +// Disconnect is used to send a Disconnect packet to the MQTT server +// Whether or not the attempt to send the Disconnect packet fails +// (and if it does this function returns any error) the network connection +// is closed. +func (c *Client) Disconnect(d *Disconnect) error { + c.debug.Println("disconnecting") + _, err := d.Packet().WriteTo(c.Conn) + + c.close() + c.workers.Wait() + + return err +} + +// SetDebugLogger takes an instance of the paho Logger interface +// and sets it to be used by the debug log endpoint +func (c *Client) SetDebugLogger(l Logger) { + c.debug = l +} + +// SetErrorLogger takes an instance of the paho Logger interface +// and sets it to be used by the error log endpoint +func (c *Client) SetErrorLogger(l Logger) { + c.errors = l +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_auth.go b/vendor/github.com/eclipse/paho.golang/paho/cp_auth.go new file mode 100644 index 000000000..6ccef9b47 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_auth.go @@ -0,0 +1,92 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Auth is a representation of the MQTT Auth packet + Auth struct { + Properties *AuthProperties + ReasonCode byte + } + + // AuthProperties is a struct of the properties that can be set + // for a Auth packet + AuthProperties struct { + AuthData []byte + AuthMethod string + ReasonString string + User UserProperties + } +) + +// InitProperties is a function that takes a lower level +// Properties struct and completes the properties of the Auth on +// which it is called +func (a *Auth) InitProperties(p *packets.Properties) { + a.Properties = &AuthProperties{ + AuthMethod: p.AuthMethod, + AuthData: p.AuthData, + ReasonString: p.ReasonString, + User: UserPropertiesFromPacketUser(p.User), + } +} + +// AuthFromPacketAuth takes a packets library Auth and +// returns a paho library Auth +func AuthFromPacketAuth(a *packets.Auth) *Auth { + v := &Auth{ReasonCode: a.ReasonCode} + v.InitProperties(a.Properties) + + return v +} + +// Packet returns a packets library Auth from the paho Auth +// on which it is called +func (a *Auth) Packet() *packets.Auth { + v := &packets.Auth{ReasonCode: a.ReasonCode} + + if a.Properties != nil { + v.Properties = &packets.Properties{ + AuthMethod: a.Properties.AuthMethod, + AuthData: a.Properties.AuthData, + ReasonString: a.Properties.ReasonString, + User: a.Properties.User.ToPacketProperties(), + } + } + + return v +} + +// AuthResponse is a represenation of the response to an Auth +// packet +type AuthResponse struct { + Properties *AuthProperties + ReasonCode byte + Success bool +} + +// AuthResponseFromPacketAuth takes a packets library Auth and +// returns a paho library AuthResponse +func AuthResponseFromPacketAuth(a *packets.Auth) *AuthResponse { + return &AuthResponse{ + Success: 
true, + ReasonCode: a.ReasonCode, + Properties: &AuthProperties{ + ReasonString: a.Properties.ReasonString, + User: UserPropertiesFromPacketUser(a.Properties.User), + }, + } +} + +// AuthResponseFromPacketDisconnect takes a packets library Disconnect and +// returns a paho library AuthResponse +func AuthResponseFromPacketDisconnect(d *packets.Disconnect) *AuthResponse { + return &AuthResponse{ + Success: true, + ReasonCode: d.ReasonCode, + Properties: &AuthProperties{ + ReasonString: d.Properties.ReasonString, + User: UserPropertiesFromPacketUser(d.Properties.User), + }, + } +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_connack.go b/vendor/github.com/eclipse/paho.golang/paho/cp_connack.go new file mode 100644 index 000000000..9c7233618 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_connack.go @@ -0,0 +1,84 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Connack is a representation of the MQTT Connack packet + Connack struct { + Properties *ConnackProperties + ReasonCode byte + SessionPresent bool + } + + // ConnackProperties is a struct of the properties that can be set + // for a Connack packet + ConnackProperties struct { + SessionExpiryInterval *uint32 + AuthData []byte + AuthMethod string + ResponseInfo string + ServerReference string + ReasonString string + AssignedClientID string + MaximumPacketSize *uint32 + ReceiveMaximum *uint16 + TopicAliasMaximum *uint16 + ServerKeepAlive *uint16 + MaximumQoS *byte + User UserProperties + WildcardSubAvailable bool + SubIDAvailable bool + SharedSubAvailable bool + RetainAvailable bool + } +) + +// InitProperties is a function that takes a lower level +// Properties struct and completes the properties of the Connack on +// which it is called +func (c *Connack) InitProperties(p *packets.Properties) { + c.Properties = &ConnackProperties{ + AssignedClientID: p.AssignedClientID, + ServerKeepAlive: p.ServerKeepAlive, + WildcardSubAvailable: true, + SubIDAvailable: true, + SharedSubAvailable: true, + RetainAvailable: true, + ResponseInfo: p.ResponseInfo, + SessionExpiryInterval: p.SessionExpiryInterval, + AuthMethod: p.AuthMethod, + AuthData: p.AuthData, + ServerReference: p.ServerReference, + ReasonString: p.ReasonString, + ReceiveMaximum: p.ReceiveMaximum, + TopicAliasMaximum: p.TopicAliasMaximum, + MaximumQoS: p.MaximumQOS, + MaximumPacketSize: p.MaximumPacketSize, + User: UserPropertiesFromPacketUser(p.User), + } + + if p.WildcardSubAvailable != nil { + c.Properties.WildcardSubAvailable = *p.WildcardSubAvailable == 1 + } + if p.SubIDAvailable != nil { + c.Properties.SubIDAvailable = *p.SubIDAvailable == 1 + } + if p.SharedSubAvailable != nil { + c.Properties.SharedSubAvailable = *p.SharedSubAvailable == 1 + } + if p.RetainAvailable != nil { + c.Properties.RetainAvailable = *p.RetainAvailable == 1 + } +} + +// ConnackFromPacketConnack takes a packets library Connack and +// returns a paho library Connack +func ConnackFromPacketConnack(c *packets.Connack) *Connack { + v := &Connack{ + SessionPresent: c.SessionPresent, + ReasonCode: c.ReasonCode, + } + v.InitProperties(c.Properties) + + return v +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_connect.go b/vendor/github.com/eclipse/paho.golang/paho/cp_connect.go new file mode 100644 index 000000000..8d731764d --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_connect.go @@ -0,0 +1,180 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Connect is a representation of the MQTT 
Connect packet + Connect struct { + Password []byte + Username string + ClientID string + Properties *ConnectProperties + WillMessage *WillMessage + WillProperties *WillProperties + KeepAlive uint16 + CleanStart bool + UsernameFlag bool + PasswordFlag bool + } + + // ConnectProperties is a struct of the properties that can be set + // for a Connect packet + ConnectProperties struct { + AuthData []byte + AuthMethod string + SessionExpiryInterval *uint32 + WillDelayInterval *uint32 + ReceiveMaximum *uint16 + TopicAliasMaximum *uint16 + MaximumQOS *byte + MaximumPacketSize *uint32 + User UserProperties + RequestProblemInfo bool + RequestResponseInfo bool + } +) + +// InitProperties is a function that takes a lower level +// Properties struct and completes the properties of the Connect on +// which it is called +func (c *Connect) InitProperties(p *packets.Properties) { + c.Properties = &ConnectProperties{ + SessionExpiryInterval: p.SessionExpiryInterval, + AuthMethod: p.AuthMethod, + AuthData: p.AuthData, + WillDelayInterval: p.WillDelayInterval, + RequestResponseInfo: false, + RequestProblemInfo: true, + ReceiveMaximum: p.ReceiveMaximum, + TopicAliasMaximum: p.TopicAliasMaximum, + MaximumQOS: p.MaximumQOS, + MaximumPacketSize: p.MaximumPacketSize, + User: UserPropertiesFromPacketUser(p.User), + } + + if p.RequestResponseInfo != nil { + c.Properties.RequestResponseInfo = *p.RequestProblemInfo == 1 + } + if p.RequestProblemInfo != nil { + c.Properties.RequestProblemInfo = *p.RequestProblemInfo == 1 + } +} + +// InitWillProperties is a function that takes a lower level +// Properties struct and completes the properties of the Will in the Connect on +// which it is called +func (c *Connect) InitWillProperties(p *packets.Properties) { + c.WillProperties = &WillProperties{ + WillDelayInterval: p.WillDelayInterval, + PayloadFormat: p.PayloadFormat, + MessageExpiry: p.MessageExpiry, + ContentType: p.ContentType, + ResponseTopic: p.ResponseTopic, + CorrelationData: p.CorrelationData, + User: UserPropertiesFromPacketUser(p.User), + } +} + +// ConnectFromPacketConnect takes a packets library Connect and +// returns a paho library Connect +func ConnectFromPacketConnect(p *packets.Connect) *Connect { + v := &Connect{ + UsernameFlag: p.UsernameFlag, + Username: p.Username, + PasswordFlag: p.PasswordFlag, + Password: p.Password, + ClientID: p.ClientID, + CleanStart: p.CleanStart, + KeepAlive: p.KeepAlive, + } + v.InitProperties(p.Properties) + if p.WillFlag { + v.WillMessage = &WillMessage{ + Retain: p.WillRetain, + QoS: p.WillQOS, + Topic: p.WillTopic, + Payload: p.WillMessage, + } + v.InitWillProperties(p.WillProperties) + } + + return v +} + +// Packet returns a packets library Connect from the paho Connect +// on which it is called +func (c *Connect) Packet() *packets.Connect { + v := &packets.Connect{ + UsernameFlag: c.UsernameFlag, + Username: c.Username, + PasswordFlag: c.PasswordFlag, + Password: c.Password, + ClientID: c.ClientID, + CleanStart: c.CleanStart, + KeepAlive: c.KeepAlive, + } + + if c.Properties != nil { + v.Properties = &packets.Properties{ + SessionExpiryInterval: c.Properties.SessionExpiryInterval, + AuthMethod: c.Properties.AuthMethod, + AuthData: c.Properties.AuthData, + WillDelayInterval: c.Properties.WillDelayInterval, + ReceiveMaximum: c.Properties.ReceiveMaximum, + TopicAliasMaximum: c.Properties.TopicAliasMaximum, + MaximumQOS: c.Properties.MaximumQOS, + MaximumPacketSize: c.Properties.MaximumPacketSize, + User: c.Properties.User.ToPacketProperties(), + } + if 
c.Properties.RequestResponseInfo { + v.Properties.RequestResponseInfo = Byte(1) + } + if !c.Properties.RequestProblemInfo { + v.Properties.RequestProblemInfo = Byte(0) + } + } + + if c.WillMessage != nil { + v.WillFlag = true + v.WillQOS = c.WillMessage.QoS + v.WillTopic = c.WillMessage.Topic + v.WillRetain = c.WillMessage.Retain + v.WillMessage = c.WillMessage.Payload + if c.WillProperties != nil { + v.WillProperties = &packets.Properties{ + WillDelayInterval: c.WillProperties.WillDelayInterval, + PayloadFormat: c.WillProperties.PayloadFormat, + MessageExpiry: c.WillProperties.MessageExpiry, + ContentType: c.WillProperties.ContentType, + ResponseTopic: c.WillProperties.ResponseTopic, + CorrelationData: c.WillProperties.CorrelationData, + User: c.WillProperties.User.ToPacketProperties(), + } + } + } + + return v +} + +type ( + // WillMessage is a representation of the LWT message that can + // be sent with the Connect packet + WillMessage struct { + Retain bool + QoS byte + Topic string + Payload []byte + } + + // WillProperties is a struct of the properties that can be set + // for a Will in a Connect packet + WillProperties struct { + WillDelayInterval *uint32 + PayloadFormat *byte + MessageExpiry *uint32 + ContentType string + ResponseTopic string + CorrelationData []byte + User UserProperties + } +) diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_disconnect.go b/vendor/github.com/eclipse/paho.golang/paho/cp_disconnect.go new file mode 100644 index 000000000..5caa85b14 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_disconnect.go @@ -0,0 +1,58 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Disconnect is a representation of the MQTT Disconnect packet + Disconnect struct { + Properties *DisconnectProperties + ReasonCode byte + } + + // DisconnectProperties is a struct of the properties that can be set + // for a Disconnect packet + DisconnectProperties struct { + ServerReference string + ReasonString string + SessionExpiryInterval *uint32 + User UserProperties + } +) + +// InitProperties is a function that takes a lower level +// Properties struct and completes the properties of the Disconnect on +// which it is called +func (d *Disconnect) InitProperties(p *packets.Properties) { + d.Properties = &DisconnectProperties{ + SessionExpiryInterval: p.SessionExpiryInterval, + ServerReference: p.ServerReference, + ReasonString: p.ReasonString, + User: UserPropertiesFromPacketUser(p.User), + } +} + +// DisconnectFromPacketDisconnect takes a packets library Disconnect and +// returns a paho library Disconnect +func DisconnectFromPacketDisconnect(p *packets.Disconnect) *Disconnect { + v := &Disconnect{ReasonCode: p.ReasonCode} + v.InitProperties(p.Properties) + + return v +} + +// Packet returns a packets library Disconnect from the paho Disconnect +// on which it is called +func (d *Disconnect) Packet() *packets.Disconnect { + v := &packets.Disconnect{ReasonCode: d.ReasonCode} + + if d.Properties != nil { + v.Properties = &packets.Properties{ + SessionExpiryInterval: d.Properties.SessionExpiryInterval, + ServerReference: d.Properties.ServerReference, + ReasonString: d.Properties.ReasonString, + User: d.Properties.User.ToPacketProperties(), + } + } + + return v +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_publish.go b/vendor/github.com/eclipse/paho.golang/paho/cp_publish.go new file mode 100644 index 000000000..1bb9654b3 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_publish.go @@ -0,0 +1,123 @@ 
+package paho + +import ( + "bytes" + "fmt" + + "github.com/eclipse/paho.golang/packets" +) + +type ( + // Publish is a representation of the MQTT Publish packet + Publish struct { + PacketID uint16 + QoS byte + Retain bool + Topic string + Properties *PublishProperties + Payload []byte + } + + // PublishProperties is a struct of the properties that can be set + // for a Publish packet + PublishProperties struct { + CorrelationData []byte + ContentType string + ResponseTopic string + PayloadFormat *byte + MessageExpiry *uint32 + SubscriptionIdentifier *int + TopicAlias *uint16 + User UserProperties + } +) + +// InitProperties is a function that takes a lower level +// Properties struct and completes the properties of the Publish on +// which it is called +func (p *Publish) InitProperties(prop *packets.Properties) { + p.Properties = &PublishProperties{ + PayloadFormat: prop.PayloadFormat, + MessageExpiry: prop.MessageExpiry, + ContentType: prop.ContentType, + ResponseTopic: prop.ResponseTopic, + CorrelationData: prop.CorrelationData, + TopicAlias: prop.TopicAlias, + SubscriptionIdentifier: prop.SubscriptionIdentifier, + User: UserPropertiesFromPacketUser(prop.User), + } +} + +// PublishFromPacketPublish takes a packets library Publish and +// returns a paho library Publish +func PublishFromPacketPublish(p *packets.Publish) *Publish { + v := &Publish{ + PacketID: p.PacketID, + QoS: p.QoS, + Retain: p.Retain, + Topic: p.Topic, + Payload: p.Payload, + } + v.InitProperties(p.Properties) + + return v +} + +// Packet returns a packets library Publish from the paho Publish +// on which it is called +func (p *Publish) Packet() *packets.Publish { + v := &packets.Publish{ + PacketID: p.PacketID, + QoS: p.QoS, + Retain: p.Retain, + Topic: p.Topic, + Payload: p.Payload, + } + if p.Properties != nil { + v.Properties = &packets.Properties{ + PayloadFormat: p.Properties.PayloadFormat, + MessageExpiry: p.Properties.MessageExpiry, + ContentType: p.Properties.ContentType, + ResponseTopic: p.Properties.ResponseTopic, + CorrelationData: p.Properties.CorrelationData, + TopicAlias: p.Properties.TopicAlias, + SubscriptionIdentifier: p.Properties.SubscriptionIdentifier, + User: p.Properties.User.ToPacketProperties(), + } + } + + return v +} + +func (p *Publish) String() string { + var b bytes.Buffer + + fmt.Fprintf(&b, "topic: %s qos: %d retain: %t\n", p.Topic, p.QoS, p.Retain) + if p.Properties.PayloadFormat != nil { + fmt.Fprintf(&b, "PayloadFormat: %v\n", p.Properties.PayloadFormat) + } + if p.Properties.MessageExpiry != nil { + fmt.Fprintf(&b, "MessageExpiry: %v\n", p.Properties.MessageExpiry) + } + if p.Properties.ContentType != "" { + fmt.Fprintf(&b, "ContentType: %v\n", p.Properties.ContentType) + } + if p.Properties.ResponseTopic != "" { + fmt.Fprintf(&b, "ResponseTopic: %v\n", p.Properties.ResponseTopic) + } + if p.Properties.CorrelationData != nil { + fmt.Fprintf(&b, "CorrelationData: %v\n", p.Properties.CorrelationData) + } + if p.Properties.TopicAlias != nil { + fmt.Fprintf(&b, "TopicAlias: %d\n", p.Properties.TopicAlias) + } + if p.Properties.SubscriptionIdentifier != nil { + fmt.Fprintf(&b, "SubscriptionIdentifier: %v\n", p.Properties.SubscriptionIdentifier) + } + for _, v := range p.Properties.User { + fmt.Fprintf(&b, "User: %s : %s\n", v.Key, v.Value) + } + b.WriteString(string(p.Payload)) + + return b.String() +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_pubresp.go b/vendor/github.com/eclipse/paho.golang/paho/cp_pubresp.go new file mode 100644 index 000000000..0c4e174aa --- 
/dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_pubresp.go @@ -0,0 +1,55 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // PublishResponse is a generic representation of a response + // to a QoS1 or QoS2 Publish + PublishResponse struct { + Properties *PublishResponseProperties + ReasonCode byte + } + + // PublishResponseProperties is the properties associated with + // a response to a QoS1 or QoS2 Publish + PublishResponseProperties struct { + ReasonString string + User UserProperties + } +) + +// PublishResponseFromPuback takes a packets library Puback and +// returns a paho library PublishResponse +func PublishResponseFromPuback(pa *packets.Puback) *PublishResponse { + return &PublishResponse{ + ReasonCode: pa.ReasonCode, + Properties: &PublishResponseProperties{ + ReasonString: pa.Properties.ReasonString, + User: UserPropertiesFromPacketUser(pa.Properties.User), + }, + } +} + +// PublishResponseFromPubcomp takes a packets library Pubcomp and +// returns a paho library PublishResponse +func PublishResponseFromPubcomp(pc *packets.Pubcomp) *PublishResponse { + return &PublishResponse{ + ReasonCode: pc.ReasonCode, + Properties: &PublishResponseProperties{ + ReasonString: pc.Properties.ReasonString, + User: UserPropertiesFromPacketUser(pc.Properties.User), + }, + } +} + +// PublishResponseFromPubrec takes a packets library Pubrec and +// returns a paho library PublishResponse +func PublishResponseFromPubrec(pr *packets.Pubrec) *PublishResponse { + return &PublishResponse{ + ReasonCode: pr.ReasonCode, + Properties: &PublishResponseProperties{ + ReasonString: pr.Properties.ReasonString, + User: UserPropertiesFromPacketUser(pr.Properties.User), + }, + } +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_suback.go b/vendor/github.com/eclipse/paho.golang/paho/cp_suback.go new file mode 100644 index 000000000..c1034c26c --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_suback.go @@ -0,0 +1,41 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Suback is a representation of an MQTT suback packet + Suback struct { + Properties *SubackProperties + Reasons []byte + } + + // SubackProperties is a struct of the properties that can be set + // for a Suback packet + SubackProperties struct { + ReasonString string + User UserProperties + } +) + +// Packet returns a packets library Suback from the paho Suback +// on which it is called +func (s *Suback) Packet() *packets.Suback { + return &packets.Suback{ + Reasons: s.Reasons, + Properties: &packets.Properties{ + User: s.Properties.User.ToPacketProperties(), + }, + } +} + +// SubackFromPacketSuback takes a packets library Suback and +// returns a paho library Suback +func SubackFromPacketSuback(s *packets.Suback) *Suback { + return &Suback{ + Reasons: s.Reasons, + Properties: &SubackProperties{ + ReasonString: s.Properties.ReasonString, + User: UserPropertiesFromPacketUser(s.Properties.User), + }, + } +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_subscribe.go b/vendor/github.com/eclipse/paho.golang/paho/cp_subscribe.go new file mode 100644 index 000000000..e111f0cf6 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_subscribe.go @@ -0,0 +1,67 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Subscribe is a representation of a MQTT subscribe packet + Subscribe struct { + Properties *SubscribeProperties + Subscriptions map[string]SubscribeOptions + } + + // SubscribeOptions is the struct 
representing the options for a subscription + SubscribeOptions struct { + QoS byte + RetainHandling byte + NoLocal bool + RetainAsPublished bool + } +) + +// SubscribeProperties is a struct of the properties that can be set +// for a Subscribe packet +type SubscribeProperties struct { + SubscriptionIdentifier *int + User UserProperties +} + +// InitProperties is a function that takes a packet library +// Properties struct and completes the properties of the Subscribe on +// which it is called +func (s *Subscribe) InitProperties(prop *packets.Properties) { + s.Properties = &SubscribeProperties{ + SubscriptionIdentifier: prop.SubscriptionIdentifier, + User: UserPropertiesFromPacketUser(prop.User), + } +} + +// PacketSubOptionsFromSubscribeOptions returns a map of string to packet +// library SubOptions for the paho Subscribe on which it is called +func (s *Subscribe) PacketSubOptionsFromSubscribeOptions() map[string]packets.SubOptions { + r := make(map[string]packets.SubOptions) + for k, v := range s.Subscriptions { + r[k] = packets.SubOptions{ + QoS: v.QoS, + NoLocal: v.NoLocal, + RetainAsPublished: v.RetainAsPublished, + RetainHandling: v.RetainHandling, + } + } + + return r +} + +// Packet returns a packets library Subscribe from the paho Subscribe +// on which it is called +func (s *Subscribe) Packet() *packets.Subscribe { + v := &packets.Subscribe{Subscriptions: s.PacketSubOptionsFromSubscribeOptions()} + + if s.Properties != nil { + v.Properties = &packets.Properties{ + SubscriptionIdentifier: s.Properties.SubscriptionIdentifier, + User: s.Properties.User.ToPacketProperties(), + } + } + + return v +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_unsuback.go b/vendor/github.com/eclipse/paho.golang/paho/cp_unsuback.go new file mode 100644 index 000000000..15ca83885 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_unsuback.go @@ -0,0 +1,41 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Unsuback is a representation of an MQTT Unsuback packet + Unsuback struct { + Reasons []byte + Properties *UnsubackProperties + } + + // UnsubackProperties is a struct of the properties that can be set + // for a Unsuback packet + UnsubackProperties struct { + ReasonString string + User UserProperties + } +) + +// Packet returns a packets library Unsuback from the paho Unsuback +// on which it is called +func (u *Unsuback) Packet() *packets.Unsuback { + return &packets.Unsuback{ + Reasons: u.Reasons, + Properties: &packets.Properties{ + User: u.Properties.User.ToPacketProperties(), + }, + } +} + +// UnsubackFromPacketUnsuback takes a packets library Unsuback and +// returns a paho library Unsuback +func UnsubackFromPacketUnsuback(u *packets.Unsuback) *Unsuback { + return &Unsuback{ + Reasons: u.Reasons, + Properties: &UnsubackProperties{ + ReasonString: u.Properties.ReasonString, + User: UserPropertiesFromPacketUser(u.Properties.User), + }, + } +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_unsubscribe.go b/vendor/github.com/eclipse/paho.golang/paho/cp_unsubscribe.go new file mode 100644 index 000000000..375b917c8 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_unsubscribe.go @@ -0,0 +1,31 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type ( + // Unsubscribe is a representation of an MQTT unsubscribe packet + Unsubscribe struct { + Topics []string + Properties *UnsubscribeProperties + } + + // UnsubscribeProperties is a struct of the properties that can be set + // for a Unsubscribe 
packet + UnsubscribeProperties struct { + User UserProperties + } +) + +// Packet returns a packets library Unsubscribe from the paho Unsubscribe +// on which it is called +func (u *Unsubscribe) Packet() *packets.Unsubscribe { + v := &packets.Unsubscribe{Topics: u.Topics} + + if u.Properties != nil { + v.Properties = &packets.Properties{ + User: u.Properties.User.ToPacketProperties(), + } + } + + return v +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/cp_utils.go b/vendor/github.com/eclipse/paho.golang/paho/cp_utils.go new file mode 100644 index 000000000..2d7995f5c --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/cp_utils.go @@ -0,0 +1,100 @@ +package paho + +import ( + "github.com/eclipse/paho.golang/packets" +) + +// UserProperty is a struct for the user provided values +// permitted in the properties section +type UserProperty struct { + Key, Value string +} + +// UserProperties is a slice of UserProperty +type UserProperties []UserProperty + +// Add is a helper function for easily adding a new user property +func (u *UserProperties) Add(key, value string) *UserProperties { + *u = append(*u, UserProperty{key, value}) + + return u +} + +// Get returns the first entry in the UserProperties that matches +// key, or an empty string if the key is not found. Note that it is +// permitted to have multiple entries with the same key, use GetAll +// if it is expected to have multiple matches +func (u UserProperties) Get(key string) string { + for _, v := range u { + if v.Key == key { + return v.Value + } + } + + return "" +} + +// GetAll returns a slice of all entries in the UserProperties +// that match key, or a nil slice if none were found. +func (u UserProperties) GetAll(key string) []string { + var ret []string + for _, v := range u { + if v.Key == key { + ret = append(ret, v.Value) + } + } + + return ret +} + +// ToPacketProperties converts a UserProperties to a slice +// of packets.User which is used internally in the packets +// library for user properties +func (u UserProperties) ToPacketProperties() []packets.User { + ret := make([]packets.User, len(u)) + for i, v := range u { + ret[i] = packets.User{Key: v.Key, Value: v.Value} + } + + return ret +} + +// UserPropertiesFromPacketUser converts a slice of packets.User +// to an instance of UserProperties for easier consumption within +// the client library +func UserPropertiesFromPacketUser(up []packets.User) UserProperties { + ret := make(UserProperties, len(up)) + for i, v := range up { + ret[i] = UserProperty{v.Key, v.Value} + } + + return ret +} + +// Byte is a helper function that take a byte and returns +// a pointer to a byte of that value +func Byte(b byte) *byte { + return &b +} + +// Uint32 is a helper function that take a uint32 and returns +// a pointer to a uint32 of that value +func Uint32(u uint32) *uint32 { + return &u +} + +// Uint16 is a helper function that take a uint16 and returns +// a pointer to a uint16 of that value +func Uint16(u uint16) *uint16 { + return &u +} + +// BoolToByte is a helper function that take a bool and returns +// a pointer to a byte of value 1 if true or 0 if false +func BoolToByte(b bool) *byte { + var v byte + if b { + v = 1 + } + return &v +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/message_ids.go b/vendor/github.com/eclipse/paho.golang/paho/message_ids.go new file mode 100644 index 000000000..58b03e324 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/message_ids.go @@ -0,0 +1,93 @@ +package paho + +import ( + "context" + "errors" + 
"sync" + + "github.com/eclipse/paho.golang/packets" +) + +const ( + midMin uint16 = 1 + midMax uint16 = 65535 +) + +// ErrorMidsExhausted is returned from Request() when there are no +// free message ids to be used. +var ErrorMidsExhausted = errors.New("all message ids in use") + +// MIDService defines the interface for a struct that handles the +// relationship between message ids and CPContexts +// Request() takes a *CPContext and returns a uint16 that is the +// messageid that should be used by the code that called Request() +// Get() takes a uint16 that is a messageid and returns the matching +// *CPContext that the MIDService has associated with that messageid +// Free() takes a uint16 that is a messageid and instructs the MIDService +// to mark that messageid as available for reuse +// Clear() resets the internal state of the MIDService +type MIDService interface { + Request(*CPContext) (uint16, error) + Get(uint16) *CPContext + Free(uint16) + Clear() +} + +// CPContext is the struct that is used to return responses to +// ControlPackets that have them, eg: the suback to a subscribe. +// The response packet is send down the Return channel and the +// Context is used to track timeouts. +type CPContext struct { + Context context.Context + Return chan packets.ControlPacket +} + +// MIDs is the default MIDService provided by this library. +// It uses a map of uint16 to *CPContext to track responses +// to messages with a messageid +type MIDs struct { + sync.Mutex + lastMid uint16 + index []*CPContext +} + +// Request is the library provided MIDService's implementation of +// the required interface function() +func (m *MIDs) Request(c *CPContext) (uint16, error) { + m.Lock() + defer m.Unlock() + for i := uint16(1); i < midMax; i++ { + v := (m.lastMid + i) % midMax + if v == 0 { + continue + } + if inuse := m.index[v]; inuse == nil { + m.index[v] = c + m.lastMid = v + return v, nil + } + } + return 0, ErrorMidsExhausted +} + +// Get is the library provided MIDService's implementation of +// the required interface function() +func (m *MIDs) Get(i uint16) *CPContext { + m.Lock() + defer m.Unlock() + return m.index[i] +} + +// Free is the library provided MIDService's implementation of +// the required interface function() +func (m *MIDs) Free(i uint16) { + m.Lock() + m.index[i] = nil + m.Unlock() +} + +// Clear is the library provided MIDService's implementation of +// the required interface function() +func (m *MIDs) Clear() { + m.index = make([]*CPContext, int(midMax)) +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/noop_persistence.go b/vendor/github.com/eclipse/paho.golang/paho/noop_persistence.go new file mode 100644 index 000000000..d2d15704f --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/noop_persistence.go @@ -0,0 +1,23 @@ +package paho + +import "github.com/eclipse/paho.golang/packets" + +type noopPersistence struct{} + +func (n *noopPersistence) Open() {} + +func (n *noopPersistence) Put(id uint16, cp packets.ControlPacket) {} + +func (n *noopPersistence) Get(id uint16) packets.ControlPacket { + return packets.ControlPacket{} +} + +func (n *noopPersistence) All() []packets.ControlPacket { + return nil +} + +func (n *noopPersistence) Delete(id uint16) {} + +func (n *noopPersistence) Close() {} + +func (n *noopPersistence) Reset() {} diff --git a/vendor/github.com/eclipse/paho.golang/paho/persistence.go b/vendor/github.com/eclipse/paho.golang/paho/persistence.go new file mode 100644 index 000000000..f02b846cc --- /dev/null +++ 
b/vendor/github.com/eclipse/paho.golang/paho/persistence.go @@ -0,0 +1,98 @@ +package paho + +import ( + "sync" + + "github.com/eclipse/paho.golang/packets" +) + +// Persistence is an interface of the functions for a struct +// that is used to persist ControlPackets. +// Open() is an initialiser to prepare the Persistence for use +// Put() takes a uint16 which is a messageid and a ControlPacket +// to persist against that messageid +// Get() takes a uint16 which is a messageid and returns the +// persisted ControlPacket from the Persistence for that messageid +// All() returns a slice of all ControlPackets persisted +// Delete() takes a uint16 which is a messageid and deletes the +// associated stored ControlPacket from the Persistence +// Close() closes the Persistence +// Reset() clears the Persistence and prepares it to be reused +type Persistence interface { + Open() + Put(uint16, packets.ControlPacket) + Get(uint16) packets.ControlPacket + All() []packets.ControlPacket + Delete(uint16) + Close() + Reset() +} + +// MemoryPersistence is an implementation of a Persistence +// that stores the ControlPackets in memory using a map +type MemoryPersistence struct { + sync.RWMutex + packets map[uint16]packets.ControlPacket +} + +// Open is the library provided MemoryPersistence's implementation of +// the required interface function() +func (m *MemoryPersistence) Open() { + m.Lock() + m.packets = make(map[uint16]packets.ControlPacket) + m.Unlock() +} + +// Put is the library provided MemoryPersistence's implementation of +// the required interface function() +func (m *MemoryPersistence) Put(id uint16, cp packets.ControlPacket) { + m.Lock() + m.packets[id] = cp + m.Unlock() +} + +// Get is the library provided MemoryPersistence's implementation of +// the required interface function() +func (m *MemoryPersistence) Get(id uint16) packets.ControlPacket { + m.RLock() + defer m.RUnlock() + return m.packets[id] +} + +// All is the library provided MemoryPersistence's implementation of +// the required interface function() +func (m *MemoryPersistence) All() []packets.ControlPacket { + m.Lock() + defer m.RUnlock() + ret := make([]packets.ControlPacket, len(m.packets)) + + for _, cp := range m.packets { + ret = append(ret, cp) + } + + return ret +} + +// Delete is the library provided MemoryPersistence's implementation of +// the required interface function() +func (m *MemoryPersistence) Delete(id uint16) { + m.Lock() + delete(m.packets, id) + m.Unlock() +} + +// Close is the library provided MemoryPersistence's implementation of +// the required interface function() +func (m *MemoryPersistence) Close() { + m.Lock() + m.packets = nil + m.Unlock() +} + +// Reset is the library provided MemoryPersistence's implementation of +// the required interface function() +func (m *MemoryPersistence) Reset() { + m.Lock() + m.packets = make(map[uint16]packets.ControlPacket) + m.Unlock() +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/pinger.go b/vendor/github.com/eclipse/paho.golang/paho/pinger.go new file mode 100644 index 000000000..e135d25ac --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/pinger.go @@ -0,0 +1,122 @@ +package paho + +import ( + "fmt" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/eclipse/paho.golang/packets" +) + +// PingFailHandler is a type for the function that is invoked +// when we have sent a Pingreq to the server and not received +// a Pingresp within 1.5x our pingtimeout +type PingFailHandler func(error) + +// Pinger is an interface of the functions for 
a struct that is +// used to manage sending PingRequests and responding to +// PingResponses +// Start() takes a net.Conn which is a connection over which an +// MQTT session has already been established, and a time.Duration +// of the keepalive setting passed to the server when the MQTT +// session was established. +// Stop() is used to stop the Pinger +// PingResp() is the function that is called by the Client when +// a PingResponse is received +// SetDebug() is used to pass in a Logger to be used to log debug +// information, for example sharing a logger with the main client +type Pinger interface { + Start(net.Conn, time.Duration) + Stop() + PingResp() + SetDebug(Logger) +} + +// PingHandler is the library provided default Pinger +type PingHandler struct { + mu sync.Mutex + lastPing time.Time + conn net.Conn + stop chan struct{} + pingFailHandler PingFailHandler + pingOutstanding int32 + debug Logger +} + +// DefaultPingerWithCustomFailHandler returns an instance of the +// default Pinger but with a custom PingFailHandler that is called +// when the client has not received a response to a PingRequest +// within the appropriate amount of time +func DefaultPingerWithCustomFailHandler(pfh PingFailHandler) *PingHandler { + return &PingHandler{ + pingFailHandler: pfh, + debug: NOOPLogger{}, + } +} + +// Start is the library provided Pinger's implementation of +// the required interface function() +func (p *PingHandler) Start(c net.Conn, pt time.Duration) { + p.mu.Lock() + p.conn = c + p.stop = make(chan struct{}) + p.mu.Unlock() + checkTicker := time.NewTicker(pt / 4) + defer checkTicker.Stop() + for { + select { + case <-p.stop: + return + case <-checkTicker.C: + if atomic.LoadInt32(&p.pingOutstanding) > 0 && time.Since(p.lastPing) > (pt+pt>>1) { + p.pingFailHandler(fmt.Errorf("ping resp timed out")) + //ping outstanding and not reset in 1.5 times ping timer + return + } + if time.Since(p.lastPing) >= pt { + //time to send a ping + if _, err := packets.NewControlPacket(packets.PINGREQ).WriteTo(p.conn); err != nil { + if p.pingFailHandler != nil { + p.pingFailHandler(err) + } + return + } + atomic.AddInt32(&p.pingOutstanding, 1) + p.lastPing = time.Now() + p.debug.Println("pingHandler sending ping request") + } + } + } +} + +// Stop is the library provided Pinger's implementation of +// the required interface function() +func (p *PingHandler) Stop() { + p.mu.Lock() + defer p.mu.Unlock() + if p.stop == nil { + return + } + p.debug.Println("pingHandler stopping") + select { + case <-p.stop: + //Already stopped, do nothing + default: + close(p.stop) + } +} + +// PingResp is the library provided Pinger's implementation of +// the required interface function() +func (p *PingHandler) PingResp() { + p.debug.Println("pingHandler resetting pingOutstanding") + atomic.StoreInt32(&p.pingOutstanding, 0) +} + +// SetDebug sets the logger l to be used for printing debug +// information for the pinger +func (p *PingHandler) SetDebug(l Logger) { + p.debug = l +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/router.go b/vendor/github.com/eclipse/paho.golang/paho/router.go new file mode 100644 index 000000000..05031596f --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/router.go @@ -0,0 +1,212 @@ +package paho + +import ( + "strings" + "sync" + + "github.com/eclipse/paho.golang/packets" +) + +// MessageHandler is a type for a function that is invoked +// by a Router when it has received a Publish. 
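+// A minimal wiring example (editor's sketch, not part of the upstream file), using the
+// StandardRouter defined below; the Publish fields referenced here (Topic, Payload) are
+// assumed from this package:
+//
+//	router := NewStandardRouter()
+//	router.RegisterHandler("sensors/+/temperature", func(p *Publish) {
+//		fmt.Printf("received %q on %s\n", p.Payload, p.Topic)
+//	})
+//	// when a packets.Publish arrives from the broker: router.Route(pb)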
+type MessageHandler func(*Publish) + +// Router is an interface of the functions for a struct that is +// used to handle invoking MessageHandlers depending on the +// the topic the message was published on. +// RegisterHandler() takes a string of the topic, and a MessageHandler +// to be invoked when Publishes are received that match that topic +// UnregisterHandler() takes a string of the topic to remove +// MessageHandlers for +// Route() takes a Publish message and determines which MessageHandlers +// should be invoked +type Router interface { + RegisterHandler(string, MessageHandler) + UnregisterHandler(string) + Route(*packets.Publish) + SetDebugLogger(Logger) +} + +// StandardRouter is a library provided implementation of a Router that +// allows for unique and multiple MessageHandlers per topic +type StandardRouter struct { + sync.RWMutex + subscriptions map[string][]MessageHandler + aliases map[uint16]string + debug Logger +} + +// NewStandardRouter instantiates and returns an instance of a StandardRouter +func NewStandardRouter() *StandardRouter { + return &StandardRouter{ + subscriptions: make(map[string][]MessageHandler), + aliases: make(map[uint16]string), + debug: NOOPLogger{}, + } +} + +// RegisterHandler is the library provided StandardRouter's +// implementation of the required interface function() +func (r *StandardRouter) RegisterHandler(topic string, h MessageHandler) { + r.debug.Println("registering handler for:", topic) + r.Lock() + defer r.Unlock() + + r.subscriptions[topic] = append(r.subscriptions[topic], h) +} + +// UnregisterHandler is the library provided StandardRouter's +// implementation of the required interface function() +func (r *StandardRouter) UnregisterHandler(topic string) { + r.debug.Println("unregistering handler for:", topic) + r.Lock() + defer r.Unlock() + + delete(r.subscriptions, topic) +} + +// Route is the library provided StandardRouter's implementation +// of the required interface function() +func (r *StandardRouter) Route(pb *packets.Publish) { + r.debug.Println("routing message for:", pb.Topic) + r.RLock() + defer r.RUnlock() + + m := PublishFromPacketPublish(pb) + + var topic string + if pb.Properties.TopicAlias != nil { + r.debug.Println("message is using topic aliasing") + if pb.Topic != "" { + //Register new alias + r.debug.Printf("registering new topic alias '%d' for topic '%s'", *pb.Properties.TopicAlias, m.Topic) + r.aliases[*pb.Properties.TopicAlias] = pb.Topic + } + if t, ok := r.aliases[*pb.Properties.TopicAlias]; ok { + r.debug.Printf("aliased topic '%d' translates to '%s'", *pb.Properties.TopicAlias, m.Topic) + topic = t + } + } else { + topic = m.Topic + } + + for route, handlers := range r.subscriptions { + if match(route, topic) { + r.debug.Println("found handler for:", route) + for _, handler := range handlers { + handler(m) + } + } + } +} + +// SetDebugLogger sets the logger l to be used for printing debug +// information for the router +func (r *StandardRouter) SetDebugLogger(l Logger) { + r.debug = l +} + +func match(route, topic string) bool { + return route == topic || routeIncludesTopic(route, topic) +} + +func matchDeep(route []string, topic []string) bool { + if len(route) == 0 { + return len(topic) == 0 + } + + if len(topic) == 0 { + return route[0] == "#" + } + + if route[0] == "#" { + return true + } + + if (route[0] == "+") || (route[0] == topic[0]) { + return matchDeep(route[1:], topic[1:]) + } + return false +} + +func routeIncludesTopic(route, topic string) bool { + return matchDeep(routeSplit(route), 
topicSplit(topic)) +} + +func routeSplit(route string) []string { + if len(route) == 0 { + return nil + } + var result []string + if strings.HasPrefix(route, "$share") { + result = strings.Split(route, "/")[2:] + } else { + result = strings.Split(route, "/") + } + return result +} + +func topicSplit(topic string) []string { + if len(topic) == 0 { + return nil + } + return strings.Split(topic, "/") +} + +// SingleHandlerRouter is a library provided implementation of a Router +// that stores only a single MessageHandler and invokes this MessageHandler +// for all received Publishes +type SingleHandlerRouter struct { + sync.Mutex + aliases map[uint16]string + handler MessageHandler + debug Logger +} + +// NewSingleHandlerRouter instantiates and returns an instance of a SingleHandlerRouter +func NewSingleHandlerRouter(h MessageHandler) *SingleHandlerRouter { + return &SingleHandlerRouter{ + aliases: make(map[uint16]string), + handler: h, + debug: NOOPLogger{}, + } +} + +// RegisterHandler is the library provided SingleHandlerRouter's +// implementation of the required interface function() +func (s *SingleHandlerRouter) RegisterHandler(topic string, h MessageHandler) { + s.debug.Println("registering handler for:", topic) + s.handler = h +} + +// UnregisterHandler is the library provided SingleHandlerRouter's +// implementation of the required interface function() +func (s *SingleHandlerRouter) UnregisterHandler(topic string) {} + +// Route is the library provided SingleHandlerRouter's +// implementation of the required interface function() +func (s *SingleHandlerRouter) Route(pb *packets.Publish) { + m := PublishFromPacketPublish(pb) + + s.debug.Println("routing message for:", m.Topic) + + if pb.Properties.TopicAlias != nil { + s.debug.Println("message is using topic aliasing") + if pb.Topic != "" { + //Register new alias + s.debug.Printf("registering new topic alias '%d' for topic '%s'", *pb.Properties.TopicAlias, m.Topic) + s.aliases[*pb.Properties.TopicAlias] = pb.Topic + } + if t, ok := s.aliases[*pb.Properties.TopicAlias]; ok { + s.debug.Printf("aliased topic '%d' translates to '%s'", *pb.Properties.TopicAlias, m.Topic) + m.Topic = t + } + } + s.handler(m) +} + +// SetDebugLogger sets the logger l to be used for printing debug +// information for the router +func (s *SingleHandlerRouter) SetDebugLogger(l Logger) { + s.debug = l +} diff --git a/vendor/github.com/eclipse/paho.golang/paho/trace.go b/vendor/github.com/eclipse/paho.golang/paho/trace.go new file mode 100644 index 000000000..586c92398 --- /dev/null +++ b/vendor/github.com/eclipse/paho.golang/paho/trace.go @@ -0,0 +1,22 @@ +package paho + +type ( + // Logger interface allows implementations to provide to this package any + // object that implements the methods defined in it. + Logger interface { + Println(v ...interface{}) + Printf(format string, v ...interface{}) + } + + // NOOPLogger implements the logger that does not perform any operation + // by default. This allows us to efficiently discard the unwanted messages. 
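+	// A *log.Logger from the standard library already satisfies Logger, so debug output can
+	// be enabled with something like (editor's sketch):
+	//
+	//	l := log.New(os.Stderr, "paho: ", log.LstdFlags)
+	//	pinger.SetDebug(l)       // Pinger debug output
+	//	router.SetDebugLogger(l) // Router debug output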
+ NOOPLogger struct{} +) + +// Println is the library provided NOOPLogger's +// implementation of the required interface function() +func (NOOPLogger) Println(v ...interface{}) {} + +// Printf is the library provided NOOPLogger's +// implementation of the required interface function(){} +func (NOOPLogger) Printf(format string, v ...interface{}) {} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 000000000..30f632c57 --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,136 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. +package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "context" + "sync" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. +func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. +// The callers can request access with a given weight. +type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking until resources +// are available or ctx is done. On success, returns nil. On failure, returns +// ctx.Err() and leaves the semaphore unchanged. +// +// If ctx is already done, Acquire may still succeed without blocking. +func (s *Weighted) Acquire(ctx context.Context, n int64) error { + s.mu.Lock() + if s.size-s.cur >= n && s.waiters.Len() == 0 { + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-ctx.Done() + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-ctx.Done(): + err := ctx.Err() + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. Rather than trying to + // fix up the queue, just pretend we didn't notice the cancelation. + err = nil + default: + isFront := s.waiters.Front() == elem + s.waiters.Remove(elem) + // If we're at the front and there're extra tokens left, notify other waiters. + if isFront && s.size > s.cur { + s.notifyWaiters() + } + } + s.mu.Unlock() + return err + + case <-ready: + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. On failure, returns false and leaves the semaphore unchanged. +func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. +func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: released more than held") + } + s.notifyWaiters() + s.mu.Unlock() +} + +func (s *Weighted) notifyWaiters() { + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. 
We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. + // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. + break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 5f47f8f34..b318f0bd1 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -32,6 +32,25 @@ github.com/cenkalti/backoff/v4 # github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 +# github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20231030012137-0836a524e995 +## explicit; go 1.18 +github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 +# github.com/cloudevents/sdk-go/v2 v2.14.0 +## explicit; go 1.17 +github.com/cloudevents/sdk-go/v2 +github.com/cloudevents/sdk-go/v2/binding +github.com/cloudevents/sdk-go/v2/binding/format +github.com/cloudevents/sdk-go/v2/binding/spec +github.com/cloudevents/sdk-go/v2/client +github.com/cloudevents/sdk-go/v2/context +github.com/cloudevents/sdk-go/v2/event +github.com/cloudevents/sdk-go/v2/event/datacodec +github.com/cloudevents/sdk-go/v2/event/datacodec/json +github.com/cloudevents/sdk-go/v2/event/datacodec/text +github.com/cloudevents/sdk-go/v2/event/datacodec/xml +github.com/cloudevents/sdk-go/v2/protocol +github.com/cloudevents/sdk-go/v2/protocol/http +github.com/cloudevents/sdk-go/v2/types # github.com/coreos/go-semver v0.3.1 ## explicit; go 1.8 github.com/coreos/go-semver/semver @@ -45,6 +64,10 @@ github.com/cyphar/filepath-securejoin # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew +# github.com/eclipse/paho.golang v0.11.0 +## explicit; go 1.15 +github.com/eclipse/paho.golang/packets +github.com/eclipse/paho.golang/paho # github.com/emicklei/go-restful/v3 v3.9.0 ## explicit; go 1.13 github.com/emicklei/go-restful/v3 @@ -554,6 +577,7 @@ golang.org/x/oauth2 golang.org/x/oauth2/internal # golang.org/x/sync v0.5.0 ## explicit; go 1.18 +golang.org/x/sync/semaphore golang.org/x/sync/singleflight # golang.org/x/sys v0.13.0 ## explicit; go 1.17 @@ -1502,6 +1526,19 @@ open-cluster-management.io/api/client/work/informers/externalversions/work/v1 open-cluster-management.io/api/client/work/informers/externalversions/work/v1alpha1 open-cluster-management.io/api/client/work/listers/work/v1 open-cluster-management.io/api/client/work/listers/work/v1alpha1 +open-cluster-management.io/api/cloudevents/generic +open-cluster-management.io/api/cloudevents/generic/options +open-cluster-management.io/api/cloudevents/generic/options/mqtt +open-cluster-management.io/api/cloudevents/generic/payload +open-cluster-management.io/api/cloudevents/generic/types +open-cluster-management.io/api/cloudevents/work +open-cluster-management.io/api/cloudevents/work/agent/client +open-cluster-management.io/api/cloudevents/work/agent/codec +open-cluster-management.io/api/cloudevents/work/agent/handler +open-cluster-management.io/api/cloudevents/work/internal +open-cluster-management.io/api/cloudevents/work/payload +open-cluster-management.io/api/cloudevents/work/utils 
+open-cluster-management.io/api/cloudevents/work/watcher open-cluster-management.io/api/cluster/v1 open-cluster-management.io/api/cluster/v1alpha1 open-cluster-management.io/api/cluster/v1beta1 @@ -1509,8 +1546,10 @@ open-cluster-management.io/api/cluster/v1beta2 open-cluster-management.io/api/crdsv1beta1 open-cluster-management.io/api/feature open-cluster-management.io/api/operator/v1 +open-cluster-management.io/api/utils/work/v1/utils open-cluster-management.io/api/utils/work/v1/workapplier open-cluster-management.io/api/utils/work/v1/workbuilder +open-cluster-management.io/api/utils/work/v1/workvalidator open-cluster-management.io/api/work/v1 open-cluster-management.io/api/work/v1alpha1 # sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 diff --git a/vendor/open-cluster-management.io/api/cloudevents/generic/agentclient.go b/vendor/open-cluster-management.io/api/cloudevents/generic/agentclient.go new file mode 100644 index 000000000..95691d660 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/generic/agentclient.go @@ -0,0 +1,300 @@ +package generic + +import ( + "context" + "fmt" + "strconv" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + "k8s.io/klog/v2" + + "open-cluster-management.io/api/cloudevents/generic/options" + "open-cluster-management.io/api/cloudevents/generic/payload" + "open-cluster-management.io/api/cloudevents/generic/types" +) + +// CloudEventAgentClient is a client for an agent to resync/send/receive its resources with cloud events. +// +// An agent is a component that handles the deployment of requested resources on the managed cluster and status report +// to the source. +type CloudEventAgentClient[T ResourceObject] struct { + *baseClient + lister Lister[T] + codecs map[types.CloudEventsDataType]Codec[T] + statusHashGetter StatusHashGetter[T] + agentID string + clusterName string +} + +// NewCloudEventAgentClient returns an instance for CloudEventAgentClient. The following arguments are required to +// create a client. +// - agentOptions provides the clusterName and agentID and the cloudevents clients that are based on different event +// protocols for sending/receiving the cloudevents. +// - lister gets the resources from a cache/store of an agent. +// - statusHashGetter calculates the resource status hash. +// - codecs is list of codecs for encoding/decoding a resource objet/cloudevent to/from a cloudevent/resource objet. +func NewCloudEventAgentClient[T ResourceObject]( + ctx context.Context, + agentOptions *options.CloudEventsAgentOptions, + lister Lister[T], + statusHashGetter StatusHashGetter[T], + codecs ...Codec[T], +) (*CloudEventAgentClient[T], error) { + baseClient := &baseClient{ + cloudEventsOptions: agentOptions.CloudEventsOptions, + cloudEventsRateLimiter: NewRateLimiter(agentOptions.EventRateLimit), + } + + if err := baseClient.connect(ctx); err != nil { + return nil, err + } + + evtCodes := make(map[types.CloudEventsDataType]Codec[T]) + for _, codec := range codecs { + evtCodes[codec.EventDataType()] = codec + } + + return &CloudEventAgentClient[T]{ + baseClient: baseClient, + lister: lister, + codecs: evtCodes, + statusHashGetter: statusHashGetter, + agentID: agentOptions.AgentID, + clusterName: agentOptions.ClusterName, + }, nil +} + +// Resync the resources spec by sending a spec resync request from an agent to all sources. 
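+// The request body is a payload.ResourceVersionList describing what the agent currently
+// holds, so each source can decide what to (re)send; roughly (editor's illustration):
+//
+//	{
+//	  "resourceVersions": [
+//	    {"resourceID": "<resource-uid>", "resourceVersion": 3}
+//	  ]
+//	}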
+func (c *CloudEventAgentClient[T]) Resync(ctx context.Context) error { + // list the resource objects that are maintained by the current agent from all sources + objs, err := c.lister.List(types.ListOptions{ClusterName: c.clusterName, Source: types.SourceAll}) + if err != nil { + return err + } + + resources := &payload.ResourceVersionList{Versions: make([]payload.ResourceVersion, len(objs))} + for i, obj := range objs { + resourceVersion, err := strconv.ParseInt(obj.GetResourceVersion(), 10, 64) + if err != nil { + return err + } + + resources.Versions[i] = payload.ResourceVersion{ + ResourceID: string(obj.GetUID()), + ResourceVersion: resourceVersion, + } + } + + // only resync the resources whose event data type is registered + for eventDataType := range c.codecs { + eventType := types.CloudEventsType{ + CloudEventsDataType: eventDataType, + SubResource: types.SubResourceSpec, + Action: types.ResyncRequestAction, + } + + evt := types.NewEventBuilder(c.agentID, eventType).WithClusterName(c.clusterName).NewEvent() + if err := evt.SetData(cloudevents.ApplicationJSON, resources); err != nil { + return fmt.Errorf("failed to set data to cloud event: %v", err) + } + + if err := c.publish(ctx, evt); err != nil { + return err + } + } + + return nil +} + +// Publish a resource status from an agent to a source. +func (c *CloudEventAgentClient[T]) Publish(ctx context.Context, eventType types.CloudEventsType, obj T) error { + codec, ok := c.codecs[eventType.CloudEventsDataType] + if !ok { + return fmt.Errorf("failed to find a codec for event %s", eventType.CloudEventsDataType) + } + + if eventType.SubResource != types.SubResourceStatus { + return fmt.Errorf("unsupported event eventType %s", eventType) + } + + evt, err := codec.Encode(c.agentID, eventType, obj) + if err != nil { + return err + } + + if err := c.publish(ctx, *evt); err != nil { + return err + } + + return nil +} + +// Subscribe the events that are from the source status resync request or source resource spec request. +// For status resync request, agent publish the current resources status back as response. +// For resource spec request, agent receives resource spec and handles the spec with resource handlers. +func (c *CloudEventAgentClient[T]) Subscribe(ctx context.Context, handlers ...ResourceHandler[T]) { + c.subscribe(ctx, func(ctx context.Context, evt cloudevents.Event) { + c.receive(ctx, evt, handlers...) 
+ }) +} + +func (c *CloudEventAgentClient[T]) receive(ctx context.Context, evt cloudevents.Event, handlers ...ResourceHandler[T]) { + klog.V(4).Infof("Received event:\n%s", evt) + + eventType, err := types.ParseCloudEventsType(evt.Type()) + if err != nil { + klog.Errorf("failed to parse cloud event type %s, %v", evt.Type(), err) + return + } + + if eventType.Action == types.ResyncRequestAction { + if eventType.SubResource != types.SubResourceStatus { + klog.Warningf("unsupported resync event type %s, ignore", eventType) + return + } + + if err := c.respondResyncStatusRequest(ctx, eventType.CloudEventsDataType, evt); err != nil { + klog.Errorf("failed to resync manifestsstatus, %v", err) + } + + return + } + + if eventType.SubResource != types.SubResourceSpec { + klog.Warningf("unsupported event type %s, ignore", eventType) + return + } + + codec, ok := c.codecs[eventType.CloudEventsDataType] + if !ok { + klog.Warningf("failed to find the codec for event %s, ignore", eventType.CloudEventsDataType) + return + } + + obj, err := codec.Decode(&evt) + if err != nil { + klog.Errorf("failed to decode spec, %v", err) + return + } + + action, err := c.specAction(evt.Source(), obj) + if err != nil { + klog.Errorf("failed to generate spec action %s, %v", evt, err) + return + } + + if len(action) == 0 { + // no action is required, ignore + return + } + + for _, handler := range handlers { + if err := handler(action, obj); err != nil { + klog.Errorf("failed to handle spec event %s, %v", evt, err) + } + } +} + +// Upon receiving the status resync event, the agent responds by sending resource status events to the broker as +// follows: +// - If the event payload is empty, the agent returns the status of all resources it maintains. +// - If the event payload is not empty, the agent retrieves the resource with the specified ID and compares the +// received resource status hash with the current resource status hash. If they are not equal, the agent sends the +// resource status message. 
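+// The incoming status resync request carries a payload.ResourceStatusHashList, roughly
+// (editor's illustration):
+//
+//	{
+//	  "statusHashes": [
+//	    {"resourceID": "<resource-uid>", "statusHash": "<hash of last seen status>"}
+//	  ]
+//	}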
+func (c *CloudEventAgentClient[T]) respondResyncStatusRequest( + ctx context.Context, eventDataType types.CloudEventsDataType, evt cloudevents.Event) error { + objs, err := c.lister.List(types.ListOptions{ClusterName: c.clusterName, Source: evt.Source()}) + if err != nil { + return err + } + + statusHashes, err := payload.DecodeStatusResyncRequest(evt) + if err != nil { + return err + } + + eventType := types.CloudEventsType{ + CloudEventsDataType: eventDataType, + SubResource: types.SubResourceStatus, + Action: types.ResyncResponseAction, + } + + if len(statusHashes.Hashes) == 0 { + // publish all resources status + for _, obj := range objs { + if err := c.Publish(ctx, eventType, obj); err != nil { + return err + } + } + + return nil + } + + for _, obj := range objs { + lastHash, ok := findStatusHash(string(obj.GetUID()), statusHashes.Hashes) + if !ok { + // ignore the resource that is not on the source, but exists on the agent, wait for the source deleting it + klog.Infof("The resource %s is not found from the source, ignore", obj.GetUID()) + continue + } + + currentHash, err := c.statusHashGetter(obj) + if err != nil { + continue + } + + if currentHash == lastHash { + // the status is not changed, do nothing + continue + } + + if err := c.Publish(ctx, eventType, obj); err != nil { + return err + } + } + + return nil +} + +func (c *CloudEventAgentClient[T]) specAction(source string, obj T) (evt types.ResourceAction, err error) { + objs, err := c.lister.List(types.ListOptions{ClusterName: c.clusterName, Source: source}) + if err != nil { + return evt, err + } + + lastObj, exists := getObj(string(obj.GetUID()), objs) + if !exists { + return types.Added, nil + } + + if !obj.GetDeletionTimestamp().IsZero() { + return types.Deleted, nil + } + + if obj.GetResourceVersion() == lastObj.GetResourceVersion() { + return evt, nil + } + + return types.Modified, nil +} + +func getObj[T ResourceObject](resourceID string, objs []T) (obj T, exists bool) { + for _, obj := range objs { + if string(obj.GetUID()) == resourceID { + return obj, true + } + } + + return obj, false +} + +func findStatusHash(id string, hashes []payload.ResourceStatusHash) (string, bool) { + for _, hash := range hashes { + if id == hash.ResourceID { + return hash.StatusHash, true + } + } + + return "", false +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/generic/baseclient.go b/vendor/open-cluster-management.io/api/cloudevents/generic/baseclient.go new file mode 100644 index 000000000..be9e6b7fd --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/generic/baseclient.go @@ -0,0 +1,209 @@ +package generic + +import ( + "context" + "fmt" + "sync" + "time" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog/v2" + "k8s.io/utils/clock" + + "open-cluster-management.io/api/cloudevents/generic/options" +) + +const ( + restartReceiverSignal = iota + stopReceiverSignal +) + +type receiveFn func(ctx context.Context, evt cloudevents.Event) + +type baseClient struct { + sync.RWMutex + cloudEventsOptions options.CloudEventsOptions + cloudEventsClient cloudevents.Client + cloudEventsRateLimiter flowcontrol.RateLimiter + receiverChan chan int +} + +func (c *baseClient) connect(ctx context.Context) error { + var err error + c.cloudEventsClient, err = c.cloudEventsOptions.Client(ctx) + if err != nil { + return err + } + + // start a go routine to handle cloudevents client connection 
errors + go func() { + var err error + + // the reconnect backoff will stop at [1,5) min interval. If we don't backoff for 10min, we reset the backoff. + connBackoffManager := wait.NewExponentialBackoffManager(5*time.Second, 1*time.Minute, 10*time.Minute, 5.0, 1.0, &clock.RealClock{}) + cloudEventsClient := c.cloudEventsClient + + for { + if cloudEventsClient == nil { + klog.V(4).Infof("reconnecting the cloudevents client") + cloudEventsClient, err = c.cloudEventsOptions.Client(ctx) + // TODO enhance the cloudevents SKD to avoid wrapping the error type to distinguish the net connection + // errors + if err != nil { + // failed to reconnect, try agin + runtime.HandleError(fmt.Errorf("the cloudevents client reconnect failed, %v", err)) + <-connBackoffManager.Backoff().C() + continue + } + + // the cloudevents network connection is back, refresh the current cloudevents client and send the + // receiver restart signal + klog.V(4).Infof("the cloudevents client is reconnected") + c.resetClient(cloudEventsClient) + c.sendReceiverSignal(restartReceiverSignal) + } + + select { + case <-ctx.Done(): + return + case err, ok := <-c.cloudEventsOptions.ErrorChan(): + if !ok { + // error channel is closed, do nothing + return + } + + runtime.HandleError(fmt.Errorf("the cloudevents client is disconnected, %v", err)) + + // the cloudevents client network connection is closed, send the receiver stop signal, set the current + // client to nil and retry + c.sendReceiverSignal(stopReceiverSignal) + + cloudEventsClient = nil + c.resetClient(cloudEventsClient) + + <-connBackoffManager.Backoff().C() + } + } + }() + + return nil +} + +func (c *baseClient) publish(ctx context.Context, evt cloudevents.Event) error { + now := time.Now() + + if err := c.cloudEventsRateLimiter.Wait(ctx); err != nil { + return fmt.Errorf("client rate limiter Wait returned an error: %w", err) + } + + latency := time.Since(now) + if latency > longThrottleLatency { + klog.Warningf(fmt.Sprintf("Waited for %v due to client-side throttling, not priority and fairness, request: %s", + latency, evt)) + } + + sendingCtx, err := c.cloudEventsOptions.WithContext(ctx, evt.Context) + if err != nil { + return err + } + + klog.V(4).Infof("Sent event: %v\n%s", ctx, evt) + + // make sure the current client is the newest + c.RLock() + defer c.RUnlock() + + if c.cloudEventsClient == nil { + return fmt.Errorf("the cloudevents client is not ready") + } + + if result := c.cloudEventsClient.Send(sendingCtx, evt); cloudevents.IsUndelivered(result) { + return fmt.Errorf("failed to send event %s, %v", evt, result) + } + + return nil +} + +func (c *baseClient) subscribe(ctx context.Context, receive receiveFn) { + c.Lock() + defer c.Unlock() + + // make sure there is only one subscription go routine starting for one client. 
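+	// Calling subscribe again is therefore a no-op; after a reconnect the existing receiver is
+	// restarted via restartReceiverSignal rather than by a second subscription (editor's note).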
+ if c.receiverChan != nil { + klog.Warningf("the subscription has already started") + return + } + + c.receiverChan = make(chan int) + + // start a go routine to handle cloudevents subscription + go func() { + receiverCtx, receiverCancel := context.WithCancel(context.TODO()) + cloudEventsClient := c.cloudEventsClient + + for { + if cloudEventsClient != nil { + // TODO send a resync request + + go func() { + if err := cloudEventsClient.StartReceiver(receiverCtx, func(evt cloudevents.Event) { + receive(receiverCtx, evt) + }); err != nil { + runtime.HandleError(fmt.Errorf("failed to receive cloudevents, %v", err)) + } + }() + } + + select { + case <-ctx.Done(): + receiverCancel() + close(c.receiverChan) + return + case signal, ok := <-c.receiverChan: + if !ok { + // receiver channel is closed, stop the receiver + receiverCancel() + return + } + + switch signal { + case restartReceiverSignal: + klog.V(4).Infof("restart the cloudevents receiver") + // make sure the current client is the newest + c.RLock() + cloudEventsClient = c.cloudEventsClient + c.RUnlock() + + // rebuild the receiver context + receiverCtx, receiverCancel = context.WithCancel(context.TODO()) + case stopReceiverSignal: + klog.V(4).Infof("stop the cloudevents receiver") + receiverCancel() + cloudEventsClient = nil + default: + runtime.HandleError(fmt.Errorf("unknown receiver signal %d", signal)) + } + } + } + }() +} + +func (c *baseClient) resetClient(client cloudevents.Client) { + c.Lock() + defer c.Unlock() + + c.cloudEventsClient = client +} + +func (c *baseClient) sendReceiverSignal(signal int) { + c.RLock() + defer c.RUnlock() + + if c.receiverChan != nil { + c.receiverChan <- signal + } +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/generic/interface.go b/vendor/open-cluster-management.io/api/cloudevents/generic/interface.go new file mode 100644 index 000000000..484b99255 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/generic/interface.go @@ -0,0 +1,65 @@ +package generic + +import ( + "context" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubetypes "k8s.io/apimachinery/pkg/types" + + "open-cluster-management.io/api/cloudevents/generic/types" +) + +// ResourceHandler handles the received resource object. +type ResourceHandler[T ResourceObject] func(action types.ResourceAction, obj T) error + +// StatusHashGetter gets the status hash of one resource object. +type StatusHashGetter[T ResourceObject] func(obj T) (string, error) + +type ResourceObject interface { + // GetUID returns the resource ID of this object. The resource ID represents the unique identifier for this object. + // The source should ensure its uniqueness and consistency. + GetUID() kubetypes.UID + + // GetResourceVersion returns the resource version of this object. The resource version is a required int64 sequence + // number property that must be incremented by the source whenever this resource changes. + // The source should guarantee its incremental nature. + GetResourceVersion() string + + // GetDeletionTimestamp returns the deletion timestamp of this object. The deletiontimestamp is an optional + // timestamp property representing the resource is deleting from the source, the agent needs to clean up the + // resource from its cluster. + GetDeletionTimestamp() *metav1.Time +} + +type Lister[T ResourceObject] interface { + // List returns the list of resource objects that are maintained by source/agent. 
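+	// Implementations usually adapt an informer cache or store; a minimal in-memory sketch
+	// (editor's illustration, not part of the package):
+	//
+	//	type sliceLister[T ResourceObject] struct{ objs []T }
+	//
+	//	func (l *sliceLister[T]) List(_ types.ListOptions) ([]T, error) { return l.objs, nil }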
+ List(options types.ListOptions) ([]T, error) +} + +type Codec[T ResourceObject] interface { + // EventDataType indicates which type of the event data the codec is used for. + EventDataType() types.CloudEventsDataType + + // Encode a resource object to cloudevents event. + // Each event should have the following extensions: `resourceid`, `resourceversion` and `clustername`. + // The source set the `deletiontimestamp` extension to indicate one resource object is deleting from a source. + // The agent set the `originalsource` extension to indicate one resource belonged to which source. + Encode(source string, eventType types.CloudEventsType, obj T) (*cloudevents.Event, error) + + // Decode a cloudevents event to a resource object. + Decode(event *cloudevents.Event) (T, error) +} + +type CloudEventsClient[T ResourceObject] interface { + // Resync the resources of one source/agent by sending resync request. + Resync(ctx context.Context) error + + // Publish the resources spec/status event to the broker. + Publish(ctx context.Context, eventType types.CloudEventsType, obj T) error + + // Subscribe the resources status/spec event to the broker to receive the resources status/spec and use + // ResourceHandler to handle them. + Subscribe(ctx context.Context, handlers ...ResourceHandler[T]) +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/generic/options/mqtt/agentoptions.go b/vendor/open-cluster-management.io/api/cloudevents/generic/options/mqtt/agentoptions.go new file mode 100644 index 000000000..3f9361870 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/generic/options/mqtt/agentoptions.go @@ -0,0 +1,87 @@ +package mqtt + +import ( + "context" + "fmt" + "strings" + + cloudeventsmqtt "github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2" + cloudevents "github.com/cloudevents/sdk-go/v2" + cloudeventscontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/eclipse/paho.golang/paho" + + "open-cluster-management.io/api/cloudevents/generic/options" + "open-cluster-management.io/api/cloudevents/generic/types" +) + +type mqttAgentOptions struct { + MQTTOptions + errorChan chan error + clusterName string + agentID string +} + +func NewAgentOptions(mqttOptions *MQTTOptions, clusterName, agentID string) *options.CloudEventsAgentOptions { + return &options.CloudEventsAgentOptions{ + CloudEventsOptions: &mqttAgentOptions{ + MQTTOptions: *mqttOptions, + errorChan: make(chan error), + clusterName: clusterName, + agentID: agentID, + }, + AgentID: agentID, + ClusterName: clusterName, + } +} + +func (o *mqttAgentOptions) WithContext(ctx context.Context, evtCtx cloudevents.EventContext) (context.Context, error) { + eventType, err := types.ParseCloudEventsType(evtCtx.GetType()) + if err != nil { + return nil, fmt.Errorf("unsupported event type %s, %v", eventType, err) + } + + if eventType.Action == types.ResyncRequestAction { + // agent publishes event to spec resync topic to request to get resources spec from all sources + topic := strings.Replace(SpecResyncTopic, "+", o.clusterName, -1) + return cloudeventscontext.WithTopic(ctx, topic), nil + } + + // agent publishes event to status topic to send the resource status from a specified cluster + originalSource, err := evtCtx.GetExtension(types.ExtensionOriginalSource) + if err != nil { + return nil, err + } + + statusTopic := strings.Replace(StatusTopic, "+", fmt.Sprintf("%s", originalSource), 1) + statusTopic = strings.Replace(statusTopic, "+", o.clusterName, -1) + return cloudeventscontext.WithTopic(ctx, statusTopic), 
nil +} + +func (o *mqttAgentOptions) Client(ctx context.Context) (cloudevents.Client, error) { + receiver, err := o.GetCloudEventsClient( + ctx, + fmt.Sprintf("%s-client", o.agentID), + func(err error) { + o.errorChan <- err + }, + cloudeventsmqtt.WithPublish(&paho.Publish{QoS: byte(o.PubQoS)}), + cloudeventsmqtt.WithSubscribe( + &paho.Subscribe{ + Subscriptions: map[string]paho.SubscribeOptions{ + // receiving the resources spec from sources with spec topic + replaceNth(SpecTopic, "+", o.clusterName, 2): {QoS: byte(o.SubQoS)}, + // receiving the resources status resync request from sources with status resync topic + StatusResyncTopic: {QoS: byte(o.SubQoS)}, + }, + }, + ), + ) + if err != nil { + return nil, err + } + return receiver, nil +} + +func (o *mqttAgentOptions) ErrorChan() <-chan error { + return o.errorChan +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/generic/options/mqtt/options.go b/vendor/open-cluster-management.io/api/cloudevents/generic/options/mqtt/options.go new file mode 100644 index 000000000..89992b00c --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/generic/options/mqtt/options.go @@ -0,0 +1,169 @@ +package mqtt + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "os" + "strings" + + cloudeventsmqtt "github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2" + cloudevents "github.com/cloudevents/sdk-go/v2" + "github.com/eclipse/paho.golang/packets" + "github.com/eclipse/paho.golang/paho" + "github.com/spf13/pflag" +) + +const ( + // SpecTopic is a MQTT topic for resource spec. + SpecTopic = "sources/+/clusters/+/spec" + + // StatusTopic is a MQTT topic for resource status. + StatusTopic = "sources/+/clusters/+/status" + + // SpecResyncTopic is a MQTT topic for resource spec resync. + SpecResyncTopic = "sources/clusters/+/specresync" + + // StatusResyncTopic is a MQTT topic for resource status resync. 
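+	// In the spec and status patterns the first "+" is the source ID and the second the
+	// managed cluster name; the agent/source options below narrow them before use, e.g. an
+	// agent on cluster1 subscribes to "sources/+/clusters/cluster1/spec" and publishes status
+	// for source1 to "sources/source1/clusters/cluster1/status" (editor's note).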
+ StatusResyncTopic = "sources/+/clusters/statusresync" +) + +type MQTTOptions struct { + BrokerHost string + Username string + Password string + CAFile string + ClientCertFile string + ClientKeyFile string + KeepAlive uint16 + PubQoS int + SubQoS int +} + +func NewMQTTOptions() *MQTTOptions { + return &MQTTOptions{ + KeepAlive: 60, + PubQoS: 1, + SubQoS: 1, + } +} + +func (o *MQTTOptions) AddFlags(flags *pflag.FlagSet) { + flags.StringVar(&o.BrokerHost, "mqtt-broker-host", o.BrokerHost, "The host of MQTT broker") + flags.StringVar(&o.Username, "mqtt-username", o.Username, "The username to connect the MQTT broker") + flags.StringVar(&o.Password, "mqtt-password", o.Password, "The password to connect the MQTT broker") + flags.StringVar(&o.CAFile, "mqtt-broke-ca", o.CAFile, "A file containing trusted CA certificates MQTT broker") + flags.StringVar(&o.ClientCertFile, "mqtt-client-certificate", o.ClientCertFile, "The MQTT client certificate file") + flags.StringVar(&o.ClientKeyFile, "mqtt-client-key", o.ClientKeyFile, "The MQTT client private key file") + flags.Uint16Var(&o.KeepAlive, "mqtt-keep-alive", o.KeepAlive, "Keep alive in seconds for MQTT clients") + flags.IntVar(&o.SubQoS, "mqtt-sub-qos", o.SubQoS, "The OoS for subscribe") + flags.IntVar(&o.PubQoS, "mqtt-pub-qos", o.PubQoS, "The Qos for publish") +} + +func (o *MQTTOptions) GetNetConn() (net.Conn, error) { + if len(o.CAFile) != 0 { + certPool, err := x509.SystemCertPool() + if err != nil { + return nil, err + } + + caPEM, err := os.ReadFile(o.CAFile) + if err != nil { + return nil, err + } + + if ok := certPool.AppendCertsFromPEM(caPEM); !ok { + return nil, fmt.Errorf("invalid CA %s", o.CAFile) + } + + clientCerts, err := tls.LoadX509KeyPair(o.ClientCertFile, o.ClientKeyFile) + if err != nil { + return nil, err + } + + conn, err := tls.Dial("tcp", o.BrokerHost, &tls.Config{ + RootCAs: certPool, + Certificates: []tls.Certificate{clientCerts}, + }) + if err != nil { + return nil, fmt.Errorf("failed to connect to MQTT broker %s, %v", o.BrokerHost, err) + } + + // ensure parallel writes are thread-Safe + return packets.NewThreadSafeConn(conn), nil + } + + conn, err := net.Dial("tcp", o.BrokerHost) + if err != nil { + return nil, fmt.Errorf("failed to connect to MQTT broker %s, %v", o.BrokerHost, err) + } + + // ensure parallel writes are thread-Safe + return packets.NewThreadSafeConn(conn), nil +} + +func (o *MQTTOptions) GetMQTTConnectOption(clientID string) *paho.Connect { + connect := &paho.Connect{ + ClientID: clientID, + KeepAlive: o.KeepAlive, + CleanStart: true, + } + + if len(o.Username) != 0 { + connect.Username = o.Username + connect.UsernameFlag = true + } + + if len(o.Password) != 0 { + connect.Password = []byte(o.Password) + connect.PasswordFlag = true + } + + return connect +} + +func (o *MQTTOptions) GetCloudEventsClient( + ctx context.Context, + clientID string, + errorHandler func(error), + clientOpts ...cloudeventsmqtt.Option, +) (cloudevents.Client, error) { + netConn, err := o.GetNetConn() + if err != nil { + return nil, err + } + + config := &paho.ClientConfig{ + ClientID: clientID, + Conn: netConn, + OnClientError: errorHandler, + } + + opts := []cloudeventsmqtt.Option{cloudeventsmqtt.WithConnect(o.GetMQTTConnectOption(clientID))} + opts = append(opts, clientOpts...) + protocol, err := cloudeventsmqtt.New(ctx, config, opts...) + if err != nil { + return nil, err + } + + return cloudevents.NewClient(protocol) +} + +// Replace the nth occurrence of old in str by new. 
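+// For example (editor's illustration):
+//
+//	replaceNth("sources/+/clusters/+/spec", "+", "cluster1", 2)     // "sources/+/clusters/cluster1/spec"
+//	replaceNth("sources/clusters/+/specresync", "+", "cluster1", 2) // unchanged: only one "+"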
+func replaceNth(str, old, new string, n int) string { + i := 0 + for m := 1; m <= n; m++ { + x := strings.Index(str[i:], old) + if x < 0 { + break + } + i += x + if m == n { + return str[:i] + new + str[i+len(old):] + } + i += len(old) + } + return str +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/generic/options/mqtt/sourceoptions.go b/vendor/open-cluster-management.io/api/cloudevents/generic/options/mqtt/sourceoptions.go new file mode 100644 index 000000000..5e4f20898 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/generic/options/mqtt/sourceoptions.go @@ -0,0 +1,83 @@ +package mqtt + +import ( + "context" + "fmt" + "strings" + + cloudeventsmqtt "github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2" + cloudevents "github.com/cloudevents/sdk-go/v2" + cloudeventscontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/eclipse/paho.golang/paho" + + "open-cluster-management.io/api/cloudevents/generic/options" + "open-cluster-management.io/api/cloudevents/generic/types" +) + +type mqttSourceOptions struct { + MQTTOptions + errorChan chan error + sourceID string +} + +func NewSourceOptions(mqttOptions *MQTTOptions, sourceID string) *options.CloudEventsSourceOptions { + return &options.CloudEventsSourceOptions{ + CloudEventsOptions: &mqttSourceOptions{ + MQTTOptions: *mqttOptions, + errorChan: make(chan error), + sourceID: sourceID, + }, + SourceID: sourceID, + } +} + +func (o *mqttSourceOptions) WithContext(ctx context.Context, evtCtx cloudevents.EventContext) (context.Context, error) { + eventType, err := types.ParseCloudEventsType(evtCtx.GetType()) + if err != nil { + return nil, fmt.Errorf("unsupported event type %s, %v", eventType, err) + } + + if eventType.Action == types.ResyncRequestAction { + // source publishes event to status resync topic to request to get resources status from all clusters + return cloudeventscontext.WithTopic(ctx, strings.Replace(StatusResyncTopic, "+", o.sourceID, -1)), nil + } + + clusterName, err := evtCtx.GetExtension(types.ExtensionClusterName) + if err != nil { + return nil, err + } + + // source publishes event to spec topic to send the resource spec to a specified cluster + specTopic := strings.Replace(SpecTopic, "+", o.sourceID, 1) + specTopic = strings.Replace(specTopic, "+", fmt.Sprintf("%s", clusterName), -1) + return cloudeventscontext.WithTopic(ctx, specTopic), nil +} + +func (o *mqttSourceOptions) Client(ctx context.Context) (cloudevents.Client, error) { + receiver, err := o.GetCloudEventsClient( + ctx, + fmt.Sprintf("%s-client", o.sourceID), + func(err error) { + o.errorChan <- err + }, + cloudeventsmqtt.WithPublish(&paho.Publish{QoS: byte(o.PubQoS)}), + cloudeventsmqtt.WithSubscribe( + &paho.Subscribe{ + Subscriptions: map[string]paho.SubscribeOptions{ + // receiving the resources status from agents with status topic + strings.Replace(StatusTopic, "+", o.sourceID, 1): {QoS: byte(o.SubQoS)}, + // receiving the resources spec resync request from agents with spec resync topic + SpecResyncTopic: {QoS: byte(o.SubQoS)}, + }, + }, + ), + ) + if err != nil { + return nil, err + } + return receiver, nil +} + +func (o *mqttSourceOptions) ErrorChan() <-chan error { + return o.errorChan +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/generic/options/options.go b/vendor/open-cluster-management.io/api/cloudevents/generic/options/options.go new file mode 100644 index 000000000..57fbf14e9 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/generic/options/options.go @@ -0,0 +1,66 
@@ +package options + +import ( + "context" + + cloudevents "github.com/cloudevents/sdk-go/v2" +) + +// CloudEventsOptions provides cloudevents clients to send/receive cloudevents based on different event protocol. +// +// Available implementations: +// - MQTT +type CloudEventsOptions interface { + // WithContext returns back a new context with the given cloudevent context. The new context will be used when + // sending a cloudevent.The new context is protocol-dependent, for example, for MQTT, the new context should contain + // the MQTT topic, for Kafka, the context should contain the message key, etc. + WithContext(ctx context.Context, evtContext cloudevents.EventContext) (context.Context, error) + + // Client returns a cloudevents client for sending and receiving cloudevents + Client(ctx context.Context) (cloudevents.Client, error) + + // ErrorChan returns a chan which will receive the cloudevents connection error. The source/agent client will try to + // reconnect the when this error occurs. + ErrorChan() <-chan error +} + +// EventRateLimit for limiting the event sending rate. +type EventRateLimit struct { + // QPS indicates the maximum QPS to send the event. + // If it's less than or equal to zero, the DefaultQPS (50) will be used. + QPS float32 + + // Maximum burst for throttle. + // If it's less than or equal to zero, the DefaultBurst (100) will be used. + Burst int +} + +// CloudEventsSourceOptions provides the required options to build a source CloudEventsClient +type CloudEventsSourceOptions struct { + // CloudEventsOptions provides cloudevents clients to send/receive cloudevents based on different event protocol. + CloudEventsOptions CloudEventsOptions + + // SourceID is a unique identifier for a source, for example, it can generate a source ID by hashing the hub cluster + // URL and appending the controller name. Similarly, a RESTful service can select a unique name or generate a unique + // ID in the associated database for its source identification. + SourceID string + + // EventRateLimit limits the event sending rate. + EventRateLimit EventRateLimit +} + +// CloudEventsAgentOptions provides the required options to build an agent CloudEventsClient +type CloudEventsAgentOptions struct { + // CloudEventsOptions provides cloudevents clients to send/receive cloudevents based on different event protocol. + CloudEventsOptions CloudEventsOptions + + // AgentID is a unique identifier for an agent, for example, it can consist of a managed cluster name and an agent + // name. + AgentID string + + // ClusterName is the name of a managed cluster on which the agent runs. + ClusterName string + + // EventRateLimit limits the event sending rate. + EventRateLimit EventRateLimit +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/generic/payload/payload.go b/vendor/open-cluster-management.io/api/cloudevents/generic/payload/payload.go new file mode 100644 index 000000000..cacfcd950 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/generic/payload/payload.go @@ -0,0 +1,48 @@ +package payload + +import ( + "encoding/json" + "fmt" + + cloudevents "github.com/cloudevents/sdk-go/v2" +) + +type ResourceVersion struct { + ResourceID string `json:"resourceID"` + ResourceVersion int64 `json:"resourceVersion"` +} + +type ResourceStatusHash struct { + ResourceID string `json:"resourceID"` + StatusHash string `json:"statusHash"` +} + +// ResourceVersionList represents the resource versions of the resources maintained by the agent. 
+// The item of this list includes the resource ID and resource version. +type ResourceVersionList struct { + Versions []ResourceVersion `json:"resourceVersions"` +} + +// ResourceStatusHashList represents the status hash of the resources maintained by the source. +// The item of this list includes the resource ID and resource status hash. +type ResourceStatusHashList struct { + Hashes []ResourceStatusHash `json:"statusHashes"` +} + +func DecodeSpecResyncRequest(evt cloudevents.Event) (*ResourceVersionList, error) { + versions := &ResourceVersionList{} + data := evt.Data() + if err := json.Unmarshal(data, versions); err != nil { + return nil, fmt.Errorf("failed to unmarshal spec resync request payload %s, %v", string(data), err) + } + return versions, nil +} + +func DecodeStatusResyncRequest(evt cloudevents.Event) (*ResourceStatusHashList, error) { + hashes := &ResourceStatusHashList{} + data := evt.Data() + if err := json.Unmarshal(data, hashes); err != nil { + return nil, fmt.Errorf("failed to unmarshal status resync request payload %s, %v", string(data), err) + } + return hashes, nil +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/generic/ratelimiter.go b/vendor/open-cluster-management.io/api/cloudevents/generic/ratelimiter.go new file mode 100644 index 000000000..a1061d318 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/generic/ratelimiter.go @@ -0,0 +1,34 @@ +package generic + +import ( + "time" + + "k8s.io/client-go/util/flowcontrol" + + "open-cluster-management.io/api/cloudevents/generic/options" +) + +// longThrottleLatency defines threshold for logging requests. All requests being +// throttled (via the provided rateLimiter) for more than longThrottleLatency will +// be logged. +const longThrottleLatency = 1 * time.Second + +const ( + // TODO we may adjust these after performance test + DefaultQPS float32 = 50.0 + DefaultBurst int = 100 +) + +func NewRateLimiter(limit options.EventRateLimit) flowcontrol.RateLimiter { + qps := limit.QPS + if qps <= 0.0 { + qps = DefaultQPS + } + + burst := limit.Burst + if burst <= 0 { + burst = DefaultBurst + } + + return flowcontrol.NewTokenBucketRateLimiter(qps, burst) +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/generic/sourceclient.go b/vendor/open-cluster-management.io/api/cloudevents/generic/sourceclient.go new file mode 100644 index 000000000..d0dc09f9c --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/generic/sourceclient.go @@ -0,0 +1,309 @@ +package generic + +import ( + "context" + "fmt" + "strconv" + + cloudevents "github.com/cloudevents/sdk-go/v2" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" + + "open-cluster-management.io/api/cloudevents/generic/options" + "open-cluster-management.io/api/cloudevents/generic/payload" + "open-cluster-management.io/api/cloudevents/generic/types" +) + +// CloudEventSourceClient is a client for a source to resync/send/receive its resources with cloud events. +// +// A source is a component that runs on a server, it can be a controller on the hub cluster or a RESTful service +// handling resource requests. +type CloudEventSourceClient[T ResourceObject] struct { + *baseClient + lister Lister[T] + codecs map[types.CloudEventsDataType]Codec[T] + statusHashGetter StatusHashGetter[T] + sourceID string +} + +// NewCloudEventSourceClient returns an instance for CloudEventSourceClient. 
The following arguments are required to +// create a client +// - sourceOptions provides the sourceID and the cloudevents clients that are based on different event protocols for +// sending/receiving the cloudevents. +// - lister gets the resources from a cache/store of a source. +// - statusHashGetter calculates the resource status hash. +// - codecs is list of codecs for encoding/decoding a resource objet/cloudevent to/from a cloudevent/resource objet. +func NewCloudEventSourceClient[T ResourceObject]( + ctx context.Context, + sourceOptions *options.CloudEventsSourceOptions, + lister Lister[T], + statusHashGetter StatusHashGetter[T], + codecs ...Codec[T], +) (*CloudEventSourceClient[T], error) { + baseClient := &baseClient{ + cloudEventsOptions: sourceOptions.CloudEventsOptions, + cloudEventsRateLimiter: NewRateLimiter(sourceOptions.EventRateLimit), + } + + if err := baseClient.connect(ctx); err != nil { + return nil, err + } + + evtCodes := make(map[types.CloudEventsDataType]Codec[T]) + for _, codec := range codecs { + evtCodes[codec.EventDataType()] = codec + } + + return &CloudEventSourceClient[T]{ + baseClient: baseClient, + lister: lister, + codecs: evtCodes, + statusHashGetter: statusHashGetter, + sourceID: sourceOptions.SourceID, + }, nil +} + +// Resync the resources status by sending a status resync request from a source to all clusters. +func (c *CloudEventSourceClient[T]) Resync(ctx context.Context) error { + // list the resource objects that are maintained by the current source from all clusters + objs, err := c.lister.List(types.ListOptions{ClusterName: types.ClusterAll, Source: c.sourceID}) + if err != nil { + return err + } + + hashes := &payload.ResourceStatusHashList{Hashes: make([]payload.ResourceStatusHash, len(objs))} + for i, obj := range objs { + statusHash, err := c.statusHashGetter(obj) + if err != nil { + return err + } + + hashes.Hashes[i] = payload.ResourceStatusHash{ + ResourceID: string(obj.GetUID()), + StatusHash: statusHash, + } + } + + // only resync the resources whose event data type is registered + for eventDataType := range c.codecs { + eventType := types.CloudEventsType{ + CloudEventsDataType: eventDataType, + SubResource: types.SubResourceStatus, + Action: types.ResyncRequestAction, + } + + evt := types.NewEventBuilder(c.sourceID, eventType).NewEvent() + if err := evt.SetData(cloudevents.ApplicationJSON, hashes); err != nil { + return fmt.Errorf("failed to set data to cloud event: %v", err) + } + + if err := c.publish(ctx, evt); err != nil { + return err + } + } + + return nil +} + +// Publish a resource spec from a source to an agent. +func (c *CloudEventSourceClient[T]) Publish(ctx context.Context, eventType types.CloudEventsType, obj T) error { + if eventType.SubResource != types.SubResourceSpec { + return fmt.Errorf("unsupported event eventType %s", eventType) + } + + codec, ok := c.codecs[eventType.CloudEventsDataType] + if !ok { + return fmt.Errorf("failed to find the codec for event %s", eventType.CloudEventsDataType) + } + + evt, err := codec.Encode(c.sourceID, eventType, obj) + if err != nil { + return err + } + + if err := c.publish(ctx, *evt); err != nil { + return err + } + + return nil +} + +// Subscribe the events that are from the agent spec resync request or agent resource status request. +// For spec resync request, source publish the current resources spec back as response. +// For resource status request, source receives resource status and handles the status with resource handlers. 
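+// A typical source registers a handler that persists the reported status (editor's sketch;
+// the resource type and the store are assumptions, the handler signature is ResourceHandler[T]):
+//
+//	client.Subscribe(ctx, func(action types.ResourceAction, obj *MyResource) error {
+//		return store.UpdateStatus(ctx, obj)
+//	})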
+func (c *CloudEventSourceClient[T]) Subscribe(ctx context.Context, handlers ...ResourceHandler[T]) { + c.subscribe(ctx, func(ctx context.Context, evt cloudevents.Event) { + c.receive(ctx, evt, handlers...) + }) +} + +func (c *CloudEventSourceClient[T]) receive(ctx context.Context, evt cloudevents.Event, handlers ...ResourceHandler[T]) { + klog.V(4).Infof("Received event:\n%s", evt) + + eventType, err := types.ParseCloudEventsType(evt.Type()) + if err != nil { + klog.Errorf("failed to parse cloud event type, %v", err) + return + } + + if eventType.Action == types.ResyncRequestAction { + if eventType.SubResource != types.SubResourceSpec { + klog.Warningf("unsupported event type %s, ignore", eventType) + return + } + + if err := c.respondResyncSpecRequest(ctx, eventType.CloudEventsDataType, evt); err != nil { + klog.Errorf("failed to resync resources spec, %v", err) + } + + return + } + + codec, ok := c.codecs[eventType.CloudEventsDataType] + if !ok { + klog.Warningf("failed to find the codec for event %s, ignore", eventType.CloudEventsDataType) + return + } + + if eventType.SubResource != types.SubResourceStatus { + klog.Warningf("unsupported event type %s, ignore", eventType) + return + } + + clusterName, err := evt.Context.GetExtension(types.ExtensionClusterName) + if err != nil { + klog.Errorf("failed to find cluster name, %v", err) + return + } + + obj, err := codec.Decode(&evt) + if err != nil { + klog.Errorf("failed to decode status, %v", err) + return + } + + action, err := c.statusAction(fmt.Sprintf("%s", clusterName), obj) + if err != nil { + klog.Errorf("failed to generate status event %s, %v", evt, err) + return + } + + if len(action) == 0 { + // no action is required, ignore + return + } + + for _, handler := range handlers { + if err := handler(action, obj); err != nil { + klog.Errorf("failed to handle status event %s, %v", evt, err) + } + } +} + +// Upon receiving the spec resync event, the source responds by sending resource status events to the broker as follows: +// - If the request event message is empty, the source returns all resources associated with the work agent. +// - If the request event message contains resource IDs and versions, the source retrieves the resource with the +// specified ID and compares the versions. +// - If the requested resource version matches the source's current maintained resource version, the source does not +// resend the resource. +// - If the requested resource version is older than the source's current maintained resource version, the source +// sends the resource. 
+func (c *CloudEventSourceClient[T]) respondResyncSpecRequest( + ctx context.Context, evtDataType types.CloudEventsDataType, evt cloudevents.Event) error { + resourceVersions, err := payload.DecodeSpecResyncRequest(evt) + if err != nil { + return err + } + + eventType := types.CloudEventsType{ + CloudEventsDataType: evtDataType, + SubResource: types.SubResourceSpec, + Action: types.ResyncResponseAction, + } + + clusterName, err := evt.Context.GetExtension(types.ExtensionClusterName) + if err != nil { + return err + } + + objs, err := c.lister.List(types.ListOptions{ClusterName: fmt.Sprintf("%s", clusterName), Source: c.sourceID}) + if err != nil { + return err + } + + for _, obj := range objs { + lastResourceVersion := findResourceVersion(string(obj.GetUID()), resourceVersions.Versions) + currentResourceVersion, err := strconv.ParseInt(obj.GetResourceVersion(), 10, 64) + if err != nil { + continue + } + + if currentResourceVersion > lastResourceVersion { + if err := c.Publish(ctx, eventType, obj); err != nil { + return err + } + } + } + + // the resources do not exist on the source, but exist on the agent, delete them + for _, rv := range resourceVersions.Versions { + _, exists := getObj(rv.ResourceID, objs) + if exists { + continue + } + + // send a delete event for the current resource + evt := types.NewEventBuilder(c.sourceID, eventType). + WithResourceID(rv.ResourceID). + WithResourceVersion(rv.ResourceVersion). + WithClusterName(fmt.Sprintf("%s", clusterName)). + WithDeletionTimestamp(metav1.Now().Time). + NewEvent() + if err := c.publish(ctx, evt); err != nil { + return err + } + } + + return nil +} + +func (c *CloudEventSourceClient[T]) statusAction(clusterName string, obj T) (evt types.ResourceAction, err error) { + objs, err := c.lister.List(types.ListOptions{ClusterName: clusterName, Source: c.sourceID}) + if err != nil { + return evt, err + } + + lastObj, exists := getObj(string(obj.GetUID()), objs) + if !exists { + return evt, nil + } + + lastStatusHash, err := c.statusHashGetter(lastObj) + if err != nil { + klog.Warningf("failed to hash object %s status, %v", lastObj.GetUID(), err) + return evt, err + } + + currentStatusHash, err := c.statusHashGetter(obj) + if err != nil { + klog.Warningf("failed to hash object %s status, %v", obj.GetUID(), err) + return evt, nil + } + + if lastStatusHash == currentStatusHash { + return evt, nil + } + + return types.StatusModified, nil +} + +func findResourceVersion(id string, versions []payload.ResourceVersion) int64 { + for _, version := range versions { + if id == version.ResourceID { + return version.ResourceVersion + } + } + + return 0 +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/generic/types/types.go b/vendor/open-cluster-management.io/api/cloudevents/generic/types/types.go new file mode 100644 index 000000000..d3453f5fa --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/generic/types/types.go @@ -0,0 +1,227 @@ +package types + +import ( + "fmt" + "strings" + "time" + + cloudevents "github.com/cloudevents/sdk-go/v2" + "github.com/google/uuid" +) + +const ( + // ClusterAll is the default argument to specify on a context when you want to list or filter resources across all + // managed clusters. + ClusterAll = "" + + // SourceAll is the default argument to specify on a context when you want to list or filter resources across all + // sources. + SourceAll = "" +) + +// EventSubResource describes the subresource of a cloud event. Only `spec` and `status` are supported. 
+type EventSubResource string + +const ( + // SubResourceSpec represents the cloud event data is from the resource spec. + SubResourceSpec EventSubResource = "spec" + + // SubResourceStatus represents the cloud event data is from the resource status. + SubResourceStatus EventSubResource = "status" +) + +// EventAction describes the expected action of a cloud event. +type EventAction string + +const ( + // ResyncRequestAction represents the cloud event is for the resync request. + ResyncRequestAction EventAction = "resync_request" + + // ResyncResponseAction represents the cloud event is for the resync response. + ResyncResponseAction EventAction = "resync_response" +) + +const ( + // ExtensionResourceID is the cloud event extension key of the resource ID. + ExtensionResourceID = "resourceid" + + // ExtensionResourceVersion is the cloud event extension key of the resource version. + ExtensionResourceVersion = "resourceversion" + + // ExtensionDeletionTimestamp is the cloud event extension key of the deletion timestamp. + ExtensionDeletionTimestamp = "deletiontimestamp" + + // ExtensionClusterName is the cloud event extension key of the cluster name. + ExtensionClusterName = "clustername" + + // ExtensionOriginalSource is the cloud event extension key of the original source. + ExtensionOriginalSource = "originalsource" +) + +// ResourceAction represents an action on a resource object on the source or agent. +type ResourceAction string + +const ( + // Added represents a resource is added on the source part. + Added ResourceAction = "ADDED" + + // Modified represents a resource is modified on the source part. + Modified ResourceAction = "MODIFIED" + + // StatusModified represents the status of a resource is modified on the agent part. + StatusModified ResourceAction = "STATUSMODIFIED" + + // Deleted represents a resource is deleted from the source part. + Deleted ResourceAction = "DELETED" +) + +// ListOptions is the query options for listing the resource objects from the source/agent. +type ListOptions struct { + // ClusterName restricts the list of returned objects by their cluster name; it is used by a source client. + // Defaults to all clusters. + ClusterName string + + // Source restricts the list of returned objects by their source ID; it is used by an agent client. + // Defaults to all sources. + Source string +} + +// CloudEventsDataType uniquely identifies the type of cloud event data. +type CloudEventsDataType struct { + Group string + Version string + Resource string +} + +func (t CloudEventsDataType) String() string { + return fmt.Sprintf("%s.%s.%s", t.Group, t.Version, t.Resource) +} + +// CloudEventsType represents the type of cloud events, which describes the type of cloud event data. +type CloudEventsType struct { + // CloudEventsDataType uniquely identifies the type of cloud event data. + CloudEventsDataType + + // SubResource represents the cloud event data is from the resource spec or status. + SubResource EventSubResource + + // Action represents the expected action for this cloud event. + Action EventAction +} + +func (t CloudEventsType) String() string { + return fmt.Sprintf("%s.%s.%s.%s.%s", t.Group, t.Version, t.Resource, t.SubResource, t.Action) +} + +// ParseCloudEventsDataType parses a cloud event data type string into a struct object. +// The type format is `<group>.<version>.<resource>`.
+func ParseCloudEventsDataType(cloudEventsDataType string) (*CloudEventsDataType, error) { + types := strings.Split(cloudEventsDataType, ".") + length := len(types) + if length < 3 { + return nil, fmt.Errorf("unsupported cloudevents data type format") + } + return &CloudEventsDataType{ + Group: strings.Join(types[0:length-2], "."), + Version: types[length-2], + Resource: types[length-1], + }, nil +} + +// ParseCloudEventsType parses a cloud event type string into a struct object. +// The type format is `<group>.<version>.<resource>.<subresource>.<action>`. +// The `<subresource>` must be one of "spec" and "status". +func ParseCloudEventsType(cloudEventsType string) (*CloudEventsType, error) { + types := strings.Split(cloudEventsType, ".") + length := len(types) + if length < 5 { + return nil, fmt.Errorf("unsupported cloudevents type format") + } + + subResource := EventSubResource(types[length-2]) + if subResource != SubResourceSpec && subResource != SubResourceStatus { + return nil, fmt.Errorf("unsupported subresource %s", subResource) + } + + return &CloudEventsType{ + CloudEventsDataType: CloudEventsDataType{ + Group: strings.Join(types[0:length-4], "."), + Version: types[length-4], + Resource: types[length-3], + }, + SubResource: subResource, + Action: EventAction(types[length-1]), + }, nil +} + +type EventBuilder struct { + source string + clusterName string + originalSource string + resourceID string + resourceVersion *int64 + eventType CloudEventsType + deletionTimestamp time.Time +} + +func NewEventBuilder(source string, eventType CloudEventsType) *EventBuilder { + return &EventBuilder{ + source: source, + eventType: eventType, + } +} + +func (b *EventBuilder) WithResourceID(resourceID string) *EventBuilder { + b.resourceID = resourceID + return b +} + +func (b *EventBuilder) WithResourceVersion(resourceVersion int64) *EventBuilder { + b.resourceVersion = &resourceVersion + return b +} + +func (b *EventBuilder) WithClusterName(clusterName string) *EventBuilder { + b.clusterName = clusterName + return b +} + +func (b *EventBuilder) WithOriginalSource(originalSource string) *EventBuilder { + b.originalSource = originalSource + return b +} + +func (b *EventBuilder) WithDeletionTimestamp(timestamp time.Time) *EventBuilder { + b.deletionTimestamp = timestamp + return b +} + +func (b *EventBuilder) NewEvent() cloudevents.Event { + evt := cloudevents.NewEvent() + evt.SetID(uuid.New().String()) + evt.SetType(b.eventType.String()) + evt.SetTime(time.Now()) + evt.SetSource(b.source) + + if len(b.resourceID) != 0 { + evt.SetExtension(ExtensionResourceID, b.resourceID) + } + + if b.resourceVersion != nil { + evt.SetExtension(ExtensionResourceVersion, *b.resourceVersion) + } + + if len(b.clusterName) != 0 { + evt.SetExtension(ExtensionClusterName, b.clusterName) + } + + if len(b.originalSource) != 0 { + evt.SetExtension(ExtensionOriginalSource, b.originalSource) + } + + if !b.deletionTimestamp.IsZero() { + evt.SetExtension(ExtensionDeletionTimestamp, b.deletionTimestamp) + } + + return evt +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/agent/client/manifestwork.go b/vendor/open-cluster-management.io/api/cloudevents/work/agent/client/manifestwork.go new file mode 100644 index 000000000..54261d88f --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/agent/client/manifestwork.go @@ -0,0 +1,171 @@ +package client + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +
kubetypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog/v2" + + workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" + workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" + "open-cluster-management.io/api/cloudevents/generic" + "open-cluster-management.io/api/cloudevents/generic/types" + "open-cluster-management.io/api/cloudevents/work/agent/codec" + "open-cluster-management.io/api/cloudevents/work/utils" + "open-cluster-management.io/api/cloudevents/work/watcher" + workv1 "open-cluster-management.io/api/work/v1" +) + +const ManifestsDeleted = "Deleted" + +const ( + UpdateRequestAction = "update_request" + DeleteRequestAction = "delete_request" +) + +// ManifestWorkAgentClient implements the ManifestWorkInterface. It sends the manifestworks status back to source by +// CloudEventAgentClient. +type ManifestWorkAgentClient struct { + cloudEventsClient *generic.CloudEventAgentClient[*workv1.ManifestWork] + watcher *watcher.ManifestWorkWatcher + lister workv1lister.ManifestWorkNamespaceLister +} + +var manifestWorkGR = schema.GroupResource{Group: workv1.GroupName, Resource: "manifestworks"} + +var _ workv1client.ManifestWorkInterface = &ManifestWorkAgentClient{} + +func NewManifestWorkAgentClient(cloudEventsClient *generic.CloudEventAgentClient[*workv1.ManifestWork], watcher *watcher.ManifestWorkWatcher) *ManifestWorkAgentClient { + return &ManifestWorkAgentClient{ + cloudEventsClient: cloudEventsClient, + watcher: watcher, + } +} + +func (c *ManifestWorkAgentClient) SetLister(lister workv1lister.ManifestWorkNamespaceLister) { + c.lister = lister +} + +func (c *ManifestWorkAgentClient) Create(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.CreateOptions) (*workv1.ManifestWork, error) { + return nil, errors.NewMethodNotSupported(manifestWorkGR, "create") +} + +func (c *ManifestWorkAgentClient) Update(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.UpdateOptions) (*workv1.ManifestWork, error) { + return nil, errors.NewMethodNotSupported(manifestWorkGR, "update") +} + +func (c *ManifestWorkAgentClient) UpdateStatus(ctx context.Context, manifestWork *workv1.ManifestWork, opts metav1.UpdateOptions) (*workv1.ManifestWork, error) { + return nil, errors.NewMethodNotSupported(manifestWorkGR, "updatestatus") +} + +func (c *ManifestWorkAgentClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return errors.NewMethodNotSupported(manifestWorkGR, "delete") +} + +func (c *ManifestWorkAgentClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + return errors.NewMethodNotSupported(manifestWorkGR, "deletecollection") +} + +func (c *ManifestWorkAgentClient) Get(ctx context.Context, name string, opts metav1.GetOptions) (*workv1.ManifestWork, error) { + klog.V(4).Infof("getting manifestwork %s", name) + return c.lister.Get(name) +} + +func (c *ManifestWorkAgentClient) List(ctx context.Context, opts metav1.ListOptions) (*workv1.ManifestWorkList, error) { + klog.V(4).Infof("sync manifestworks") + // send resync request to fetch manifestworks from source when the ManifestWorkInformer starts + if err := c.cloudEventsClient.Resync(ctx); err != nil { + return nil, err + } + + return &workv1.ManifestWorkList{}, nil +} + +func (c *ManifestWorkAgentClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + // TODO (skeeey) consider resync the manifestworks when the 
ManifestWorkInformer reconnected + return c.watcher, nil +} + +func (c *ManifestWorkAgentClient) Patch(ctx context.Context, name string, pt kubetypes.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *workv1.ManifestWork, err error) { + klog.V(4).Infof("patching manifestwork %s", name) + + lastWork, err := c.lister.Get(name) + if err != nil { + return nil, err + } + + patchedWork, err := utils.Patch(pt, lastWork, data) + if err != nil { + return nil, err + } + + eventDataType, err := types.ParseCloudEventsDataType(patchedWork.Annotations[codec.CloudEventsDataTypeAnnotationKey]) + if err != nil { + return nil, err + } + + eventType := types.CloudEventsType{ + CloudEventsDataType: *eventDataType, + SubResource: types.SubResourceStatus, + } + + newWork := patchedWork.DeepCopy() + + statusUpdated, err := isStatusUpdate(subresources) + if err != nil { + return nil, err + } + + if statusUpdated { + eventType.Action = UpdateRequestAction + if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { + return nil, err + } + + // refresh the work status in the ManifestWorkInformer local cache with patched work. + c.watcher.Receive(watch.Event{Type: watch.Modified, Object: newWork}) + return newWork, nil + } + + // the finalizers of a deleting manifestwork are removed, marking the manifestwork status to deleted and sending + // it back to source + if !newWork.DeletionTimestamp.IsZero() && len(newWork.Finalizers) == 0 { + meta.SetStatusCondition(&newWork.Status.Conditions, metav1.Condition{ + Type: ManifestsDeleted, + Status: metav1.ConditionTrue, + Reason: "ManifestsDeleted", + Message: fmt.Sprintf("The manifests are deleted from the cluster %s", newWork.Namespace), + }) + + eventType.Action = DeleteRequestAction + if err := c.cloudEventsClient.Publish(ctx, eventType, newWork); err != nil { + return nil, err + } + + // delete the manifestwork from the ManifestWorkInformer local cache. + c.watcher.Receive(watch.Event{Type: watch.Deleted, Object: newWork}) + return newWork, nil + } + + // refresh the work in the ManifestWorkInformer local cache with patched work. 
+ c.watcher.Receive(watch.Event{Type: watch.Modified, Object: newWork}) + return newWork, nil +} + +func isStatusUpdate(subresources []string) (bool, error) { + if len(subresources) == 0 { + return false, nil + } + + if len(subresources) == 1 && subresources[0] == "status" { + return true, nil + } + + return false, fmt.Errorf("unsupported subresources %v", subresources) +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/agent/codec/manifest.go b/vendor/open-cluster-management.io/api/cloudevents/work/agent/codec/manifest.go new file mode 100644 index 000000000..515bb1962 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/agent/codec/manifest.go @@ -0,0 +1,184 @@ +package codec + +import ( + "fmt" + "strconv" + + cloudevents "github.com/cloudevents/sdk-go/v2" + cloudeventstypes "github.com/cloudevents/sdk-go/v2/types" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + kubetypes "k8s.io/apimachinery/pkg/types" + + "open-cluster-management.io/api/cloudevents/generic/types" + "open-cluster-management.io/api/cloudevents/work/payload" + "open-cluster-management.io/api/utils/work/v1/utils" + "open-cluster-management.io/api/utils/work/v1/workvalidator" + workv1 "open-cluster-management.io/api/work/v1" +) + +const ( + // CloudEventsDataTypeAnnotationKey is the key of the cloudevents data type annotation. + CloudEventsDataTypeAnnotationKey = "cloudevents.open-cluster-management.io/datatype" + + // CloudEventsDataTypeAnnotationKey is the key of the cloudevents original source annotation. + CloudEventsOriginalSourceAnnotationKey = "cloudevents.open-cluster-management.io/originalsource" +) + +// ManifestCodec is a codec to encode/decode a ManifestWork/cloudevent with ManifestBundle for an agent. +type ManifestCodec struct { + restMapper meta.RESTMapper +} + +func NewManifestCodec(restMapper meta.RESTMapper) *ManifestCodec { + return &ManifestCodec{ + restMapper: restMapper, + } +} + +// EventDataType returns the event data type for `io.open-cluster-management.works.v1alpha1.manifests`. +func (c *ManifestCodec) EventDataType() types.CloudEventsDataType { + return payload.ManifestEventDataType +} + +// Encode the status of a ManifestWork to a cloudevent with ManifestStatus. +func (c *ManifestCodec) Encode(source string, eventType types.CloudEventsType, work *workv1.ManifestWork) (*cloudevents.Event, error) { + if eventType.CloudEventsDataType != payload.ManifestEventDataType { + return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) + } + + resourceVersion, err := strconv.ParseInt(work.ResourceVersion, 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse the resourceversion of the work %s, %v", work.UID, err) + } + + originalSource, ok := work.Annotations[CloudEventsOriginalSourceAnnotationKey] + if !ok { + return nil, fmt.Errorf("failed to find originalsource from the work %s", work.UID) + } + + if len(work.Spec.Workload.Manifests) != 1 { + return nil, fmt.Errorf("too many manifests in the work %s", work.UID) + } + + evt := types.NewEventBuilder(source, eventType). + WithResourceID(string(work.UID)). + WithResourceVersion(resourceVersion). + WithClusterName(work.Namespace). + WithOriginalSource(originalSource). 
+ NewEvent() + + statusPayload := &payload.ManifestStatus{ + Conditions: work.Status.Conditions, + } + + if len(work.Status.ResourceStatus.Manifests) != 0 { + statusPayload.Status = &work.Status.ResourceStatus.Manifests[0] + } + + if err := evt.SetData(cloudevents.ApplicationJSON, statusPayload); err != nil { + return nil, fmt.Errorf("failed to encode manifestwork status to a cloudevent: %v", err) + } + + return &evt, nil +} + +// Decode a cloudevent whose data is Manifest to a ManifestWork. +func (c *ManifestCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWork, error) { + eventType, err := types.ParseCloudEventsType(evt.Type()) + if err != nil { + return nil, fmt.Errorf("failed to parse cloud event type %s, %v", evt.Type(), err) + } + + if eventType.CloudEventsDataType != payload.ManifestEventDataType { + return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) + } + + evtExtensions := evt.Context.GetExtensions() + + resourceID, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceID]) + if err != nil { + return nil, fmt.Errorf("failed to get resourceid extension: %v", err) + } + + resourceVersion, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceVersion]) + if err != nil { + return nil, fmt.Errorf("failed to get resourceversion extension: %v", err) + } + + clusterName, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionClusterName]) + if err != nil { + return nil, fmt.Errorf("failed to get clustername extension: %v", err) + } + + work := &workv1.ManifestWork{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + UID: kubetypes.UID(resourceID), + ResourceVersion: resourceVersion, + Name: resourceID, + Namespace: clusterName, + Annotations: map[string]string{ + CloudEventsDataTypeAnnotationKey: eventType.CloudEventsDataType.String(), + CloudEventsOriginalSourceAnnotationKey: evt.Source(), + }, + }, + } + + if _, ok := evtExtensions[types.ExtensionDeletionTimestamp]; ok { + deletionTimestamp, err := cloudeventstypes.ToTime(evtExtensions[types.ExtensionDeletionTimestamp]) + if err != nil { + return nil, fmt.Errorf("failed to get deletiontimestamp, %v", err) + } + + work.DeletionTimestamp = &metav1.Time{Time: deletionTimestamp} + return work, nil + } + + manifestPayload := &payload.Manifest{} + if err := evt.DataAs(manifestPayload); err != nil { + return nil, fmt.Errorf("failed to unmarshal event data %s, %v", string(evt.Data()), err) + } + + unstructuredObj := manifestPayload.Manifest + rawJson, err := unstructuredObj.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("failed to get manifest GVR from event %s, %v", string(evt.Data()), err) + } + + work.Spec = workv1.ManifestWorkSpec{ + Workload: workv1.ManifestsTemplate{ + Manifests: []workv1.Manifest{{RawExtension: runtime.RawExtension{Raw: rawJson}}}, + }, + DeleteOption: manifestPayload.DeleteOption, + } + + if manifestPayload.ConfigOption != nil { + _, gvr, err := utils.BuildResourceMeta(0, &unstructuredObj, c.restMapper) + if err != nil { + return nil, fmt.Errorf("failed to get manifest GVR from event %s, %v", string(evt.Data()), err) + } + + work.Spec.ManifestConfigs = []workv1.ManifestConfigOption{ + { + ResourceIdentifier: workv1.ResourceIdentifier{ + Group: gvr.Group, + Resource: gvr.Resource, + Name: unstructuredObj.GetName(), + Namespace: unstructuredObj.GetNamespace(), + }, + FeedbackRules: manifestPayload.ConfigOption.FeedbackRules, + UpdateStrategy: manifestPayload.ConfigOption.UpdateStrategy, + }, + } + } + + // 
validate the manifest + if err := workvalidator.ManifestValidator.ValidateManifests(work.Spec.Workload.Manifests); err != nil { + return nil, fmt.Errorf("manifest is invalid, %v", err) + } + + return work, nil +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/agent/codec/manifestbundle.go b/vendor/open-cluster-management.io/api/cloudevents/work/agent/codec/manifestbundle.go new file mode 100644 index 000000000..7eb10b668 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/agent/codec/manifestbundle.go @@ -0,0 +1,137 @@ +package codec + +import ( + "fmt" + "strconv" + + cloudevents "github.com/cloudevents/sdk-go/v2" + cloudeventstypes "github.com/cloudevents/sdk-go/v2/types" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubetypes "k8s.io/apimachinery/pkg/types" + + "open-cluster-management.io/api/cloudevents/generic/types" + "open-cluster-management.io/api/cloudevents/work/payload" + "open-cluster-management.io/api/utils/work/v1/workvalidator" + workv1 "open-cluster-management.io/api/work/v1" +) + +// ManifestBundleCodec is a codec to encode/decode a ManifestWork/cloudevent with ManifestBundle for an agent. +type ManifestBundleCodec struct{} + +func NewManifestBundleCodec() *ManifestBundleCodec { + return &ManifestBundleCodec{} +} + +// EventDataType always returns the event data type `io.open-cluster-management.works.v1alpha1.manifestbundles`. +func (c *ManifestBundleCodec) EventDataType() types.CloudEventsDataType { + return payload.ManifestBundleEventDataType +} + +// Encode the status of a ManifestWork to a cloudevent with ManifestBundleStatus. +func (c *ManifestBundleCodec) Encode(source string, eventType types.CloudEventsType, work *workv1.ManifestWork) (*cloudevents.Event, error) { + if eventType.CloudEventsDataType != payload.ManifestBundleEventDataType { + return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) + } + + resourceVersion, err := strconv.ParseInt(work.ResourceVersion, 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse the resourceversion of the work %s, %v", work.UID, err) + } + + originalSource, ok := work.Annotations[CloudEventsOriginalSourceAnnotationKey] + if !ok { + return nil, fmt.Errorf("failed to find originalsource from the work %s", work.UID) + } + + evt := types.NewEventBuilder(source, eventType). + WithResourceID(string(work.UID)). + WithResourceVersion(resourceVersion). + WithClusterName(work.Namespace). + WithOriginalSource(originalSource). + NewEvent() + + manifestBundleStatus := &payload.ManifestBundleStatus{ + Conditions: work.Status.Conditions, + ResourceStatus: work.Status.ResourceStatus.Manifests, + } + + if err := evt.SetData(cloudevents.ApplicationJSON, manifestBundleStatus); err != nil { + return nil, fmt.Errorf("failed to encode manifestwork status to a cloudevent: %v", err) + } + + return &evt, nil +} + +// Decode a cloudevent whose data is ManifestBundle to a ManifestWork. 
+func (c *ManifestBundleCodec) Decode(evt *cloudevents.Event) (*workv1.ManifestWork, error) { + eventType, err := types.ParseCloudEventsType(evt.Type()) + if err != nil { + return nil, fmt.Errorf("failed to parse cloud event type %s, %v", evt.Type(), err) + } + + if eventType.CloudEventsDataType != payload.ManifestBundleEventDataType { + return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) + } + + evtExtensions := evt.Context.GetExtensions() + + resourceID, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceID]) + if err != nil { + return nil, fmt.Errorf("failed to get resourceid extension: %v", err) + } + + resourceVersion, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceVersion]) + if err != nil { + return nil, fmt.Errorf("failed to get resourceversion extension: %v", err) + } + + clusterName, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionClusterName]) + if err != nil { + return nil, fmt.Errorf("failed to get clustername extension: %v", err) + } + + work := &workv1.ManifestWork{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + UID: kubetypes.UID(resourceID), + ResourceVersion: resourceVersion, + Name: resourceID, + Namespace: clusterName, + Annotations: map[string]string{ + CloudEventsDataTypeAnnotationKey: eventType.CloudEventsDataType.String(), + CloudEventsOriginalSourceAnnotationKey: evt.Source(), + }, + }, + } + + if _, ok := evtExtensions[types.ExtensionDeletionTimestamp]; ok { + deletionTimestamp, err := cloudeventstypes.ToTime(evtExtensions[types.ExtensionDeletionTimestamp]) + if err != nil { + return nil, fmt.Errorf("failed to get deletiontimestamp, %v", err) + } + + work.DeletionTimestamp = &metav1.Time{Time: deletionTimestamp} + return work, nil + } + + manifests := &payload.ManifestBundle{} + if err := evt.DataAs(manifests); err != nil { + return nil, fmt.Errorf("failed to unmarshal event data %s, %v", string(evt.Data()), err) + } + + work.Spec = workv1.ManifestWorkSpec{ + Workload: workv1.ManifestsTemplate{ + Manifests: manifests.Manifests, + }, + DeleteOption: manifests.DeleteOption, + ManifestConfigs: manifests.ManifestConfigs, + } + + // validate the manifests + if err := workvalidator.ManifestValidator.ValidateManifests(work.Spec.Workload.Manifests); err != nil { + return nil, fmt.Errorf("manifests are invalid, %v", err) + } + + return work, nil +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/agent/handler/resourcehandler.go b/vendor/open-cluster-management.io/api/cloudevents/work/agent/handler/resourcehandler.go new file mode 100644 index 000000000..986f9ccab --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/agent/handler/resourcehandler.go @@ -0,0 +1,76 @@ +package handler + +import ( + "fmt" + "strconv" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog/v2" + workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" + + "open-cluster-management.io/api/cloudevents/generic" + "open-cluster-management.io/api/cloudevents/generic/types" + "open-cluster-management.io/api/cloudevents/work/watcher" + workv1 "open-cluster-management.io/api/work/v1" +) + +// NewManifestWorkAgentHandler returns a ResourceHandler for a ManifestWork on managed cluster. It sends the kube events +// with ManifestWorWatcher after CloudEventAgentClient received the ManifestWork specs from source, then the +// ManifestWorkInformer handles the kube events in its local cache. 
+func NewManifestWorkAgentHandler(lister workv1lister.ManifestWorkNamespaceLister, watcher *watcher.ManifestWorkWatcher) generic.ResourceHandler[*workv1.ManifestWork] { + return func(action types.ResourceAction, work *workv1.ManifestWork) error { + switch action { + case types.Added: + watcher.Receive(watch.Event{Type: watch.Added, Object: work}) + case types.Modified: + lastWork, err := lister.Get(work.Name) + if err != nil { + return err + } + + resourceVersion, err := strconv.ParseInt(work.ResourceVersion, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse the resourceVersion of the manifestwork %s, %v", work.Name, err) + } + + lastResourceVersion, err := strconv.ParseInt(lastWork.ResourceVersion, 10, 64) + if err != nil { + return fmt.Errorf("failed to parse the resourceVersion of the manifestwork %s, %v", lastWork.Name, err) + } + + if resourceVersion <= lastResourceVersion { + klog.Infof("The work %s resource version is less than or equal to cached, ignore", work.Name) + return nil + } + + updatedWork := work.DeepCopy() + + // restore the fields that are maintained by local agent + updatedWork.Labels = lastWork.Labels + updatedWork.Annotations = lastWork.Annotations + updatedWork.Finalizers = lastWork.Finalizers + updatedWork.Status = lastWork.Status + + watcher.Receive(watch.Event{Type: watch.Modified, Object: updatedWork}) + case types.Deleted: + // the manifestwork is deleting on the source, we just update its deletion timestamp. + lastWork, err := lister.Get(work.Name) + if errors.IsNotFound(err) { + return nil + } + + if err != nil { + return err + } + + updatedWork := lastWork.DeepCopy() + updatedWork.DeletionTimestamp = work.DeletionTimestamp + watcher.Receive(watch.Event{Type: watch.Modified, Object: updatedWork}) + default: + return fmt.Errorf("unsupported resource action %s", action) + } + + return nil + } +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/clientbuilder.go b/vendor/open-cluster-management.io/api/cloudevents/work/clientbuilder.go new file mode 100644 index 000000000..810bf2758 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/clientbuilder.go @@ -0,0 +1,152 @@ +package work + +import ( + "context" + "fmt" + "time" + + "k8s.io/client-go/rest" + + workclientset "open-cluster-management.io/api/client/work/clientset/versioned" + workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" + workinformers "open-cluster-management.io/api/client/work/informers/externalversions" + workv1informers "open-cluster-management.io/api/client/work/informers/externalversions/work/v1" + "open-cluster-management.io/api/cloudevents/generic" + "open-cluster-management.io/api/cloudevents/generic/options/mqtt" + agentclient "open-cluster-management.io/api/cloudevents/work/agent/client" + agenthandler "open-cluster-management.io/api/cloudevents/work/agent/handler" + "open-cluster-management.io/api/cloudevents/work/internal" + "open-cluster-management.io/api/cloudevents/work/watcher" + workv1 "open-cluster-management.io/api/work/v1" +) + +const defaultInformerResyncTime = 10 * time.Minute + +// ClientHolder holds a manifestwork client that implements the ManifestWorkInterface based on different configuration +// and a ManifestWorkInformer that is built with the manifestWork client. +// +// ClientHolder also implements the ManifestWorksGetter interface. 
+type ClientHolder struct { + workClient workv1client.WorkV1Interface + manifestWorkInformer workv1informers.ManifestWorkInformer +} + +var _ workv1client.ManifestWorksGetter = &ClientHolder{} + +// ManifestWorks returns a ManifestWorkInterface +func (h *ClientHolder) ManifestWorks(namespace string) workv1client.ManifestWorkInterface { + return h.workClient.ManifestWorks(namespace) +} + +// ManifestWorkInformer returns a ManifestWorkInformer +func (h *ClientHolder) ManifestWorkInformer() workv1informers.ManifestWorkInformer { + return h.manifestWorkInformer +} + +// ClientHolderBuilder builds the ClientHolder with different configuration. +type ClientHolderBuilder struct { + config any + codecs []generic.Codec[*workv1.ManifestWork] + informerOptions []workinformers.SharedInformerOption + informerResyncTime time.Duration + clusterName string + clientID string +} + +// NewClientHolderBuilder returns a ClientHolderBuilder with a given configuration. +// +// Available configurations: +// - Kubeconfig (*rest.Config): builds a manifestwork client with kubeconfig +// - MQTTOptions (*mqtt.MQTTOptions): builds a manifestwork client based on cloudevents with MQTT +func NewClientHolderBuilder(clientID string, config any) *ClientHolderBuilder { + return &ClientHolderBuilder{ + clientID: clientID, + config: config, + informerResyncTime: defaultInformerResyncTime, + } +} + +// WithClusterName set the managed cluster name when building a manifestwork client for an agent. +func (b *ClientHolderBuilder) WithClusterName(clusterName string) *ClientHolderBuilder { + b.clusterName = clusterName + return b +} + +// WithCodecs add codecs when building a manifestwork client based on cloudevents. +func (b *ClientHolderBuilder) WithCodecs(codecs ...generic.Codec[*workv1.ManifestWork]) *ClientHolderBuilder { + b.codecs = codecs + return b +} + +// WithInformerConfig set the ManifestWorkInformer configs. If the resync time is not set, the default time (10 minutes) +// will be used when building the ManifestWorkInformer. +func (b *ClientHolderBuilder) WithInformerConfig( + resyncTime time.Duration, options ...workinformers.SharedInformerOption) *ClientHolderBuilder { + b.informerResyncTime = resyncTime + b.informerOptions = options + return b +} + +// NewClientHolder returns a ClientHolder for works. +func (b *ClientHolderBuilder) NewClientHolder(ctx context.Context) (*ClientHolder, error) { + switch config := b.config.(type) { + case *rest.Config: + kubeWorkClientSet, err := workclientset.NewForConfig(config) + if err != nil { + return nil, err + } + + factory := workinformers.NewSharedInformerFactoryWithOptions(kubeWorkClientSet, b.informerResyncTime, b.informerOptions...) 
+ + return &ClientHolder{ + workClient: kubeWorkClientSet.WorkV1(), + manifestWorkInformer: factory.Work().V1().ManifestWorks(), + }, nil + case *mqtt.MQTTOptions: + if len(b.clusterName) != 0 { + return b.newAgentClients(ctx, config) + } + + //TODO build manifestwork clients for source + return nil, nil + default: + return nil, fmt.Errorf("unsupported client configuration type %T", config) + } +} + +func (b *ClientHolderBuilder) newAgentClients(ctx context.Context, config *mqtt.MQTTOptions) (*ClientHolder, error) { + workLister := &ManifestWorkLister{} + watcher := watcher.NewManifestWorkWatcher() + agentOptions := mqtt.NewAgentOptions(config, b.clusterName, b.clientID) + cloudEventsClient, err := generic.NewCloudEventAgentClient[*workv1.ManifestWork]( + ctx, + agentOptions, + workLister, + ManifestWorkStatusHash, + b.codecs..., + ) + if err != nil { + return nil, err + } + + manifestWorkClient := agentclient.NewManifestWorkAgentClient(cloudEventsClient, watcher) + workClient := &internal.WorkV1ClientWrapper{ManifestWorkClient: manifestWorkClient} + workClientSet := &internal.WorkClientSetWrapper{WorkV1ClientWrapper: workClient} + factory := workinformers.NewSharedInformerFactoryWithOptions(workClientSet, b.informerResyncTime, b.informerOptions...) + informers := factory.Work().V1().ManifestWorks() + manifestWorkLister := informers.Lister() + namespacedLister := manifestWorkLister.ManifestWorks(b.clusterName) + + // Set informer lister back to work lister and client. + workLister.Lister = manifestWorkLister + // TODO the work client and informer share a same store in the current implementation, ideally, the store should be + // only written from the server. we may need to revisit the implementation in the future. + manifestWorkClient.SetLister(namespacedLister) + + cloudEventsClient.Subscribe(ctx, agenthandler.NewManifestWorkAgentHandler(namespacedLister, watcher)) + + return &ClientHolder{ + workClient: workClient, + manifestWorkInformer: informers, + }, nil +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/internal/clientset.go b/vendor/open-cluster-management.io/api/cloudevents/work/internal/clientset.go new file mode 100644 index 000000000..9b629d83f --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/internal/clientset.go @@ -0,0 +1,50 @@ +package internal + +import ( + discovery "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + + workclientset "open-cluster-management.io/api/client/work/clientset/versioned" + workv1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1" + workv1alpha1client "open-cluster-management.io/api/client/work/clientset/versioned/typed/work/v1alpha1" +) + +// WorkClientSetWrapper wraps a work client that has a manifestwork client to a work clientset interface, this wrapper +// will helps us to build manifestwork informer factory easily. 
+type WorkClientSetWrapper struct { + WorkV1ClientWrapper *WorkV1ClientWrapper +} + +var _ workclientset.Interface = &WorkClientSetWrapper{} + +func (c *WorkClientSetWrapper) WorkV1() workv1client.WorkV1Interface { + return c.WorkV1ClientWrapper +} + +func (c *WorkClientSetWrapper) WorkV1alpha1() workv1alpha1client.WorkV1alpha1Interface { + return nil +} + +func (c *WorkClientSetWrapper) Discovery() discovery.DiscoveryInterface { + return nil +} + +// WorkV1ClientWrapper wraps a manifestwork client to a WorkV1Interface +type WorkV1ClientWrapper struct { + ManifestWorkClient workv1client.ManifestWorkInterface +} + +var _ workv1client.WorkV1Interface = &WorkV1ClientWrapper{} + +func (c *WorkV1ClientWrapper) ManifestWorks(namespace string) workv1client.ManifestWorkInterface { + // TODO if the ManifestWorkClient is ManifestWorkSourceClient, we need set namespace here + return c.ManifestWorkClient +} + +func (c *WorkV1ClientWrapper) AppliedManifestWorks() workv1client.AppliedManifestWorkInterface { + return nil +} + +func (c *WorkV1ClientWrapper) RESTClient() rest.Interface { + return nil +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/lister.go b/vendor/open-cluster-management.io/api/cloudevents/work/lister.go new file mode 100644 index 000000000..1f1fbda70 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/lister.go @@ -0,0 +1,19 @@ +package work + +import ( + "k8s.io/apimachinery/pkg/labels" + + workv1lister "open-cluster-management.io/api/client/work/listers/work/v1" + "open-cluster-management.io/api/cloudevents/generic/types" + workv1 "open-cluster-management.io/api/work/v1" +) + +// ManifestWorkLister list the ManifestWorks from a ManifestWorkInformer's local cache. +type ManifestWorkLister struct { + Lister workv1lister.ManifestWorkLister +} + +// List returns the ManifestWorks from a ManifestWorkInformer's local cache. +func (l *ManifestWorkLister) List(options types.ListOptions) ([]*workv1.ManifestWork, error) { + return l.Lister.ManifestWorks(options.ClusterName).List(labels.Everything()) +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/payload/mainfiest.go b/vendor/open-cluster-management.io/api/cloudevents/work/payload/mainfiest.go new file mode 100644 index 000000000..c6ae992fe --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/payload/mainfiest.go @@ -0,0 +1,54 @@ +package payload + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "open-cluster-management.io/api/cloudevents/generic/types" + workv1 "open-cluster-management.io/api/work/v1" +) + +var ManifestEventDataType = types.CloudEventsDataType{ + Group: "io.open-cluster-management.works", + Version: "v1alpha1", + Resource: "manifests", +} + +// Manifest represents the data in a cloudevent, it contains a single manifest. +type Manifest struct { + // Manifest represents a resource to be deployed on managed cluster. + Manifest unstructured.Unstructured `json:"manifest"` + + // DeleteOption represents deletion strategy when this manifest is deleted. + DeleteOption *workv1.DeleteOption `json:"deleteOption,omitempty"` + + // ConfigOption represents the configuration of this manifest. + ConfigOption *ManifestConfigOption `json:"configOption,omitempty"` +} + +// ManifestStatus represents the data in a cloudevent, it contains the status of a SingleManifest on a managed +// cluster. 
+type ManifestStatus struct { + // Conditions contains the different condition statuses for a SingleManifest on a managed cluster. + // Valid condition types are: + // 1. Applied represents the manifest of a SingleManifest is applied successfully on a managed cluster. + // 2. Progressing represents the manifest of a SingleManifest is being applied on a managed cluster. + // 3. Available represents the manifest of a SingleManifest exists on the managed cluster. + // 4. Degraded represents the current state of manifest of a SingleManifest does not match the desired state for a + // certain period. + // 5. Deleted represents the manifests of a SingleManifest is deleted from a managed cluster. + Conditions []metav1.Condition `json:"conditions"` + + // Status represents the conditions of this manifest on a managed cluster. + Status *workv1.ManifestCondition `json:"status,omitempty"` +} + +type ManifestConfigOption struct { + // FeedbackRules defines what resource status field should be returned. + // If it is not set or empty, no feedback rules will be honored. + FeedbackRules []workv1.FeedbackRule `json:"feedbackRules,omitempty"` + + // UpdateStrategy defines the strategy to update this manifest. + // UpdateStrategy is Update if it is not set. + UpdateStrategy *workv1.UpdateStrategy `json:"updateStrategy,omitempty"` +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/payload/manifestbundle.go b/vendor/open-cluster-management.io/api/cloudevents/work/payload/manifestbundle.go new file mode 100644 index 000000000..60f63fa9a --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/payload/manifestbundle.go @@ -0,0 +1,43 @@ +package payload + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "open-cluster-management.io/api/cloudevents/generic/types" + workv1 "open-cluster-management.io/api/work/v1" +) + +var ManifestBundleEventDataType = types.CloudEventsDataType{ + Group: "io.open-cluster-management.works", + Version: "v1alpha1", + Resource: "manifestbundles", +} + +// ManifestBundle represents the data in a cloudevent, it contains a bundle of manifests. +type ManifestBundle struct { + // Manifests represents a list of Kuberenetes resources to be deployed on a managed cluster. + Manifests []workv1.Manifest `json:"manifests"` + + // DeleteOption represents deletion strategy when the manifests are deleted. + DeleteOption *workv1.DeleteOption `json:"deleteOption,omitempty"` + + // ManifestConfigs represents the configurations of manifests. + ManifestConfigs []workv1.ManifestConfigOption `json:"manifestConfigs,omitempty"` +} + +// ManifestBundleStatus represents the data in a cloudevent, it contains the status of a ManifestBundle on a managed +// cluster. +type ManifestBundleStatus struct { + // Conditions contains the different condition statuses for a ManifestBundle on managed cluster. + // Valid condition types are: + // 1. Applied represents the manifests in a ManifestBundle are applied successfully on a managed cluster. + // 2. Progressing represents the manifests in a ManifestBundle are being applied on a managed cluster. + // 3. Available represents the manifests in a ManifestBundle exist on a managed cluster. + // 4. Degraded represents the current state of manifests in a ManifestBundle do not match the desired state for a + // certain period. + // 5. Deleted represents the manifests in a ManifestBundle are deleted from a managed cluster. 
+ Conditions []metav1.Condition `json:"conditions"` + + // ManifestResourceStatus represents the status of each resource in manifest work deployed on managed cluster. + ResourceStatus []workv1.ManifestCondition `json:"resourceStatus,omitempty"` +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/statushash.go b/vendor/open-cluster-management.io/api/cloudevents/work/statushash.go new file mode 100644 index 000000000..642f78609 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/statushash.go @@ -0,0 +1,18 @@ +package work + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + + workv1 "open-cluster-management.io/api/work/v1" +) + +// ManifestWorkStatusHash returns the SHA256 checksum of a ManifestWork status. +func ManifestWorkStatusHash(work *workv1.ManifestWork) (string, error) { + statusBytes, err := json.Marshal(work.Status) + if err != nil { + return "", fmt.Errorf("failed to marshal work status, %v", err) + } + return fmt.Sprintf("%x", sha256.Sum256(statusBytes)), nil +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/utils/utils.go b/vendor/open-cluster-management.io/api/cloudevents/work/utils/utils.go new file mode 100644 index 000000000..7a4afcc72 --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/utils/utils.go @@ -0,0 +1,47 @@ +package utils + +import ( + "encoding/json" + "fmt" + + jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/types" + workv1 "open-cluster-management.io/api/work/v1" +) + +// Patch applies the patch to a work with the patch type. +func Patch(patchType types.PatchType, work *workv1.ManifestWork, patchData []byte) (*workv1.ManifestWork, error) { + workData, err := json.Marshal(work) + if err != nil { + return nil, err + } + + var patchedData []byte + switch patchType { + case types.JSONPatchType: + var patchObj jsonpatch.Patch + patchObj, err = jsonpatch.DecodePatch(patchData) + if err != nil { + return nil, err + } + patchedData, err = patchObj.Apply(workData) + if err != nil { + return nil, err + } + + case types.MergePatchType: + patchedData, err = jsonpatch.MergePatch(workData, patchData) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unsupported patch type: %s", patchType) + } + + patchedWork := &workv1.ManifestWork{} + if err := json.Unmarshal(patchedData, patchedWork); err != nil { + return nil, err + } + + return patchedWork, nil +} diff --git a/vendor/open-cluster-management.io/api/cloudevents/work/watcher/watcher.go b/vendor/open-cluster-management.io/api/cloudevents/work/watcher/watcher.go new file mode 100644 index 000000000..d22a7512a --- /dev/null +++ b/vendor/open-cluster-management.io/api/cloudevents/work/watcher/watcher.go @@ -0,0 +1,64 @@ +package watcher + +import ( + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog/v2" +) + +// ManifestWorkWatcher implements the watch.Interface. It returns a chan which will receive all the events. +type ManifestWorkWatcher struct { + sync.Mutex + + result chan watch.Event + done chan struct{} +} + +var _ watch.Interface = &ManifestWorkWatcher{} + +func NewManifestWorkWatcher() *ManifestWorkWatcher { + mw := &ManifestWorkWatcher{ + // It's easy for a consumer to add buffering via an extra + // goroutine/channel, but impossible for them to remove it, + // so nonbuffered is better. 
+ result: make(chan watch.Event), + // If the watcher is externally stopped there is no receiver anymore + // and the send operations on the result channel, especially the + // error reporting might block forever. + // Therefore a dedicated stop channel is used to resolve this blocking. + done: make(chan struct{}), + } + + return mw +} + +// ResultChan implements Interface. +func (mw *ManifestWorkWatcher) ResultChan() <-chan watch.Event { + return mw.result +} + +// Stop implements Interface. +func (mw *ManifestWorkWatcher) Stop() { + // Call Close() exactly once by locking and setting a flag. + mw.Lock() + defer mw.Unlock() + // closing a closed channel always panics, therefore check before closing + select { + case <-mw.done: + close(mw.result) + default: + close(mw.done) + } +} + +// Receive a event from the work client and sends down the result channel. +func (mw *ManifestWorkWatcher) Receive(evt watch.Event) { + if klog.V(4).Enabled() { + obj, _ := meta.Accessor(evt.Object) + klog.V(4).Infof("Receive the event %v for %v", evt.Type, obj.GetName()) + } + + mw.result <- evt +} diff --git a/vendor/open-cluster-management.io/api/utils/work/v1/utils/utils.go b/vendor/open-cluster-management.io/api/utils/work/v1/utils/utils.go new file mode 100644 index 000000000..32b4557c1 --- /dev/null +++ b/vendor/open-cluster-management.io/api/utils/work/v1/utils/utils.go @@ -0,0 +1,76 @@ +package utils + +import ( + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/scheme" + + workv1 "open-cluster-management.io/api/work/v1" +) + +var genericScheme = runtime.NewScheme() + +// BuildResourceMeta builds manifest resource meta for the object +func BuildResourceMeta( + index int, + object runtime.Object, + restMapper meta.RESTMapper) (workv1.ManifestResourceMeta, schema.GroupVersionResource, error) { + resourceMeta := workv1.ManifestResourceMeta{ + Ordinal: int32(index), + } + + if object == nil || reflect.ValueOf(object).IsNil() { + return resourceMeta, schema.GroupVersionResource{}, nil + } + + // set gvk + gvk, err := GuessObjectGroupVersionKind(object) + if err != nil { + return resourceMeta, schema.GroupVersionResource{}, err + } + resourceMeta.Group = gvk.Group + resourceMeta.Version = gvk.Version + resourceMeta.Kind = gvk.Kind + + // set namespace/name + if accessor, e := meta.Accessor(object); e != nil { + err = fmt.Errorf("cannot access metadata of %v: %w", object, e) + } else { + resourceMeta.Namespace = accessor.GetNamespace() + resourceMeta.Name = accessor.GetName() + } + + // set resource + if restMapper == nil { + return resourceMeta, schema.GroupVersionResource{}, err + } + mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return resourceMeta, schema.GroupVersionResource{}, fmt.Errorf("the server doesn't have a resource type %q", gvk.Kind) + } + + resourceMeta.Resource = mapping.Resource.Resource + return resourceMeta, mapping.Resource, err +} + +// GuessObjectGroupVersionKind returns GVK for the passed runtime object. 
+func GuessObjectGroupVersionKind(object runtime.Object) (*schema.GroupVersionKind, error) { + if gvk := object.GetObjectKind().GroupVersionKind(); len(gvk.Kind) > 0 { + return &gvk, nil + } + + if kinds, _, _ := scheme.Scheme.ObjectKinds(object); len(kinds) > 0 { + return &kinds[0], nil + } + + // otherwise fall back to genericScheme + if kinds, _, _ := genericScheme.ObjectKinds(object); len(kinds) > 0 { + return &kinds[0], nil + } + + return nil, fmt.Errorf("cannot get gvk of %v", object) +} diff --git a/vendor/open-cluster-management.io/api/utils/work/v1/workvalidator/validator.go b/vendor/open-cluster-management.io/api/utils/work/v1/workvalidator/validator.go new file mode 100644 index 000000000..04559069c --- /dev/null +++ b/vendor/open-cluster-management.io/api/utils/work/v1/workvalidator/validator.go @@ -0,0 +1,64 @@ +package workvalidator + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + workv1 "open-cluster-management.io/api/work/v1" +) + +type Validator struct { + limit int +} + +var ManifestValidator = &Validator{limit: 500 * 1024} // the default manifest limit is 500k. + +func (m *Validator) WithLimit(limit int) { + m.limit = limit +} + +func (m *Validator) ValidateManifests(manifests []workv1.Manifest) error { + if len(manifests) == 0 { + return errors.NewBadRequest("Workload manifests should not be empty") + } + + totalSize := 0 + for _, manifest := range manifests { + totalSize = totalSize + manifest.Size() + } + + if totalSize > m.limit { + return fmt.Errorf("the size of manifests is %v bytes which exceeds the %v limit", totalSize, m.limit) + } + + for _, manifest := range manifests { + err := validateManifest(manifest.Raw) + if err != nil { + return err + } + } + + return nil +} + +func validateManifest(manifest []byte) error { + // If the manifest cannot be decoded, return err + unstructuredObj := &unstructured.Unstructured{} + err := unstructuredObj.UnmarshalJSON(manifest) + if err != nil { + return err + } + + // The object must have name specified, generateName is not allowed in manifestwork + if unstructuredObj.GetName() == "" { + return fmt.Errorf("name must be set in manifest") + } + + if unstructuredObj.GetGenerateName() != "" { + return fmt.Errorf("generateName must not be set in manifest") + } + + return nil +}
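Reviewer note: a minimal sketch of how the vendored pieces added in this change are wired together on the agent side, based only on the APIs introduced above (NewClientHolderBuilder, WithClusterName, WithCodecs, WithInformerConfig, NewClientHolder, and codec.NewManifestBundleCodec). The client ID, cluster name, and the buildMQTTOptions helper are illustrative placeholders, not part of this change; in a real agent the *mqtt.MQTTOptions would be populated from configuration or flags.

package main

import (
	"context"
	"log"
	"time"

	"open-cluster-management.io/api/cloudevents/generic/options/mqtt"
	cloudeventswork "open-cluster-management.io/api/cloudevents/work"
	"open-cluster-management.io/api/cloudevents/work/agent/codec"
)

// buildMQTTOptions is a hypothetical placeholder; any *mqtt.MQTTOptions
// (for example one built from the agent's command-line flags) can be passed
// to the builder instead.
func buildMQTTOptions() *mqtt.MQTTOptions {
	return &mqtt.MQTTOptions{}
}

func main() {
	ctx := context.Background()

	// Setting a cluster name together with *mqtt.MQTTOptions selects the
	// cloudevents-based agent client; the ManifestBundle codec registers the
	// io.open-cluster-management.works.v1alpha1.manifestbundles data type.
	holder, err := cloudeventswork.NewClientHolderBuilder("example-agent-client-id", buildMQTTOptions()).
		WithClusterName("cluster1").
		WithCodecs(codec.NewManifestBundleCodec()).
		WithInformerConfig(10 * time.Minute).
		NewClientHolder(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// The informer and the ManifestWork client share one local cache; a List
	// call on the client triggers a spec resync request to the source.
	informer := holder.ManifestWorkInformer()
	go informer.Informer().Run(ctx.Done())

	_ = holder.ManifestWorks("cluster1")
}

If the builder is given a *rest.Config instead of *mqtt.MQTTOptions, NewClientHolder falls back to the standard kube work clientset, so calling code like the sketch above stays the same for both transports.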