diff --git a/go.mod b/go.mod index 75c9ba6..f03b59a 100644 --- a/go.mod +++ b/go.mod @@ -4,11 +4,12 @@ go 1.14 require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/pborman/uuid v1.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71 github.com/stretchr/testify v1.1.4-0.20160524234229-8d64eb7173c7 github.com/taskcluster/pulse-go v1.0.0 + github.com/taskcluster/taskcluster/clients/client-go/v23 v23.0.0 github.com/urfave/cli v1.17.1-0.20160608151511-fa949b48f384 go.mozilla.org/mozlog v0.0.0-20160610165107-cd74695caf44 + launchpad.net/gocheck v0.0.0-20140225173054-000000000087 // indirect ) diff --git a/go.sum b/go.sum index 12d81a3..b01f843 100644 --- a/go.sum +++ b/go.sum @@ -1,18 +1,50 @@ +github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= +github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71 h1:2MR0pKUzlP3SGgj5NYJe/zRYDwOu9ku6YHy+Iw7l5DM= github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/stretchr/testify v1.1.4-0.20160524234229-8d64eb7173c7 h1:5KNW+VDCZxdFJasHPW1AehzhTIm69pHQq2psutjjXRk= github.com/stretchr/testify v1.1.4-0.20160524234229-8d64eb7173c7/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/taskcluster/httpbackoff/v3 v3.0.0 h1:Zh2BCW2iA3fzBBuZo2E4MvwyPSB6aimyI4EreeK3TRM= +github.com/taskcluster/httpbackoff/v3 v3.0.0/go.mod h1:99ubellEC0fmRj7wnGkPftT2xfCY7NmbjT3gzn2ZPUM= +github.com/taskcluster/jsonschema2go v1.0.0 h1:ZEDj2NKh8Sceq36zyLhSV6ann/aNXKZIe9cAXq7CDdk= +github.com/taskcluster/jsonschema2go v1.0.0/go.mod h1:jhsT3XPj3iLNRx0efJVfFzZBZgxeYE7IHfZAai8wuKQ= github.com/taskcluster/pulse-go v1.0.0 h1:ys4ZUNp5TYiV5LSMxge4YF/AtuBUNH9StAA/bkno+r0= github.com/taskcluster/pulse-go v1.0.0/go.mod h1:uuaqnRQj9XqouabCEKjnrlJiC6UT9Gurx2oSe6s+irM= +github.com/taskcluster/slugid-go v1.1.0 h1:SWsUplliyamdYzOKVM4+lDohZKuL63fKreGkvIKJ9aI= +github.com/taskcluster/slugid-go v1.1.0/go.mod h1:5sOAcPHjqso1UkKxSl77CkKgOwha0D9X0msBKBj0AOg= +github.com/taskcluster/taskcluster-base-go v1.0.0/go.mod h1:ByyzyqqufsfZTrAHUw+0Grp8FwZAizZOKzVE1IpDXxQ= +github.com/taskcluster/taskcluster-lib-urls v12.0.0+incompatible h1:57WLzh7B04y6ahTOJ8wjvdkbwYqnyJkwLXQ1Tu4E/DU= +github.com/taskcluster/taskcluster-lib-urls v12.0.0+incompatible/go.mod h1:ALqTgi15AmJGEGubRKM0ydlLAFatlQPrQrmal9YZpQs= +github.com/taskcluster/taskcluster/clients/client-go/v23 v23.0.0 h1:QQDQUy8zHLdfMF+EQEkA+NRGfjdykhHxNcPMakr+sm8= +github.com/taskcluster/taskcluster/clients/client-go/v23 v23.0.0/go.mod h1:5NZbdHaaF2/yjkHtlcEKm4AXs3EH3M5Kih0wAEGRlMs= +github.com/tent/hawk-go 
v0.0.0-20161026210932-d341ea318957 h1:6Fre/uvwovW5YY4nfHZk66cAg9HjT9YdFSAJHUUgOyQ= +github.com/tent/hawk-go v0.0.0-20161026210932-d341ea318957/go.mod h1:dch7ywQEefE1ibFqBG1erFibrdUIwovcwQjksYuHuP4= github.com/urfave/cli v1.17.1-0.20160608151511-fa949b48f384 h1:nYZrQfefkHIWNPwwIToQOnTeZ5nQt4nUHiS4wHQBWSA= github.com/urfave/cli v1.17.1-0.20160608151511-fa949b48f384/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= go.mozilla.org/mozlog v0.0.0-20160610165107-cd74695caf44 h1:ZwI2d4V8jjame21x82EXAGuKKStymcxBsNt0hkN4/5E= go.mozilla.org/mozlog v0.0.0-20160610165107-cd74695caf44/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= diff --git a/main.go b/main.go index 853e5da..29c00c8 100644 --- a/main.go +++ 
b/main.go @@ -77,6 +77,24 @@ func main() { Value: "hgmo", EnvVar: "HGMO_PULSE_QUEUE", }, + cli.StringFlag{ + Name: "cloudops-deploy-pulse-prefix", + Usage: "Pulse route to listen to.", + Value: "cloudops.deploy.v1", + EnvVar: "CLOUDOPS_DEPLOY_PULSE_PREFIX", + }, + cli.StringFlag{ + Name: "cloudops-deploy-pulse-queue", + Usage: "Pulse queue to listen to.", + Value: "deploy-proxy", + EnvVar: "CLOUDOPS_DEPLOY_PULSE_QUEUE", + }, + cli.StringFlag{ + Name: "taskcluster-root-url", + Usage: "Taskcluster root URL", + Value: "https://firefox-ci-tc.services.mozilla.com", + EnvVar: "TASKCLUSTER_ROOT_URL", + }, } app.Action = func(c *cli.Context) error { @@ -111,6 +129,18 @@ func main() { c.String("pulse-host"), ) + taskclusterPulseHandler := proxyservice.NewTaskclusterPulseHandler( + jenkins, + &pulse, + c.String("cloudops-deploy-pulse-queue"), + c.String("cloudops-deploy-pulse-prefix"), + c.String("taskcluster-root-url"), + ) + + if err := taskclusterPulseHandler.Consume(); err != nil { + return cli.NewExitError(fmt.Sprintf("Could not listen to taskcluster pulse: %v", err), 1) + } + hgmoPulseHandler := proxyservice.NewHgmoPulseHandler( jenkins, &pulse, diff --git a/proxyservice/jenkins.go b/proxyservice/jenkins.go index 7aa382a..3433ee2 100644 --- a/proxyservice/jenkins.go +++ b/proxyservice/jenkins.go @@ -138,3 +138,25 @@ func TriggerHgJob(j Jenkins, repoPath string, repoUrl string, head string, data params.Set("RawJSON", string(rawJSON)) return j.TriggerJob(path, params) } + +func TriggerTaskclusterJob(j Jenkins, route string, deploy DeployExtra, data *TaskCompletedMessage) error { + // FIXME: This should probably be split on . 
+ if !regexp.MustCompile(`^[a-zA-Z0-9_\-]{2,255}$`).MatchString(route) { + return fmt.Errorf("Invalid data.Repository.Namespace: %s", route) + } + + rawJSON, err := json.Marshal(data) + if err != nil { + return fmt.Errorf("Error marshaling data: %v", err) + } + path := path.Join("/job/taskcluster/job", + route, "job", route) + params := url.Values{} + params.Set("IMAGE_TASK_ID", deploy.ImageTaskId) + if deploy.Variant != "" { + params.Set("VARIANT", deploy.Variant) + } + params.Set("RawJSON", string(rawJSON)) + return j.TriggerJob(path, params) +} + diff --git a/proxyservice/taskcluster_handler.go b/proxyservice/taskcluster_handler.go new file mode 100644 index 0000000..81e2364 --- /dev/null +++ b/proxyservice/taskcluster_handler.go @@ -0,0 +1,98 @@ +package proxyservice + +import ( + "encoding/json" + "fmt" + "log" + "strings" + + "github.com/streadway/amqp" + "github.com/taskcluster/pulse-go/pulse" + "github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueue" + "github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueueevents" +) + +type TaskCompletedMessage = tcqueueevents.TaskCompletedMessage + +type routeTaskCompleted struct { + Route string +} + +func (binding routeTaskCompleted) RoutingKey() string { + return fmt.Sprintf("route.%s", binding.Route) +} + +func (binding routeTaskCompleted) ExchangeName() string { + return "exchange/taskcluster-queue/v1/task-completed" +} + +func (binding routeTaskCompleted) NewPayloadObject() interface{} { + return new(tcqueueevents.TaskCompletedMessage) +} + +type DeployExtra struct { + ImageTaskId string `json:"image-task-id"` + Variant string `json:"variant"` +} + +func ParseExtra(extra *json.RawMessage) (DeployExtra, error) { + var raw struct { + Deploy DeployExtra `json:"cloudops-deploy"` + } + if err := json.Unmarshal(*extra, &raw); err != nil { + return DeployExtra{}, err + } + return raw.Deploy, nil +} + +type TaskclusterPulseHandler struct { + Jenkins Jenkins + Pulse *pulse.Connection + QueueName string + 
PulseRoutePrefix string + TcQueue *tcqueue.Queue +} + +func NewTaskclusterPulseHandler(jenkins Jenkins, pulse *pulse.Connection, queueName string, routePrefix string, taskclusterRootUrl string) *TaskclusterPulseHandler { + return &TaskclusterPulseHandler{ + Jenkins: jenkins, + Pulse: pulse, + QueueName: queueName, + PulseRoutePrefix: routePrefix, + TcQueue: tcqueue.New(nil, taskclusterRootUrl), + } +} + +func (handler *TaskclusterPulseHandler) handleMessage(message interface{}, delivery amqp.Delivery) { + routingKeyPrefix := "route." + handler.PulseRoutePrefix + "." // trailing "." so TrimPrefix below leaves no leading dot on route + switch t := message.(type) { + case *tcqueueevents.TaskCompletedMessage: + if strings.HasPrefix(delivery.RoutingKey, routingKeyPrefix) { + route := strings.TrimPrefix(delivery.RoutingKey, routingKeyPrefix) + task, err := handler.TcQueue.Task(t.Status.TaskID) + if err != nil { + log.Printf("Error fetching task %v: %s", t.Status.TaskID, err) + break + } + // Parse the cloudops-deploy extra section, then trigger the Jenkins job. + if deploy, err := ParseExtra(&task.Extra); err != nil { + log.Printf("Error parsing task extra: %s", err) + } else if err := TriggerTaskclusterJob(handler.Jenkins, route, deploy, t); err != nil { + log.Printf("Error triggering taskcluster job: %s", err) + } + } + } + delivery.Ack(false) // acknowledge message *after* processing + +} + +func (handler *TaskclusterPulseHandler) Consume() error { + _, err := handler.Pulse.Consume( + handler.QueueName, + handler.handleMessage, + 1, // prefetch 1 message at a time + false, // don't autoacknowledge messages + routeTaskCompleted{Route: handler.PulseRoutePrefix + ".#"}, + ) + return err +} diff --git a/vendor/github.com/cenkalti/backoff/v3/.gitignore b/vendor/github.com/cenkalti/backoff/v3/.gitignore new file mode 100644 index 0000000..0026861 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go
+*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/cenkalti/backoff/v3/.travis.yml b/vendor/github.com/cenkalti/backoff/v3/.travis.yml new file mode 100644 index 0000000..47a6a46 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/.travis.yml @@ -0,0 +1,10 @@ +language: go +go: + - 1.7 + - 1.x + - tip +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/cenkalti/backoff/v3/LICENSE b/vendor/github.com/cenkalti/backoff/v3/LICENSE new file mode 100644 index 0000000..89b8179 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/cenkalti/backoff/v3/README.md b/vendor/github.com/cenkalti/backoff/v3/README.md new file mode 100644 index 0000000..55ebc98 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/README.md @@ -0,0 +1,30 @@ +# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] + +This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. + +[Exponential backoff][exponential backoff wiki] +is an algorithm that uses feedback to multiplicatively decrease the rate of some process, +in order to gradually find an acceptable rate. +The retries exponentially increase and stop increasing when a certain threshold is met. + +## Usage + +See https://godoc.org/github.com/cenkalti/backoff#pkg-examples + +## Contributing + +* I would like to keep this library as small as possible. +* Please don't send a PR without opening an issue and discussing it first. +* If proposed change is not a common use case, I will probably not accept it. 
+ +[godoc]: https://godoc.org/github.com/cenkalti/backoff +[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png +[travis]: https://travis-ci.org/cenkalti/backoff +[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master +[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master +[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master + +[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java +[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff + +[advanced example]: https://godoc.org/github.com/cenkalti/backoff#example_ diff --git a/vendor/github.com/cenkalti/backoff/v3/backoff.go b/vendor/github.com/cenkalti/backoff/v3/backoff.go new file mode 100644 index 0000000..3676ee4 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/backoff.go @@ -0,0 +1,66 @@ +// Package backoff implements backoff algorithms for retrying operations. +// +// Use Retry function for retrying operations that may fail. +// If Retry does not meet your needs, +// copy/paste the function into your project and modify as you wish. +// +// There is also Ticker type similar to time.Ticker. +// You can use it if you need to work with channels. +// +// See Examples section below for usage examples. +package backoff + +import "time" + +// BackOff is a backoff policy for retrying an operation. +type BackOff interface { + // NextBackOff returns the duration to wait before retrying the operation, + // or backoff. Stop to indicate that no more retries should be made. + // + // Example usage: + // + // duration := backoff.NextBackOff(); + // if (duration == backoff.Stop) { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } + // + NextBackOff() time.Duration + + // Reset to initial state. 
+ Reset() +} + +// Stop indicates that no more retries should be made for use in NextBackOff(). +const Stop time.Duration = -1 + +// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, +// meaning that the operation is retried immediately without waiting, indefinitely. +type ZeroBackOff struct{} + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } + +// StopBackOff is a fixed backoff policy that always returns backoff.Stop for +// NextBackOff(), meaning that the operation should never be retried. +type StopBackOff struct{} + +func (b *StopBackOff) Reset() {} + +func (b *StopBackOff) NextBackOff() time.Duration { return Stop } + +// ConstantBackOff is a backoff policy that always returns the same backoff delay. +// This is in contrast to an exponential backoff policy, +// which returns a delay that grows longer as you call NextBackOff() over and over again. +type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/vendor/github.com/cenkalti/backoff/v3/context.go b/vendor/github.com/cenkalti/backoff/v3/context.go new file mode 100644 index 0000000..7706faa --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/context.go @@ -0,0 +1,63 @@ +package backoff + +import ( + "context" + "time" +) + +// BackOffContext is a backoff policy that stops retrying after the context +// is canceled. 
+type BackOffContext interface { + BackOff + Context() context.Context +} + +type backOffContext struct { + BackOff + ctx context.Context +} + +// WithContext returns a BackOffContext with context ctx +// +// ctx must not be nil +func WithContext(b BackOff, ctx context.Context) BackOffContext { + if ctx == nil { + panic("nil context") + } + + if b, ok := b.(*backOffContext); ok { + return &backOffContext{ + BackOff: b.BackOff, + ctx: ctx, + } + } + + return &backOffContext{ + BackOff: b, + ctx: ctx, + } +} + +func ensureContext(b BackOff) BackOffContext { + if cb, ok := b.(BackOffContext); ok { + return cb + } + return WithContext(b, context.Background()) +} + +func (b *backOffContext) Context() context.Context { + return b.ctx +} + +func (b *backOffContext) NextBackOff() time.Duration { + select { + case <-b.ctx.Done(): + return Stop + default: + } + next := b.BackOff.NextBackOff() + if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next { + return Stop + } + return next +} diff --git a/vendor/github.com/cenkalti/backoff/v3/exponential.go b/vendor/github.com/cenkalti/backoff/v3/exponential.go new file mode 100644 index 0000000..a031a65 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/exponential.go @@ -0,0 +1,153 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. 
+ +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). + +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. +*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + // After MaxElapsedTime the ExponentialBackOff stops. + // It never stops if MaxElapsedTime == 0. + MaxElapsedTime time.Duration + Clock Clock + + currentInterval time.Duration + startTime time.Time +} + +// Clock is an interface that returns current time for BackOff. +type Clock interface { + Now() time.Time +} + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second + DefaultMaxElapsedTime = 15 * time.Minute +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. 
+func NewExponentialBackOff() *ExponentialBackOff { + b := &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + MaxElapsedTime: DefaultMaxElapsedTime, + Clock: SystemClock, + } + b.Reset() + return b +} + +type systemClock struct{} + +func (t systemClock) Now() time.Time { + return time.Now() +} + +// SystemClock implements Clock interface that uses time.Now(). +var SystemClock = systemClock{} + +// Reset the interval back to the initial retry interval and restarts the timer. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval + b.startTime = b.Clock.Now() +} + +// NextBackOff calculates the next backoff interval using the formula: +// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + // Make sure we have not gone over the maximum elapsed time. + if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime { + return Stop + } + defer b.incrementCurrentInterval() + return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) +} + +// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance +// is created and is reset when Reset() is called. +// +// The elapsed time is computed using time.Now().UnixNano(). It is +// safe to call even while the backoff policy is used by a running +// ticker. +func (b *ExponentialBackOff) GetElapsedTime() time.Duration { + return b.Clock.Now().Sub(b.startTime) +} + +// Increments the current interval by multiplying it with the multiplier. +func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. 
+ if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// [randomizationFactor * currentInterval, randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/vendor/github.com/cenkalti/backoff/v3/go.mod b/vendor/github.com/cenkalti/backoff/v3/go.mod new file mode 100644 index 0000000..479e62a --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/go.mod @@ -0,0 +1,3 @@ +module github.com/cenkalti/backoff/v3 + +go 1.12 diff --git a/vendor/github.com/cenkalti/backoff/v3/retry.go b/vendor/github.com/cenkalti/backoff/v3/retry.go new file mode 100644 index 0000000..e936a50 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/retry.go @@ -0,0 +1,82 @@ +package backoff + +import "time" + +// An Operation is executing by Retry() or RetryNotify(). +// The operation will be retried using a backoff policy if it returns an error. +type Operation func() error + +// Notify is a notify-on-error function. It receives an operation error and +// backoff delay if the operation failed (with an error). +// +// NOTE that if the backoff policy stated to stop retrying, +// the notify function isn't called. 
+type Notify func(error, time.Duration) + +// Retry the operation o until it does not return error or BackOff stops. +// o is guaranteed to be run at least once. +// +// If o returns a *PermanentError, the operation is not retried, and the +// wrapped error is returned. +// +// Retry sleeps the goroutine for the duration returned by BackOff after a +// failed operation returns. +func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) } + +// RetryNotify calls notify function with the error and wait duration +// for each failed attempt before sleep. +func RetryNotify(operation Operation, b BackOff, notify Notify) error { + var err error + var next time.Duration + var t *time.Timer + + cb := ensureContext(b) + + b.Reset() + for { + if err = operation(); err == nil { + return nil + } + + if permanent, ok := err.(*PermanentError); ok { + return permanent.Err + } + + if next = cb.NextBackOff(); next == Stop { + return err + } + + if notify != nil { + notify(err, next) + } + + if t == nil { + t = time.NewTimer(next) + defer t.Stop() + } else { + t.Reset(next) + } + + select { + case <-cb.Context().Done(): + return err + case <-t.C: + } + } +} + +// PermanentError signals that the operation should not be retried. +type PermanentError struct { + Err error +} + +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +// Permanent wraps the given err in a *PermanentError. +func Permanent(err error) *PermanentError { + return &PermanentError{ + Err: err, + } +} diff --git a/vendor/github.com/cenkalti/backoff/v3/ticker.go b/vendor/github.com/cenkalti/backoff/v3/ticker.go new file mode 100644 index 0000000..e41084b --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/ticker.go @@ -0,0 +1,82 @@ +package backoff + +import ( + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. 
+// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. +type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOffContext + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send +// the time at times specified by the BackOff argument. Ticker is +// guaranteed to tick at least once. The channel is closed when Stop +// method is called or BackOff stops. It is not safe to manipulate the +// provided backoff policy (notably calling NextBackOff or Reset) +// while the ticker is running. +func NewTicker(b BackOff) *Ticker { + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: ensureContext(b), + stop: make(chan struct{}), + } + t.b.Reset() + go t.run() + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. 
+ return + case <-t.b.Context().Done(): + return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + return time.After(next) +} diff --git a/vendor/github.com/cenkalti/backoff/v3/tries.go b/vendor/github.com/cenkalti/backoff/v3/tries.go new file mode 100644 index 0000000..cfeefd9 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/tries.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +/* +WithMaxRetries creates a wrapper around another BackOff, which will +return Stop if NextBackOff() has been called too many times since +the last time Reset() was called + +Note: Implementation is not thread-safe. +*/ +func WithMaxRetries(b BackOff, max uint64) BackOff { + return &backOffTries{delegate: b, maxTries: max} +} + +type backOffTries struct { + delegate BackOff + maxTries uint64 + numTries uint64 +} + +func (b *backOffTries) NextBackOff() time.Duration { + if b.maxTries > 0 { + if b.maxTries <= b.numTries { + return Stop + } + b.numTries++ + } + return b.delegate.NextBackOff() +} + +func (b *backOffTries) Reset() { + b.numTries = 0 + b.delegate.Reset() +} diff --git a/vendor/github.com/fatih/camelcase/.travis.yml b/vendor/github.com/fatih/camelcase/.travis.yml new file mode 100644 index 0000000..3489e38 --- /dev/null +++ b/vendor/github.com/fatih/camelcase/.travis.yml @@ -0,0 +1,3 @@ +language: go +go: 1.x + diff --git a/vendor/github.com/fatih/camelcase/LICENSE.md b/vendor/github.com/fatih/camelcase/LICENSE.md new file mode 100644 index 0000000..aa4a536 --- /dev/null +++ b/vendor/github.com/fatih/camelcase/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2015 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including 
without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/fatih/camelcase/README.md b/vendor/github.com/fatih/camelcase/README.md new file mode 100644 index 0000000..105a6ae --- /dev/null +++ b/vendor/github.com/fatih/camelcase/README.md @@ -0,0 +1,58 @@ +# CamelCase [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/camelcase) [![Build Status](http://img.shields.io/travis/fatih/camelcase.svg?style=flat-square)](https://travis-ci.org/fatih/camelcase) + +CamelCase is a Golang (Go) package to split the words of a camelcase type +string into a slice of words. It can be used to convert a camelcase word (lower +or upper case) into any type of word. + +## Splitting rules: + +1. If string is not valid UTF-8, return it without splitting as + single item array. +2. Assign all unicode characters into one of 4 sets: lower case + letters, upper case letters, numbers, and all other characters. +3. Iterate through characters of string, introducing splits + between adjacent characters that belong to different sets. +4. 
Iterate through array of split strings, and if a given string + is upper case: + * if subsequent string is lower case: + * move last character of upper case string to beginning of + lower case string + +## Install + +```bash +go get github.com/fatih/camelcase +``` + +## Usage and examples + +```go +splitted := camelcase.Split("GolangPackage") + +fmt.Println(splitted[0], splitted[1]) // prints: "Golang", "Package" +``` + +Both lower camel case and upper camel case are supported. For more info please +check: [http://en.wikipedia.org/wiki/CamelCase](http://en.wikipedia.org/wiki/CamelCase) + +Below are some example cases: + +``` +"" => [] +"lowercase" => ["lowercase"] +"Class" => ["Class"] +"MyClass" => ["My", "Class"] +"MyC" => ["My", "C"] +"HTML" => ["HTML"] +"PDFLoader" => ["PDF", "Loader"] +"AString" => ["A", "String"] +"SimpleXMLParser" => ["Simple", "XML", "Parser"] +"vimRPCPlugin" => ["vim", "RPC", "Plugin"] +"GL11Version" => ["GL", "11", "Version"] +"99Bottles" => ["99", "Bottles"] +"May5" => ["May", "5"] +"BFG9000" => ["BFG", "9000"] +"BöseÜberraschung" => ["Böse", "Überraschung"] +"Two spaces" => ["Two", " ", "spaces"] +"BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"] +``` diff --git a/vendor/github.com/fatih/camelcase/camelcase.go b/vendor/github.com/fatih/camelcase/camelcase.go new file mode 100644 index 0000000..02160c9 --- /dev/null +++ b/vendor/github.com/fatih/camelcase/camelcase.go @@ -0,0 +1,90 @@ +// Package camelcase is a micro package to split the words of a camelcase type +// string into a slice of words. +package camelcase + +import ( + "unicode" + "unicode/utf8" +) + +// Split splits the camelcase word and returns a list of words. It also +// supports digits. Both lower camel case and upper camel case are supported. 
+// For more info please check: http://en.wikipedia.org/wiki/CamelCase +// +// Examples +// +// "" => [""] +// "lowercase" => ["lowercase"] +// "Class" => ["Class"] +// "MyClass" => ["My", "Class"] +// "MyC" => ["My", "C"] +// "HTML" => ["HTML"] +// "PDFLoader" => ["PDF", "Loader"] +// "AString" => ["A", "String"] +// "SimpleXMLParser" => ["Simple", "XML", "Parser"] +// "vimRPCPlugin" => ["vim", "RPC", "Plugin"] +// "GL11Version" => ["GL", "11", "Version"] +// "99Bottles" => ["99", "Bottles"] +// "May5" => ["May", "5"] +// "BFG9000" => ["BFG", "9000"] +// "BöseÜberraschung" => ["Böse", "Überraschung"] +// "Two spaces" => ["Two", " ", "spaces"] +// "BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"] +// +// Splitting rules +// +// 1) If string is not valid UTF-8, return it without splitting as +// single item array. +// 2) Assign all unicode characters into one of 4 sets: lower case +// letters, upper case letters, numbers, and all other characters. +// 3) Iterate through characters of string, introducing splits +// between adjacent characters that belong to different sets. +// 4) Iterate through array of split strings, and if a given string +// is upper case: +// if subsequent string is lower case: +// move last character of upper case string to beginning of +// lower case string +func Split(src string) (entries []string) { + // don't split invalid utf8 + if !utf8.ValidString(src) { + return []string{src} + } + entries = []string{} + var runes [][]rune + lastClass := 0 + class := 0 + // split into fields based on class of unicode character + for _, r := range src { + switch true { + case unicode.IsLower(r): + class = 1 + case unicode.IsUpper(r): + class = 2 + case unicode.IsDigit(r): + class = 3 + default: + class = 4 + } + if class == lastClass { + runes[len(runes)-1] = append(runes[len(runes)-1], r) + } else { + runes = append(runes, []rune{r}) + } + lastClass = class + } + // handle upper case -> lower case sequences, e.g. 
+ // "PDFL", "oader" -> "PDF", "Loader" + for i := 0; i < len(runes)-1; i++ { + if unicode.IsUpper(runes[i][0]) && unicode.IsLower(runes[i+1][0]) { + runes[i+1] = append([]rune{runes[i][len(runes[i])-1]}, runes[i+1]...) + runes[i] = runes[i][:len(runes[i])-1] + } + } + // construct []string from results + for _, s := range runes { + if len(s) > 0 { + entries = append(entries, string(s)) + } + } + return +} diff --git a/vendor/github.com/taskcluster/httpbackoff/v3/.gitignore b/vendor/github.com/taskcluster/httpbackoff/v3/.gitignore new file mode 100644 index 0000000..d558e48 --- /dev/null +++ b/vendor/github.com/taskcluster/httpbackoff/v3/.gitignore @@ -0,0 +1,28 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +hack + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# Coverage reports +coverage.report diff --git a/vendor/github.com/taskcluster/httpbackoff/v3/.travis.yml b/vendor/github.com/taskcluster/httpbackoff/v3/.travis.yml new file mode 100644 index 0000000..7bd4a95 --- /dev/null +++ b/vendor/github.com/taskcluster/httpbackoff/v3/.travis.yml @@ -0,0 +1,30 @@ +language: go + +go: + - 1.12 + +before_install: + - go get github.com/axw/gocov/gocov + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover + +script: + - go test -v -coverprofile=coverage.report + +after_script: + - go tool cover -func=coverage.report + - ${HOME}/gopath/bin/goveralls -coverprofile=coverage.report -service=travis-ci + +# currently cannot customise per user fork, see: +# https://github.com/travis-ci/travis-ci/issues/1094 +notifications: + irc: + channels: + - "irc.mozilla.org#taskcluster-bots" + on_success: change + on_failure: always + template: + - "%{repository}#%{build_number} (%{branch} - %{commit} : %{author}): %{message}" + - "Change view : %{compare_url}" + - "Build 
details : %{build_url}" + - "Commit message : %{commit_message}" diff --git a/vendor/github.com/taskcluster/httpbackoff/v3/CODE_OF_CONDUCT.md b/vendor/github.com/taskcluster/httpbackoff/v3/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..498baa3 --- /dev/null +++ b/vendor/github.com/taskcluster/httpbackoff/v3/CODE_OF_CONDUCT.md @@ -0,0 +1,15 @@ +# Community Participation Guidelines + +This repository is governed by Mozilla's code of conduct and etiquette guidelines. +For more details, please read the +[Mozilla Community Participation Guidelines](https://www.mozilla.org/about/governance/policies/participation/). + +## How to Report +For more information on how to report violations of the Community Participation Guidelines, please read our '[How to Report](https://www.mozilla.org/about/governance/policies/participation/reporting/)' page. + + diff --git a/vendor/github.com/taskcluster/httpbackoff/v3/CONTRIBUTING.md b/vendor/github.com/taskcluster/httpbackoff/v3/CONTRIBUTING.md new file mode 100644 index 0000000..3ab3a82 --- /dev/null +++ b/vendor/github.com/taskcluster/httpbackoff/v3/CONTRIBUTING.md @@ -0,0 +1,28 @@ +# How to Contribute + +We welcome pull requests from everyone. We do expect everyone to adhere to the [Mozilla Community Participation Guidelines][participation]. + +If you're trying to figure out what to work on, here are some places to find suitable projects: +* [Good first bugs][goodfirstbug]: these are scoped to make it easy for first-time contributors to get their feet wet with Taskcluster code. +* [Mentored bugs][bugsahoy]: these are slightly more involved projects that may require insight or guidance from someone on the Taskcluster team. +* [Full list of open issues][issues]: everything else + +If the project you're interested in working on isn't covered by a bug or issue, or you're unsure about how to proceed on an existing issue, it's a good idea to talk to someone on the Taskcluster team before you go too far down a particular path. 
You can find us in the #taskcluster channel on [Mozilla's IRC server][irc] to discuss. You can also simply add a comment to the issue or bug. + +Once you've found an issue to work on and written a patch, submit a pull request. Some things that will increase the chance that your pull request is accepted: + +* Follow our [best practices][bestpractices]. +* This includes [writing or updating tests][testing]. +* Write a [good commit message][commit]. + +Welcome to the team! + +[participation]: https://www.mozilla.org/en-US/about/governance/policies/participation/ +[issues]: ../../issues +[bugsahoy]: https://www.joshmatthews.net/bugsahoy/?taskcluster=1 +[goodfirstbug]: http://www.joshmatthews.net/bugsahoy/?taskcluster=1&simple=1 +[irc]: https://wiki.mozilla.org/IRC +[bestpractices]: https://docs.taskcluster.net/docs/manual/design/devel/best-practices +[testing]: https://docs.taskcluster.net/docs/manual/design/devel/best-practices/testing +[commit]: https://docs.taskcluster.net/docs/manual/design/devel/best-practices/commits + diff --git a/vendor/github.com/taskcluster/httpbackoff/v3/LICENSE b/vendor/github.com/taskcluster/httpbackoff/v3/LICENSE new file mode 100644 index 0000000..a612ad9 --- /dev/null +++ b/vendor/github.com/taskcluster/httpbackoff/v3/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. 
"Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. 
"Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/taskcluster/httpbackoff/v3/README.md b/vendor/github.com/taskcluster/httpbackoff/v3/README.md new file mode 100644 index 0000000..885a838 --- /dev/null +++ b/vendor/github.com/taskcluster/httpbackoff/v3/README.md @@ -0,0 +1,119 @@ + + +# httpbackoff + +[![Build Status](https://travis-ci.org/taskcluster/httpbackoff.svg?branch=master)](https://travis-ci.org/taskcluster/httpbackoff) +[![GoDoc](https://godoc.org/github.com/taskcluster/httpbackoff?status.svg)](https://godoc.org/github.com/taskcluster/httpbackoff) +[![Coverage Status](https://coveralls.io/repos/taskcluster/httpbackoff/badge.svg?branch=master&service=github)](https://coveralls.io/github/taskcluster/httpbackoff?branch=master) +[![License](https://img.shields.io/badge/license-MPL%202.0-orange.svg)](http://mozilla.org/MPL/2.0) + +Automatic http retries for intermittent failures, with exponential backoff, +based on https://github.com/cenkalti/backoff. + +The reason for a separate library, is that this library handles http status +codes to know whether to retry or not. HTTP codes in range 500-599 are +retried. Connection failures are also retried. Status codes 400-499 are +considered permanent errors and are not retried. + +The Retry function performs the http request and retries if temporary errors +occur. It takes a single parameter as its input - a function to perform the +http request. This function must return `(resp *http.Response, tempError error, +permError error)` where `tempError` must be non-nil if a temporary error occurs +(e.g. dropped connection), and `permError` must be non-nil if an error occurs +that does not warrant retrying the request (e.g. 
badly formed url). + +For example, the following code that is not using retries: + +```go +res, err := http.Get("http://www.google.com/robots.txt") +``` + +can be rewritten as: + +```go +res, attempts, err := httpbackoff.Retry(func() (*http.Response, error, error) { + resp, err := http.Get("http://www.google.com/robots.txt") + // assume all errors are temporary + return resp, err, nil +}) +``` + +Please note the additional return value `attempts` is an `int` specifying how +many http calls were made (i.e. = 1 if no retries, otherwise > 1). + +The go http package has 9 functions that return `(*http.Reponse, error)` that +can potentially be retried: + +* http://golang.org/pkg/net/http/#Client.Do +* http://golang.org/pkg/net/http/#Client.Get +* http://golang.org/pkg/net/http/#Client.Head +* http://golang.org/pkg/net/http/#Client.Post +* http://golang.org/pkg/net/http/#Client.PostForm +* http://golang.org/pkg/net/http/#Get +* http://golang.org/pkg/net/http/#Head +* http://golang.org/pkg/net/http/#Post +* http://golang.org/pkg/net/http/#PostForm + +To simplify using these functions, 9 utility functions have been written that +wrap these. Therefore you can simplify this example above further with: + +```go +res, _, err := httpbackoff.Get("http://www.google.com/robots.txt") +``` + +## Configuring back off settings + +To use cusom back off settings (for example, in testing, you might want to fail quickly), instead of calling the package functions, you can call methods of HTTPRetryClient with the same name: + +```go +package main + +import ( + "log" + "net/http/httputil" + "time" + + "github.com/cenkalti/backoff" + "github.com/taskcluster/httpbackoff" +) + +func main() { + // Note, you only need to create a client if you want to customise + // the back off settings. In this example, we want to, but if you + // wish to use the reasonable default settings, no need to do this. 
+ retryClient := httpbackoff.Client{ + BackOffSettings: &backoff.ExponentialBackOff{ + InitialInterval: 1 * time.Millisecond, + RandomizationFactor: 0.2, + Multiplier: 1.2, + MaxInterval: 5 * time.Millisecond, + MaxElapsedTime: 20 * time.Millisecond, + Clock: backoff.SystemClock, + }, + } + + res, _, err := retryClient.Get("http://www.google.com/robots.txt") + if err != nil { + log.Fatalf("%s", err) + } + data, err := httputil.DumpResponse(res, true) + if err != nil { + log.Fatalf("%s", err) + } + log.Print(string(data)) +} +``` + +## Testing + +The package has tests, which run in travis. See http://travis-ci.org/taskcluster/httpbackoff. + +## Concurrency + +As far as I am aware, there is nothing in this library that prevents it from +being used concurrently. Please let me know if you find any problems! + +## Contributing +Contributions are welcome. Please fork, and issue a Pull Request back with an +explanation of your changes. Also please include tests for any functional +changes. diff --git a/vendor/github.com/taskcluster/httpbackoff/v3/go.mod b/vendor/github.com/taskcluster/httpbackoff/v3/go.mod new file mode 100644 index 0000000..7b45b0a --- /dev/null +++ b/vendor/github.com/taskcluster/httpbackoff/v3/go.mod @@ -0,0 +1,5 @@ +module github.com/taskcluster/httpbackoff/v3 + +require github.com/cenkalti/backoff/v3 v3.0.0 + +go 1.12 diff --git a/vendor/github.com/taskcluster/httpbackoff/v3/go.sum b/vendor/github.com/taskcluster/httpbackoff/v3/go.sum new file mode 100644 index 0000000..8d1b4d2 --- /dev/null +++ b/vendor/github.com/taskcluster/httpbackoff/v3/go.sum @@ -0,0 +1,4 @@ +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= +github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= diff --git 
a/vendor/github.com/taskcluster/httpbackoff/v3/httpbackoff.go b/vendor/github.com/taskcluster/httpbackoff/v3/httpbackoff.go new file mode 100644 index 0000000..6b7dbe8 --- /dev/null +++ b/vendor/github.com/taskcluster/httpbackoff/v3/httpbackoff.go @@ -0,0 +1,287 @@ +// This package provides exponential backoff support for making HTTP requests. +// +// It uses the github.com/cenkalti/backoff algorithm. +// +// Network failures and HTTP 5xx status codes qualify for retries. +// +// HTTP calls that return HTTP 4xx status codes do not get retried. +// +// If the last HTTP request made does not result in a 2xx HTTP status code, an +// error is returned, together with the data. +// +// There are several utility methods that wrap the standard net/http package +// calls. +// +// Any function that takes no arguments and returns (*http.Response, error) can +// be retried using this library's Retry function. +// +// The methods in this library should be able to run concurrently in multiple +// go routines. +// +// Example Usage +// +// Consider this trivial HTTP GET request: +// +// res, err := http.Get("http://www.google.com/robots.txt") +// +// This can be rewritten as follows, enabling automatic retries: +// +// res, attempts, err := httpbackoff.Get("http://www.google.com/robots.txt") +// +// The variable attempts stores the number of http calls that were made (one +// plus the number of retries). +package httpbackoff + +import ( + "bufio" + "bytes" + "log" + "net/http" + "net/http/httputil" + "net/url" + "strconv" + "time" + + "github.com/cenkalti/backoff/v3" +) + +var defaultClient Client = Client{ + BackOffSettings: backoff.NewExponentialBackOff(), +} + +type Client struct { + BackOffSettings *backoff.ExponentialBackOff +} + +// Any non 2xx HTTP status code is considered a bad response code, and will +// result in a BadHttpResponseCode. 
+type BadHttpResponseCode struct { + HttpResponseCode int + Message string +} + +// Returns an error message for this bad HTTP response code +func (err BadHttpResponseCode) Error() string { + return err.Message +} + +// Retry is the core library method for retrying http calls. +// +// httpCall should be a function that performs the http operation, and returns +// (resp *http.Response, tempError error, permError error). Errors that should +// cause retries should be returned as tempError. Permanent errors that should +// not result in retries should be returned as permError. Retries are performed +// using the exponential backoff algorithm from the github.com/cenkalti/backoff +// package. Retry automatically treats HTTP 5xx status codes as a temporary +// error, and any other non-2xx HTTP status codes as a permanent error. Thus +// httpCall function does not need to handle the HTTP status code of resp, +// since Retry will take care of it. +// +// Concurrent use of this library method is supported. +func (httpRetryClient *Client) Retry(httpCall func() (resp *http.Response, tempError error, permError error)) (*http.Response, int, error) { + var tempError, permError error + var response *http.Response + attempts := 0 + doHttpCall := func() error { + response, tempError, permError = httpCall() + attempts += 1 + if tempError != nil { + return tempError + } + if permError != nil { + return nil + } + // only call this if there is a non 2xx response + body := func(response *http.Response) string { + // this is a no-op + raw, err := httputil.DumpResponse(response, true) + if err == nil { + return string(raw) + } + return "" + } + // now check if http response code is such that we should retry [500, 600)... 
+ if respCode := response.StatusCode; respCode/100 == 5 { + return BadHttpResponseCode{ + HttpResponseCode: respCode, + Message: "(Intermittent) HTTP response code " + strconv.Itoa(respCode) + "\n" + body(response), + } + } + // now check http response code is ok [200, 300)... + if respCode := response.StatusCode; respCode/100 != 2 { + permError = BadHttpResponseCode{ + HttpResponseCode: respCode, + Message: "(Permanent) HTTP response code " + strconv.Itoa(respCode) + "\n" + body(response), + } + return nil + } + return nil + } + + // Make HTTP API calls using an exponential backoff algorithm... + b := backoff.ExponentialBackOff(*httpRetryClient.BackOffSettings) + backoff.RetryNotify(doHttpCall, &b, func(err error, wait time.Duration) { + log.Printf("Error: %s", err) + }) + + switch { + case permError != nil: + return response, attempts, permError + case tempError != nil: + return response, attempts, tempError + default: + return response, attempts, nil + } +} + +// Retry works the same as HTTPRetryClient.Retry, but uses the default exponential back off settings +func Retry(httpCall func() (resp *http.Response, tempError error, permError error)) (*http.Response, int, error) { + return defaultClient.Retry(httpCall) +} + +// Retry wrapper for http://golang.org/pkg/net/http/#Get where attempts is the number of http calls made (one plus number of retries). +func (httpRetryClient *Client) Get(url string) (resp *http.Response, attempts int, err error) { + return httpRetryClient.Retry(func() (*http.Response, error, error) { + resp, err := http.Get(url) + // assume all errors should result in a retry + return resp, err, nil + }) +} + +// Get works the same as HTTPRetryClient.Get, but uses the default exponential back off settings +func Get(url string) (resp *http.Response, attempts int, err error) { + return defaultClient.Get(url) +} + +// Retry wrapper for http://golang.org/pkg/net/http/#Head where attempts is the number of http calls made (one plus number of retries). 
+func (httpRetryClient *Client) Head(url string) (resp *http.Response, attempts int, err error) { + return httpRetryClient.Retry(func() (*http.Response, error, error) { + resp, err := http.Head(url) + // assume all errors should result in a retry + return resp, err, nil + }) +} + +// Head works the same as HTTPRetryClient.Head, but uses the default exponential back off settings +func Head(url string) (resp *http.Response, attempts int, err error) { + return defaultClient.Head(url) +} + +// Retry wrapper for http://golang.org/pkg/net/http/#Post where attempts is the number of http calls made (one plus number of retries). +func (httpRetryClient *Client) Post(url string, bodyType string, body []byte) (resp *http.Response, attempts int, err error) { + return httpRetryClient.Retry(func() (*http.Response, error, error) { + resp, err := http.Post(url, bodyType, bytes.NewBuffer(body)) + // assume all errors should result in a retry + return resp, err, nil + }) +} + +// Post works the same as HTTPRetryClient.Post, but uses the default exponential back off settings +func Post(url string, bodyType string, body []byte) (resp *http.Response, attempts int, err error) { + return defaultClient.Post(url, bodyType, body) +} + +// Retry wrapper for http://golang.org/pkg/net/http/#PostForm where attempts is the number of http calls made (one plus number of retries). 
+func (httpRetryClient *Client) PostForm(url string, data url.Values) (resp *http.Response, attempts int, err error) { + return httpRetryClient.Retry(func() (*http.Response, error, error) { + resp, err := http.PostForm(url, data) + // assume all errors should result in a retry + return resp, err, nil + }) +} + +// PostForm works the same as HTTPRetryClient.PostForm, but uses the default exponential back off settings +func PostForm(url string, data url.Values) (resp *http.Response, attempts int, err error) { + return defaultClient.PostForm(url, data) +} + +// Retry wrapper for http://golang.org/pkg/net/http/#Client.Do where attempts is the number of http calls made (one plus number of retries). +func (httpRetryClient *Client) ClientDo(c *http.Client, req *http.Request) (resp *http.Response, attempts int, err error) { + rawReq, err := httputil.DumpRequestOut(req, true) + // fatal + if err != nil { + return nil, 0, err + } + return httpRetryClient.Retry(func() (*http.Response, error, error) { + newReq, err := http.ReadRequest(bufio.NewReader(bytes.NewBuffer(rawReq))) + newReq.RequestURI = "" + newReq.URL = req.URL + // If the original request doesn't explicitly set Accept-Encoding, then + // the go standard library will add it, and allow gzip compression, and + // magically unzip the response transparently. This wouldn't be too + // much of a problem, except that if the header is explicitly set, then + // the standard library won't automatically unzip the response. This is + // arguably a bug in the standard library but we'll work around it by + // checking this specific condition. 
+ if req.Header.Get("Accept-Encoding") == "" { + newReq.Header.Del("Accept-Encoding") + } + if err != nil { + return nil, nil, err // fatal + } + resp, err := c.Do(newReq) + // assume all errors should result in a retry + return resp, err, nil + }) +} + +// ClientDo works the same as HTTPRetryClient.ClientDo, but uses the default exponential back off settings +func ClientDo(c *http.Client, req *http.Request) (resp *http.Response, attempts int, err error) { + return defaultClient.ClientDo(c, req) +} + +// Retry wrapper for http://golang.org/pkg/net/http/#Client.Get where attempts is the number of http calls made (one plus number of retries). +func (httpRetryClient *Client) ClientGet(c *http.Client, url string) (resp *http.Response, attempts int, err error) { + return httpRetryClient.Retry(func() (*http.Response, error, error) { + resp, err := c.Get(url) + // assume all errors should result in a retry + return resp, err, nil + }) +} + +// ClientGet works the same as HTTPRetryClient.ClientGet, but uses the default exponential back off settings +func ClientGet(c *http.Client, url string) (resp *http.Response, attempts int, err error) { + return defaultClient.ClientGet(c, url) +} + +// Retry wrapper for http://golang.org/pkg/net/http/#Client.Head where attempts is the number of http calls made (one plus number of retries). 
+func (httpRetryClient *Client) ClientHead(c *http.Client, url string) (resp *http.Response, attempts int, err error) { + return httpRetryClient.Retry(func() (*http.Response, error, error) { + resp, err := c.Head(url) + // assume all errors should result in a retry + return resp, err, nil + }) +} + +// ClientHead works the same as HTTPRetryClient.ClientHead, but uses the default exponential back off settings +func ClientHead(c *http.Client, url string) (resp *http.Response, attempts int, err error) { + return defaultClient.ClientHead(c, url) +} + +// Retry wrapper for http://golang.org/pkg/net/http/#Client.Post where attempts is the number of http calls made (one plus number of retries). +func (httpRetryClient *Client) ClientPost(c *http.Client, url string, bodyType string, body []byte) (resp *http.Response, attempts int, err error) { + return httpRetryClient.Retry(func() (*http.Response, error, error) { + resp, err := c.Post(url, bodyType, bytes.NewBuffer(body)) + // assume all errors should result in a retry + return resp, err, nil + }) +} + +// ClientPost works the same as HTTPRetryClient.ClientPost, but uses the default exponential back off settings +func ClientPost(c *http.Client, url string, bodyType string, body []byte) (resp *http.Response, attempts int, err error) { + return defaultClient.ClientPost(c, url, bodyType, body) +} + +// Retry wrapper for http://golang.org/pkg/net/http/#Client.PostForm where attempts is the number of http calls made (one plus number of retries). 
+func (httpRetryClient *Client) ClientPostForm(c *http.Client, url string, data url.Values) (resp *http.Response, attempts int, err error) { + return httpRetryClient.Retry(func() (*http.Response, error, error) { + resp, err := c.PostForm(url, data) + // assume all errors should result in a retry + return resp, err, nil + }) +} + +// ClientPostForm works the same as HTTPRetryClient.ClientPostForm, but uses the default exponential back off settings +func ClientPostForm(c *http.Client, url string, data url.Values) (resp *http.Response, attempts int, err error) { + return defaultClient.ClientPostForm(c, url, data) +} diff --git a/vendor/github.com/taskcluster/jsonschema2go/LICENSE b/vendor/github.com/taskcluster/jsonschema2go/LICENSE new file mode 100644 index 0000000..a612ad9 --- /dev/null +++ b/vendor/github.com/taskcluster/jsonschema2go/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. 
"Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. 
+ +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/taskcluster/jsonschema2go/text/text.go b/vendor/github.com/taskcluster/jsonschema2go/text/text.go new file mode 100644 index 0000000..30c4817 --- /dev/null +++ b/vendor/github.com/taskcluster/jsonschema2go/text/text.go @@ -0,0 +1,239 @@ +// Package text contains utility functions for manipulating raw text strings +package text + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" + + "github.com/fatih/camelcase" +) + +// See https://golang.org/ref/spec#Keywords +var reservedKeyWords = map[string]bool{ + "break": true, + "case": true, + "chan": true, + "const": true, + "continue": true, + "default": true, + "defer": true, + "else": true, + "fallthrough": true, + "for": true, + "func": true, + "go": true, + "goto": true, + "if": true, + "import": true, + "interface": true, + "map": true, + "package": true, + "range": true, + "return": true, + "select": true, + "struct": true, + "switch": true, + "type": true, + "var": true, +} + +// taken from https://github.com/golang/lint/blob/32a87160691b3c96046c0c678fe57c5bef761456/lint.go#L702 +var commonInitialisms = map[string]bool{ + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTP": true, + "HTTPS": true, + "ID": true, + "IP": true, + "JSON": true, + "LHS": true, + "OS": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XSRF": true, + "XSS": true, +} 
+ +// Indent indents a block of text with an indent string. It does this by +// placing the given indent string at the front of every line, except on the +// last line, if the last line has no characters. This special treatment +// simplifies the generation of nested text structures. +func Indent(text, indent string) string { + if text == "" { + return text + } + if text[len(text)-1:] == "\n" { + result := "" + for _, j := range strings.Split(text[:len(text)-1], "\n") { + result += indent + j + "\n" + } + return result + } + result := "" + for _, j := range strings.Split(strings.TrimRight(text, "\n"), "\n") { + result += indent + j + "\n" + } + return result[:len(result)-1] +} + +// Underline returns the provided text together with a new line character and a +// line of "=" characters whose length is equal to the maximum line length in +// the provided text, followed by a final newline character. +func Underline(text string) string { + var maxlen int + for _, j := range strings.Split(text, "\n") { + if len(j) > maxlen { + maxlen = len(j) + } + } + return text + "\n" + strings.Repeat("=", maxlen) + "\n" +} + +// Returns a string of the same length, filled with "*"s. +func StarOut(text string) string { + return strings.Repeat("*", len(text)) +} + +// GoIdentifierFrom provides a mechanism to mutate an arbitrary descriptive +// string (name) into a Go identifier (variable name, function name, etc) that +// e.g. can be used in generated code, taking into account a blacklist of names +// that should not be used, plus the blacklist of the go language reserved key +// words (https://golang.org/ref/spec#Keywords), in order to guarantee that a +// new name is created which will not conflict with an existing type. 
+// +// Identifier syntax: https://golang.org/ref/spec#Identifiers +// +// Strategy to convert arbitrary unicode string to a valid identifier: +// +// 1) Ensure name is valid UTF-8; if not, replace it with empty string +// +// 2) Split name into arrays of allowed runes (words), by considering a run of +// disallowed unicode characters to act as a separator, where allowed runes +// include unicode letters, unicode numbers, and '_' character (disallowed +// runes are discarded) +// +// 3) Split words further into sub words, by decomposing camel case words as +// per https://github.com/fatih/camelcase#usage-and-examples +// +// 4) Designate the case of all subwords of all words to be uppercase, with the +// exception of the first subword of the first word, which should be lowercase +// if exported is false, otherwise uppercase +// +// 5) For each subword of each word, adjust as follows: if designated as +// lowercase, lowercase all characters of the subword; if designated as +// uppercase, then if recognised as a common "initialism", then uppercase all +// the characters of the subword, otherwise uppercase only the first character +// of the subword. Common "Initialisms" are defined as per: +// https://github.com/golang/lint/blob/32a87160691b3c96046c0c678fe57c5bef761456/lint.go#L702 +// +// 6) Rejoin subwords to form a single word +// +// 7) Rejoin words into a single string +// +// 8) If the string starts with a number, add a leading `_` +// +// 9) If the string is the empty string or "_", set as "Identifier" +// +// 10) If the resulting identifier is in the given blacklist, or the list of +// reserved key words (https://golang.org/ref/spec#Keywords), append the lowest +// integer possible, >= 1, that results in no blacklist conflict +// +// 11) Add the new name to the given blacklist +// +// Note, the `map[string]bool` construction is simply a mechanism to implement +// set semantics; a value of `true` signifies inclusion in the set. 
+// Non-existence is equivalent to existence with a value of `false`; therefore +// it is recommended to only store `true` values. +func GoIdentifierFrom(name string, exported bool, blacklist map[string]bool) (identifier string) { + if !utf8.ValidString(name) { + name = "" + } + for i, word := range strings.FieldsFunc( + name, + func(c rune) bool { + return !unicode.IsLetter(c) && !unicode.IsNumber(c) && c != '_' + }, + ) { + caseAdaptedWord := "" + for j, subWord := range camelcase.Split(word) { + caseAdaptedWord += fixCase(subWord, i == 0 && j == 0 && !exported) + } + identifier += caseAdaptedWord + } + + if strings.IndexFunc( + identifier, + func(c rune) bool { + return unicode.IsNumber(c) + }, + ) == 0 { + identifier = "_" + identifier + } + + if identifier == "" || identifier == "_" { + identifier = "Identifier" + } + + // If name already exists, add an integer suffix to name. Start with "1" and increment + // by 1 until an unused name is found. Example: if name FooBar was generated four times + // , the first instance would be called FooBar, then the next would be FooBar1, the next + // FooBar2 and the last would be assigned a name of FooBar3. We do this to guarantee we + // don't use duplicate names for different logical entities. + for k, baseName := 1, identifier; blacklist[identifier] || reservedKeyWords[identifier]; { + identifier = fmt.Sprintf("%v%v", baseName, k) + k++ + } + blacklist[identifier] = true + return +} + +func fixCase(word string, makeLower bool) string { + if word == "" { + return "" + } + if makeLower { + return strings.ToLower(word) + } + upper := strings.ToUpper(word) + if commonInitialisms[upper] { + return upper + } + firstRune, size := utf8.DecodeRuneInString(word) + remainingString := word[size:] + return string(unicode.ToUpper(firstRune)) + remainingString +} + +// Returns the indefinite article (in English) for a the given noun, which is +// 'an' for nouns beginning with a vowel, otherwise 'a'. 
+func IndefiniteArticle(noun string) string { + if strings.ContainsRune("AEIOUaeiou", rune(noun[0])) { + return "an" + } + return "a" +} diff --git a/vendor/github.com/taskcluster/slugid-go/LICENSE b/vendor/github.com/taskcluster/slugid-go/LICENSE new file mode 100644 index 0000000..a612ad9 --- /dev/null +++ b/vendor/github.com/taskcluster/slugid-go/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. 
"Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/taskcluster/slugid-go/slugid/slugid.go b/vendor/github.com/taskcluster/slugid-go/slugid/slugid.go new file mode 100644 index 0000000..e843339 --- /dev/null +++ b/vendor/github.com/taskcluster/slugid-go/slugid/slugid.go @@ -0,0 +1,113 @@ +// Licensed under the Mozilla Public Licence 2.0. +// https://www.mozilla.org/en-US/MPL/2.0 +// +// ************** +// * Slugid API * +// ************** +// +// @)@) +// _|_| ( ) +// _(___,`\ _,--------------._ (( /`, )) +// `==` `*-_,' O `~._ ( ( _/ | ) ) +// `, : o } `~._.~` * ', +// \ - _ O - ,' +// | ; - - " ; o / +// | O o ,-` +// \ _,-:""""""'`:-._ - . O / +// `""""""~'` `._ _,-` +// """""" + +// A go (golang) module for generating v4 UUIDs and encoding them into 22 +// character URL-safe base64 slug representation (see [RFC 4648 sec. +// 5](http://tools.ietf.org/html/rfc4648#section-5)). +// +// Slugs are url-safe base64 encoded v4 uuids, stripped of base64 `=` padding. +// +// There are two methods for generating slugs - `slugid.V4()` and +// `slugid.Nice()`. +// +// V4 Slugs +// +// The `slugid.V4()` method returns a slug from a randomly generated v4 uuid. +// +// Nice slugs +// +// The `slugid.Nice()` method returns a v4 slug which conforms to a set of +// "nice" properties. At the moment the only "nice" property is that the slug +// starts with `[A-Za-f]`, which in turn implies that the first (most +// significant) bit of its associated uuid is set to 0. +// +// The purpose of the `slugid.Nice()` method is to support having slugids which +// can be used in more contexts safely. Regular slugids can safely be used in +// urls, and for example in AMQP routing keys. 
However, slugs beginning with `-` +// may cause problems when used as command line parameters. +// +// In contrast, slugids generated by the `slugid.Nice()` method can safely be +// used as command line parameters. This comes at a cost to entropy (121 bits vs +// 122 bits for regular v4 slugs). +// +// Choosing which slug generation method to use +// +// Slug consumers should consider carefully which of these two slug generation +// methods to call. Is it more important to have maximum entropy, or to have +// slugids that do not need special treatment when used as command line +// parameters? This is especially important if you are providing a service which +// supplies slugs to unexpecting tool developers downstream, who may not realise +// the risks of using your regular v4 slugs as command line parameters, especially +// since this would arise only as an intermittent issue (one time in 64). +// +// Generated slugs take the form `[A-Za-z0-9_-]{22}`, or more precisely: +// +// `slugid.V4()` slugs conform to +// `[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]` +// +// `slugid.Nice()` slugs conform to +// `[A-Za-f][A-Za-z0-9_-]{7}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]` +// +// RFC 4122 defines the setting of 6 bits of the v4 UUID which implies v4 slugs +// provide 128 - 6 = 122 bits entropy. Due to the (un)setting of the first bit +// of "nice" slugs, nice slugs provide therefore 121 bits entropy. +package slugid + +import ( + "encoding/base64" + + "github.com/pborman/uuid" +) + +// Returns the given uuid.UUID object as a 22 character slug. This can be a +// regular v4 slug or a "nice" slug. 
+func Encode(uuid_ uuid.UUID) string { + return base64.URLEncoding.EncodeToString(uuid_)[:22] // Drop '==' padding +} + +// Returns the uuid.UUID object represented by the given v4 or "nice" slug, or +// nil if it cannot be decoded +func Decode(slug string) uuid.UUID { + uuid_, err := base64.URLEncoding.DecodeString(slug + "==") // b64 padding + if err != nil { + return nil + } + return uuid_ +} + +// Returns a randomly generated uuid v4 compliant slug +func V4() string { + return base64.URLEncoding.EncodeToString(uuid.NewRandom())[:22] // Drop '==' padding +} + +// Returns a randomly generated uuid v4 compliant slug which conforms to a set +// of "nice" properties, at the cost of some entropy. Currently this means one +// extra fixed bit (the first bit of the uuid is set to 0) which guarantees the +// slug will begin with [A-Za-f]. For example such slugs don't require special +// handling when used as command line parameters (whereas non-nice slugs may +// start with `-` which can confuse command line tools). +// +// Potentially other "nice" properties may be added in future to further +// restrict the range of potential uuids that may be generated. 
+func Nice() string { + rawBytes := uuid.NewRandom() + // unset most significant bit of first byte to ensure slug starts with [A-Za-f] + rawBytes[0] &= 0x7f + return base64.URLEncoding.EncodeToString(rawBytes)[:22] // Drop '==' padding +} diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/.eslintrc b/vendor/github.com/taskcluster/taskcluster-lib-urls/.eslintrc new file mode 100644 index 0000000..528d46d --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/.eslintrc @@ -0,0 +1,3 @@ +{ + "extends": "eslint-config-taskcluster" +} diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/.gitignore b/vendor/github.com/taskcluster/taskcluster-lib-urls/.gitignore new file mode 100644 index 0000000..edf2e2c --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/.gitignore @@ -0,0 +1,146 @@ +/user-config.yml +lib/ +.test/ + +# Logs +logs +*.log +npm-debug.log* + +# Runtime data +pids +*.pid +*.seed + +# Directory for instrumented libs generated by jscoverage/JSCover +lib-cov + +# Coverage directory used by tools like istanbul +coverage + +# Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) +.grunt + +# node-waf configuration +.lock-wscript + +# Compiled binary addons (http://nodejs.org/api/addons.html) +build/Release + +# Dependency directories +node_modules +jspm_packages + +# Optional npm cache directory +.npm + +# npm package lock file +package-lock.json + +# Optional REPL history +.node_repl_history + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/.gometalinter.json b/vendor/github.com/taskcluster/taskcluster-lib-urls/.gometalinter.json new file mode 100644 index 0000000..291500f --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/.gometalinter.json @@ -0,0 +1,25 @@ +{ + "Enable": [ + "deadcode", + "errcheck", + "goconst", + "goimports", + "golint", + "gosimple", + "gotype", + "gotypex", + "ineffassign", + "interfacer", + "maligned", + "megacheck", + "misspell", + "nakedret", + "safesql", + "staticcheck", + "structcheck", + "unconvert", + "unparam", + "unused", + "varcheck" + ] +} diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/.taskcluster.yml b/vendor/github.com/taskcluster/taskcluster-lib-urls/.taskcluster.yml new file mode 100644 index 0000000..bc4d6c8 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/.taskcluster.yml @@ -0,0 +1,173 @@ +version: 0 +metadata: + name: "taskcluster-lib-urls test" + description: "Library for building taskcluster urls" + owner: "{{ event.head.user.email }}" + source: "{{ event.head.repo.url }}" + +tasks: + - provisionerId: 
"{{ taskcluster.docker.provisionerId }}" + workerType: "{{ taskcluster.docker.workerType }}" + extra: + github: + env: true + events: + - pull_request.opened + - pull_request.synchronize + - pull_request.reopened + - push + payload: + maxRunTime: 3600 + image: "node:10" + command: + - "/bin/bash" + - "-lc" + - "git clone {{event.head.repo.url}} repo && cd repo && git checkout {{event.head.sha}} && yarn install && yarn test" + metadata: + name: "taskcluster-lib-urls node.js test" + description: "Library for building taskcluster urls" + owner: "{{ event.head.user.email }}" + source: "{{ event.head.repo.url }}" + - provisionerId: '{{ taskcluster.docker.provisionerId }}' + workerType: '{{ taskcluster.docker.workerType }}' + extra: + github: + events: + - push + branches: + - master + scopes: + - auth:aws-s3:read-write:taskcluster-raw-docs/taskcluster-lib-urls/ + payload: + maxRunTime: 3600 + image: taskcluster/upload-project-docs:latest + features: + taskclusterProxy: + true + command: + - /bin/bash + - '--login' + - '-cx' + - | + git clone {{event.head.repo.url}} repo + cd repo + git config advice.detachedHead false + git checkout {{event.head.sha}} + export DEBUG=* DOCS_PROJECT=taskcluster-lib-urls DOCS_TIER=libraries DOCS_FOLDER=docs DOCS_README=README.md + upload-project-docs + metadata: + name: "taskcluster-lib-urls docs upload" + description: "Upload documentation for this project" + owner: '{{ event.head.user.email }}' + source: '{{ event.head.repo.url }}' + - provisionerId: '{{ taskcluster.docker.provisionerId }}' + workerType: '{{ taskcluster.docker.workerType }}' + extra: + github: + events: + - pull_request.opened + - pull_request.synchronize + - pull_request.reopened + - push + payload: + maxRunTime: 3600 + image: 'golang:1.10' + command: + - /bin/bash + - '-c' + - | + mkdir -p /go/src/github.com/taskcluster/taskcluster-lib-urls + cd /go/src/github.com/taskcluster/taskcluster-lib-urls + git clone {{event.head.repo.url}} . 
+ git config advice.detachedHead false + git checkout {{event.head.sha}} + go get -v -d -t ./... + go test -v -race ./... + go get -u github.com/alecthomas/gometalinter + gometalinter --install + gometalinter + metadata: + name: "taskcluster-lib-urls go test" + description: Run library test suite - golang 1.10 + owner: '{{ event.head.user.email }}' + source: '{{ event.head.repo.url }}' + - provisionerId: '{{ taskcluster.docker.provisionerId }}' + workerType: '{{ taskcluster.docker.workerType }}' + extra: + github: + events: + - pull_request.opened + - pull_request.synchronize + - pull_request.reopened + - push + payload: + maxRunTime: 3600 + image: 'maven' + command: + - /bin/bash + - '-c' + - | + git clone {{event.head.repo.url}} repo + cd repo + git config advice.detachedHead false + git checkout {{event.head.sha}} + mvn -X -e install + metadata: + name: taskcluster-lib-urls java test + description: Run library test suite - java + owner: '{{ event.head.user.email }}' + source: '{{ event.head.repo.url }}' + - provisionerId: '{{ taskcluster.docker.provisionerId }}' + workerType: '{{ taskcluster.docker.workerType }}' + extra: + github: + events: + - pull_request.opened + - pull_request.synchronize + - pull_request.reopened + - push + payload: + maxRunTime: 3600 + image: 'python:2.7' + command: + - /bin/bash + - '-c' + - | + git clone {{event.head.repo.url}} repo + cd repo + git config advice.detachedHead false + git checkout {{event.head.sha}} + pip install tox + tox -e py27 + metadata: + name: "taskcluster-lib-urls python 2.7 test" + description: Run library test suite - python2.7 + owner: '{{ event.head.user.email }}' + source: '{{ event.head.repo.url }}' + - provisionerId: '{{ taskcluster.docker.provisionerId }}' + workerType: '{{ taskcluster.docker.workerType }}' + extra: + github: + events: + - pull_request.opened + - pull_request.synchronize + - pull_request.reopened + - push + payload: + maxRunTime: 3600 + image: 'python:3.6' + command: + - /bin/bash + - '-c' 
+ - | + git clone {{event.head.repo.url}} repo + cd repo + git config advice.detachedHead false + git checkout {{event.head.sha}} + pip install tox + tox -e py36 + metadata: + name: taskcluster-lib-urls python 3.6 test + description: Run library test suite - python3.6 + owner: '{{ event.head.user.email }}' + source: '{{ event.head.repo.url }}' diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/.travis.yml b/vendor/github.com/taskcluster/taskcluster-lib-urls/.travis.yml new file mode 100644 index 0000000..6fa9a8b --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/.travis.yml @@ -0,0 +1,30 @@ +language: node_js +sudo: false +node_js: +- '8' +- '10' +addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - g++-4.8 +caches: + yarn: true +env: + global: + - DEBUG='taskcluster-lib-urls test' + - CXX=g++-4.8 +notifications: + irc: + secure: ub7PLmoLr7fPYdORBBgH1VSF6esn7+XlNXh8zm3NBOf2RAsMAp+Hk2r+yD4PI72+tGiPbUGsKKsCHh6+Xq3RZc9h67bfMS8KLoZGuogap12i0MnwM5x03lLCRhLvW+r1Y35u8NOEOuaQXfPL6UmQ2EGzDzrjVHXHotFnTqPFSFxeIAnAi9BSjqjy/yBstn1ybX0etobc+Mm9CRubD2eblKVwAYJi98yWLz+w8ou3BXUA/9iHZFyda60SJ8BTqxhnh9J1UYESvEoUg0dfO0Tj+++xQBz/mFM05SuVw5fzp4u5rECGkcFwanyeubHjzxvBXn83gV1mQpe9F1kAXsGNHt/1+YeIjo6nsOHdjpzScHfV5Nf063n1RNOFgQc3l5hhWmEFX+qdQFE1yxMXou00H/7iXlA8dUvjHCswqsKOFHiH3v6VQQ5cXtgu8jB1N0ZWPPeS1xIyQlHWXdD2uxKp+PyCnlKEjorIhRgFyr6kdPWRUQW45OJVvaMHs+a5DTZntsLf524kmObkJAmTdfy8Amq7XIKIYzHEo0KFQA4ukKfWwXqu7ESEgoXjI1rIERa9qzGBHR1prPZsBoDrctppL949/RsM48oT0PB7BGVQI9pwGI4oFrBwH/dp5w45AFpIB32r2IlLdsdP5iNiVtGDgVLn03KMvQgnVXP4cK1FEwg= +deploy: + provider: npm + email: taskcluster-accounts@mozilla.com + skip_cleanup: true + api_key: + secure: 
Af1jZIlFU7rxuJS12fb5N4yyz0VHsvgS7haSr5+a9yrMuxWpxLPdDSVlB33KOyhKoUYUY+ayJQqN2I+WYtGf7BYHGgPe6kNR5cCOseH1cRfmPrvNhQEWMrc7stWa6cHOpbI/3gZn6jSigKf6vLm4Wr8fOR+TLQ6tDaKzpqoZW8W6TwJyeM+9BQUY3aHuR6qb+ItpfrtR12vsOtLqDG7KImo0NhIb3ae91/HjC+EqL0UUJWYiqxg7Nkk2ziCWPYL1b554cKXf5HjOQUzWolHDEDybS20H8K20PSMUk3SZ79TKdqI3asXc8+V6AGCmgnq6cT4zLBk0gx6PoZWU6y3/OI1IYRSs7BGdrCiBGRlgAGSXnHoVWA6NDbQrkUrBVmhBKjnhyhO/q8la6fAPqHP2LgEQ6/RvT8XS9y1jpUwt233P+rjsXCW+nyF09cZAZ/CTCwGFuyMhtQZycA5SFE/IDZ3B+B5VMK7+HnC8j2QS/OKaNZXJivvIDpZzKXDz3TsHIlOY2WgfzvHXgCGnuRSNdx6ulgW9RvNOI5/AeWOvdFb3rnb8y8d3jllk1GUDcwycJ0Y702QuwBTzGnrCFl8AR8uEwUY29QSlPgXie2mdweDt1A2xM8iQ9IQeC1nlm71f9msSsa3PDzchJ6R5Shafuiz59UjkfDsekJ6sWvXTkbM= + on: + tags: true + repo: taskcluster/taskcluster-lib-urls + node: '10' diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/CODE_OF_CONDUCT.md b/vendor/github.com/taskcluster/taskcluster-lib-urls/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..2d33e6b --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +# Mozilla Community Participation Guidelines + +The most recent version of the Mozilla Community Participation Guideline can always be found here: https://www.mozilla.org/en-US/about/governance/policies/participation/ diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/CONTRIBUTING.md b/vendor/github.com/taskcluster/taskcluster-lib-urls/CONTRIBUTING.md new file mode 100644 index 0000000..3ab3a82 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/CONTRIBUTING.md @@ -0,0 +1,28 @@ +# How to Contribute + +We welcome pull requests from everyone. We do expect everyone to adhere to the [Mozilla Community Participation Guidelines][participation]. + +If you're trying to figure out what to work on, here are some places to find suitable projects: +* [Good first bugs][goodfirstbug]: these are scoped to make it easy for first-time contributors to get their feet wet with Taskcluster code. 
+* [Mentored bugs][bugsahoy]: these are slightly more involved projects that may require insight or guidance from someone on the Taskcluster team. +* [Full list of open issues][issues]: everything else + +If the project you're interested in working on isn't covered by a bug or issue, or you're unsure about how to proceed on an existing issue, it's a good idea to talk to someone on the Taskcluster team before you go too far down a particular path. You can find us in the #taskcluster channel on [Mozilla's IRC server][irc] to discuss. You can also simply add a comment to the issue or bug. + +Once you've found an issue to work on and written a patch, submit a pull request. Some things that will increase the chance that your pull request is accepted: + +* Follow our [best practices][bestpractices]. +* This includes [writing or updating tests][testing]. +* Write a [good commit message][commit]. + +Welcome to the team! + +[participation]: https://www.mozilla.org/en-US/about/governance/policies/participation/ +[issues]: ../../issues +[bugsahoy]: https://www.joshmatthews.net/bugsahoy/?taskcluster=1 +[goodfirstbug]: http://www.joshmatthews.net/bugsahoy/?taskcluster=1&simple=1 +[irc]: https://wiki.mozilla.org/IRC +[bestpractices]: https://docs.taskcluster.net/docs/manual/design/devel/best-practices +[testing]: https://docs.taskcluster.net/docs/manual/design/devel/best-practices/testing +[commit]: https://docs.taskcluster.net/docs/manual/design/devel/best-practices/commits + diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/LICENSE b/vendor/github.com/taskcluster/taskcluster-lib-urls/LICENSE new file mode 100644 index 0000000..a612ad9 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. 
"Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. 
"Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. 
Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. 
Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. 
* +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. 
+Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. 
If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/MANIFEST.in b/vendor/github.com/taskcluster/taskcluster-lib-urls/MANIFEST.in new file mode 100644 index 0000000..2451f52 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/MANIFEST.in @@ -0,0 +1,4 @@ +include LICENSE +global-exclude *.py[co] +include specification.yml +include package.json diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/README.md b/vendor/github.com/taskcluster/taskcluster-lib-urls/README.md new file mode 100644 index 0000000..10fb412 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/README.md @@ -0,0 +1,252 @@ +# Taskcluster URL Building Library + +[![License](https://img.shields.io/badge/license-MPL%202.0-orange.svg)](http://mozilla.org/MPL/2.0) + +A simple library to generate URLs for various Taskcluster resources across our various deployment methods. + +This serves as both a simple shim for projects that use JavaScript but also is the reference implementation for +how we define these paths. + +URLs are defined in the 'Taskcluster URL Format' document. + +Changelog +--------- +View the changelog on the [releases page](https://github.com/taskcluster/taskcluster-lib-urls/releases). + +Requirements +------------ + +This is tested on and should run on any of Node.js `{8, 10}`. 
+ +JS Usage +-------- +[![Node.js Build Status](https://travis-ci.org/taskcluster/taskcluster-lib-urls.svg?branch=master)](https://travis-ci.org/taskcluster/taskcluster-lib-urls) +[![npm](https://img.shields.io/npm/v/taskcluster-lib-urls.svg?maxAge=2592000)](https://www.npmjs.com/package/taskcluster-lib-urls) + +This package exports several methods for generating URLs conditionally based on +a root URL, as well as a few helper classes for generating URLs for a pre-determined +root URL: + +* `api(rootUrl, service, version, path)` -> `String` +* `apiReference(rootUrl, service, version)` -> `String` +* `docs(rootUrl, path)` -> `String` +* `exchangeReference(rootUrl, service, version)` -> `String` +* `schema(rootUrl, service, schema)` -> `String` +* `apiManifestSchema(rootUrl, version)` -> `String` +* `apiReferenceSchema(rootUrl, version)` -> `String` +* `exchangesReferenceSchema(rootUrl, version)` -> `String` +* `metadataMetaschema(rootUrl)` -> `String` +* `ui(rootUrl, path)` -> `String` +* `apiManifest(rootUrl)` -> `String` +* `testRootUrl()` -> `String` +* `withRootUrl(rootUrl)` -> `Class` instance for above methods + +When the `rootUrl` is `https://taskcluster.net`, the generated URLs will be to the Heroku cluster. Otherwise they will follow the +[spec defined in this project](https://github.com/taskcluster/taskcluster-lib-urls/tree/master/docs/urls-spec.md). + +`testRootUrl()` is used to share a common fake `rootUrl` between various Taskcluster mocks in testing. +The URL does not resolve. 
+ +```js +// Specifying root URL every time: +const libUrls = require('taskcluster-lib-urls'); + +libUrls.api(rootUrl, 'auth', 'v1', 'foo/bar'); +libUrls.schema(rootUrl, 'auth', 'v1/foo.yml'); // Note that schema names have versions in them +libUrls.apiReference(rootUrl, 'auth', 'v1'); +libUrls.exchangeReference(rootUrl, 'auth', 'v1'); +libUrls.ui(rootUrl, 'foo/bar'); +libUrls.apiManifest(rootUrl); +libUrls.docs(rootUrl, 'foo/bar'); +``` + +```js +// Specifying root URL in advance: +const libUrls = require('taskcluster-lib-urls'); + +const urls = libUrls.withRoot(rootUrl); + +urls.api('auth', 'v1', 'foo/bar'); +urls.schema('auth', 'v1/foo.yml'); +urls.apiReference('auth', 'v1'); +urls.exchangeReference('auth', 'v1'); +urls.ui('foo/bar'); +urls.apiManifest(); +urls.docs('foo/bar'); +``` + +If you would like, you can set this up via [taskcluster-lib-loader](https://github.com/taskcluster/taskcluster-lib-loader) as follows: + +```js +{ + libUrlss: { + require: ['cfg'], + setup: ({cfg}) => withRootUrl(cfg.rootURl), + }, +} +``` + +Test with: + +``` +yarn install +yarn test +``` + + +Go Usage +-------- + +[![GoDoc](https://godoc.org/github.com/taskcluster/taskcluster-lib-urls?status.svg)](https://godoc.org/github.com/taskcluster/taskcluster-lib-urls) + +The go package exports the following functions: + +```go +func API(rootURL string, service string, version string, path string) string +func APIReference(rootURL string, service string, version string) string +func Docs(rootURL string, path string) string +func ExchangeReference(rootURL string, service string, version string) string +func Schema(rootURL string, service string, name string) string +func APIManifestSchema(rootURL string, version string) string +func APIReferenceSchema(rootURL string, version string) string +func ExchangesReferenceSchema(rootURL string, version string) string +func MetadataMetaschema(rootURL string) string +func UI(rootURL string, path string) string +func APIManifest(rootURL string) string 
+``` + +Install with: + +``` +go install ./.. +``` + +Test with: + +``` +go test -v ./... +``` + +Python Usage +------------ + +You can install the python client with `pip install taskcluster-urls`; + +```python +import taskcluster_urls + +taskcluster_urls.api(root_url, 'auth', 'v1', 'foo/bar') +taskcluster_urls.schema(root_url, 'auth', 'v1/foo.yml') # Note that schema names have versions in them +taskcluster_urls.api_manifest_schema(root_url, 'v1') +taskcluster_urls.api_reference_schema(root_url, 'v1') +taskcluster_urls.exchanges_reference_schema(root_url, 'v1') +taskcluster_urls.metadata_metaschema(root_url, 'v1') +taskcluster_urls.api_reference(root_url, 'auth', 'v1') +taskcluster_urls.exchange_reference(root_url, 'auth', 'v1') +taskcluster_urls.ui(root_url, 'foo/bar') +taskcluster_urls.apiManifest(root_url) +taskcluster_urls.docs(root_url, 'foo/bar') + +And for testing, +```python +taskcluster_urls.test_root_url() +``` + +Test with: + +``` +tox +``` + +Java Usage +---------- + +[![JavaDoc](https://img.shields.io/badge/javadoc-reference-blue.svg)](http://taskcluster.github.io/taskcluster-lib-urls/apidocs) + +In order to use this library from your maven project, simply include it as a project dependency: + +``` + + ... + + ... + + org.mozilla.taskcluster + taskcluster-lib-urls + 1.0.0 + + + +``` + +The taskcluster-lib-urls artifacts are now available from the [maven central repository](http://central.sonatype.org/): + +* [Search Results](http://search.maven.org/#search|gav|1|g%3A%22org.mozilla.taskcluster%22%20AND%20a%3A%22taskcluster-lib-urls%22) +* [Directory Listing](https://repo1.maven.org/maven2/org/mozilla/taskcluster/taskcluster-lib-urls/) + +To use the library, do as follows: + +```java +import org.mozilla.taskcluster.urls.*; + +... 
+ + URLProvider urlProvider = URLs.provider("https://mytaskcluster.acme.org"); + + String fooBarAPI = urlProvider.api("auth", "v1", "foo/bar"); + String fooSchema = urlProvider.schema("auth", "v1/foo.yml"); // Note that schema names have versions in them + String apiSchema = urlProvider.apiReferenceSchema("v1"); + String exchangesSchema = urlProvider.exchangesReferenceSchema("v1"); + String manifestSchema = urlProvider.apiManifestSchema("v1"); + String metaschema = urlProvider.metadataMetaschema(); + String authAPIRef = urlProvider.apiReference("auth", "v1"); + String authExchangesRef = urlProvider.exchangeReference("auth", "v1"); + String uiFooBar = urlProvider.ui("foo/bar"); + String apiManifest = urlProvider.apiManifest(); + String docsFooBar = urlProvider.docs("foo/bar"); + +... +``` + +Install with: + +``` +mvn install +``` + +Test with: + +``` +mvn test +``` + + +Releasing +--------- + +New releases should be tested on Travis and Taskcluster to allow for all supported versions of various languages to be tested. Once satisfied that it works, new versions should be created with +`npm version` rather than by manually editing `package.json` and tags should be pushed to Github. + +Make the Node release first, as Python's version depends on its `package.json`. This follows the typical tag-and-push-to-publish approach: + +```sh +$ npm version minor # or patch, or major +$ git push upstream +``` + +Once that's done, build the Python sdists (only possible by the [maintainers on pypi](https://pypi.org/project/taskcluster-urls/#files)): + +```sh +rm -rf dist/* +python setup.py sdist bdist_wheel +python3 setup.py bdist_wheel +pip install twine +twine upload dist/* +``` + +Make sure to update [the changelog](https://github.com/taskcluster/taskcluster-lib-urls/releases)! 
+ +License +------- + +[Mozilla Public License Version 2.0](https://github.com/taskcluster/taskcluster-lib-urls/blob/master/LICENSE) diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/eclipse-formatter-config.xml b/vendor/github.com/taskcluster/taskcluster-lib-urls/eclipse-formatter-config.xml new file mode 100644 index 0000000..c519750 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/eclipse-formatter-config.xml @@ -0,0 +1,295 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/package.json b/vendor/github.com/taskcluster/taskcluster-lib-urls/package.json new file mode 100644 index 0000000..03c1bca --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/package.json @@ -0,0 +1,25 @@ +{ + "name": "taskcluster-lib-urls", + "version": "12.0.0", + "author": "Brian Stack ", + "description": "Build urls for taskcluster resources.", + "license": "MPL-2.0", + "scripts": { + "lint": "eslint src/*.js test/*.js", + "pretest": "yarn lint", + "test": "mocha test/*_test.js" + }, + "files": [ + "src" + ], + "repository": { + "type": "git", + "url": "https://github.com/taskcluster/taskcluster-lib-urls.git" + }, + "main": "./src/index.js", + "devDependencies": { + "eslint-config-taskcluster": "^3.1.0", + "js-yaml": "^3.11.0", + "mocha": "^5.1.1" + } +} diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/pom.xml 
b/vendor/github.com/taskcluster/taskcluster-lib-urls/pom.xml new file mode 100644 index 0000000..2225d1c --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/pom.xml @@ -0,0 +1,226 @@ + + + 4.0.0 + org.mozilla.taskcluster + taskcluster-lib-urls + 1.0.1 + jar + Taskcluster Lib URLs (java port) + A simple library to generate URLs for various Taskcluster resources across our various deployment methods. + https://github.com/taskcluster/taskcluster-lib-urls + 2018 + + UTF-8 + 1.7 + 1.7 + master + + + https://github.com/taskcluster/taskcluster-lib-urls/tree/${scm.branch} + scm:git:git://github.com/taskcluster/taskcluster-lib-urls.git + scm:git:ssh://git@github.com/taskcluster/taskcluster-lib-urls.git + HEAD + + + Bugzilla + https://bugzilla.mozilla.org/buglist.cgi?product=Taskcluster&component=Platform%20Libraries&resolution=--- + + + Taskcluster + https://tools.taskcluster.net + + + + Mozilla Public License, version 2.0 + https://www.mozilla.org/MPL/2.0 + + + + + ossrh + https://oss.sonatype.org/content/repositories/snapshots + + + + + Pete Moore + pmoore@mozilla.com + https://github.com/petemoore + Mozilla + https://www.mozilla.org + + pmoore + + + + + + Taskcluster Tools + https://groups.google.com/forum/#!forum/mozilla.tools.taskcluster + + + + Mozilla + https://www.mozilla.org + + + + junit + junit + 4.12 + test + + + org.yaml + snakeyaml + 1.23 + + + org.projectlombok + lombok + 1.16.16 + + + + + + + net.revelc.code.formatter + formatter-maven-plugin + 2.7.1 + + + + format + + + + + ${project.basedir}/eclipse-formatter-config.xml + + org/mozilla/taskcluster/urls/*.java + + + + + + maven-javadoc-plugin + 2.9.1 + + ch.raffael.doclets.pegdown.PegdownDoclet + + ch.raffael.pegdown-doclet + pegdown-doclet + 1.1 + + true + + + + + org.eluder.coveralls + coveralls-maven-plugin + 3.1.0 + + + + org.codehaus.mojo + cobertura-maven-plugin + 2.6 + + xml + 256m + + true + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.20.1 + + + + 
java.util.logging.config.file + src/test/resources/logging.properties + + + + + + + + . + + tests.yml + + + + + + + release + + + + org.sonatype.plugins + nexus-staging-maven-plugin + 1.6.3 + true + + ossrh + https://oss.sonatype.org/ + true + + + + maven-javadoc-plugin + 2.9.1 + + + attach-javadocs + + jar + + + + + + org.apache.maven.plugins + maven-gpg-plugin + 1.5 + + + sign-artifacts + verify + + sign + + + + + + org.apache.maven.plugins + maven-source-plugin + 2.2.1 + + + attach-sources + + jar-no-fork + + + + + + + + + + + projectlombok.org + http://projectlombok.org/mavenrepo + + + diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/setup.cfg b/vendor/github.com/taskcluster/taskcluster-lib-urls/setup.cfg new file mode 100644 index 0000000..972cab3 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/setup.cfg @@ -0,0 +1,2 @@ +[tools:pytest] +flake8-max-line-length = 120 diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/setup.py b/vendor/github.com/taskcluster/taskcluster-lib-urls/setup.py new file mode 100644 index 0000000..f601081 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/setup.py @@ -0,0 +1,28 @@ +import json +import os +from setuptools import setup + +package_json = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'package.json') +with open(package_json) as f: + version = json.load(f)['version'] + +setup( + name='taskcluster-urls', + description='Standardized url generator for taskcluster resources.', + long_description=open(os.path.join(os.path.dirname(__file__), 'README.md')).read(), + long_description_content_type='text/markdown', + url='https://github.com/taskcluster/taskcluster-lib-urls', + version=version, + packages=['taskcluster_urls'], + author='Brian Stack', + author_email='bstack@mozilla.com', + license='MPL2', + classifiers=[ + 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)', + 'Programming Language :: Python :: 2', + 'Programming Language :: 
Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + ], +) diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/tcurls.go b/vendor/github.com/taskcluster/taskcluster-lib-urls/tcurls.go new file mode 100644 index 0000000..53453f9 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/tcurls.go @@ -0,0 +1,108 @@ +package tcurls + +import ( + "fmt" + "strings" +) + +const oldRootURL = "https://taskcluster.net" + +// API generates a url for a resource in a taskcluster service +func API(rootURL string, service string, version string, path string) string { + path = strings.TrimLeft(path, "/") + switch r := strings.TrimRight(rootURL, "/"); r { + case oldRootURL: + return fmt.Sprintf("https://%s.taskcluster.net/%s/%s", service, version, path) + default: + return fmt.Sprintf("%s/api/%s/%s/%s", r, service, version, path) + } +} + +// APIReference enerates a url for a taskcluster service reference doc +func APIReference(rootURL string, service string, version string) string { + switch r := strings.TrimRight(rootURL, "/"); r { + case oldRootURL: + return fmt.Sprintf("https://references.taskcluster.net/%s/%s/api.json", service, version) + default: + return fmt.Sprintf("%s/references/%s/%s/api.json", r, service, version) + } +} + +// Docs generates a url for a taskcluster docs-site page +func Docs(rootURL string, path string) string { + path = strings.TrimLeft(path, "/") + switch r := strings.TrimRight(rootURL, "/"); r { + case oldRootURL: + return fmt.Sprintf("https://docs.taskcluster.net/%s", path) + default: + return fmt.Sprintf("%s/docs/%s", r, path) + } +} + +// ExchangeReference generates a url for a taskcluster exchange reference doc +func ExchangeReference(rootURL string, service string, version string) string { + switch r := strings.TrimRight(rootURL, "/"); r { + case oldRootURL: + return 
fmt.Sprintf("https://references.taskcluster.net/%s/%s/exchanges.json", service, version) + default: + return fmt.Sprintf("%s/references/%s/%s/exchanges.json", r, service, version) + } +} + +// Schema generates a url for a taskcluster schema +func Schema(rootURL string, service string, name string) string { + name = strings.TrimLeft(name, "/") + switch r := strings.TrimRight(rootURL, "/"); r { + case oldRootURL: + return fmt.Sprintf("https://schemas.taskcluster.net/%s/%s", service, name) + default: + return fmt.Sprintf("%s/schemas/%s/%s", r, service, name) + } +} + +// APIReferenceSchema generates a url for the api reference schema +func APIReferenceSchema(rootURL string, version string) string { + return Schema(rootURL, "common", fmt.Sprintf("api-reference-%s.json", version)) +} + +// ExchangesReferenceSchema generates a url for the exchanges reference schema +func ExchangesReferenceSchema(rootURL string, version string) string { + return Schema(rootURL, "common", fmt.Sprintf("exchanges-reference-%s.json", version)) +} + +// APIManifestSchema generates a url for the api manifest schema +func APIManifestSchema(rootURL string, version string) string { + return Schema(rootURL, "common", fmt.Sprintf("manifest-%s.json", version)) +} + +// MetadataMetaschema generates a url for the metadata metaschema +func MetadataMetaschema(rootURL string) string { + return Schema(rootURL, "common", "metadata-metaschema.json") +} + +// UI generates a url for a page in taskcluster tools site +// The purpose of the function is to switch on rootUrl: +// "The driver for having a ui method is so we can just call ui with a path and any root url, +// and the returned url should work for both our current deployment (with root URL = https://taskcluster.net) +// and any future deployment. The returned value is essentially rootURL == 'https://taskcluster.net' +// ? 
'https://tools.taskcluster.net/${path}' +// : '${rootURL}/${path}' " +func UI(rootURL string, path string) string { + path = strings.TrimLeft(path, "/") + switch r := strings.TrimRight(rootURL, "/"); r { + case oldRootURL: + return fmt.Sprintf("https://tools.taskcluster.net/%s", path) + default: + return fmt.Sprintf("%s/%s", r, path) + } +} + +// APIManifest returns a URL for the service manifest of a taskcluster deployment +func APIManifest(rootURL string) string { + switch r := strings.TrimRight(rootURL, "/"); r { + case oldRootURL: + return "https://references.taskcluster.net/manifest.json" + default: + return fmt.Sprintf("%s/references/manifest.json", r) + } +} diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/tests.yml b/vendor/github.com/taskcluster/taskcluster-lib-urls/tests.yml new file mode 100644 index 0000000..d05b53a --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/tests.yml @@ -0,0 +1,160 @@ +################################################################################ +# This document contains a language-agnostic set of test cases for validating +# language implementations. +# +# The format of this document is as follows: +# +# rootURLs: +# tests: +# function: [api, apiReference, docs, exchangeReference, schema, ui] +# argSets: +# expected: +# +# The specification for generating URLs that the libraries in this repository +# implement, is defined in `docs/urls-spec.md`. 
+################################################################################# +--- +rootURLs: + old: + - https://taskcluster.net + - https://taskcluster.net/ + - https://taskcluster.net// + new: + - https://taskcluster.example.com + - https://taskcluster.example.com/ + - https://taskcluster.example.com// + invalid: + - '12345' + empty: + - '' + +tests: +- function: api + argSets: + - [auth, v1, ping] + - [auth, v1, /ping] + - [auth, v1, //ping] + expected: + old: https://auth.taskcluster.net/v1/ping + new: https://taskcluster.example.com/api/auth/v1/ping + invalid: 12345/api/auth/v1/ping + empty: /api/auth/v1/ping +- function: api + argSets: + - [auth, v1, foo/ping] + - [auth, v1, /foo/ping] + - [auth, v1, //foo/ping] + expected: + old: https://auth.taskcluster.net/v1/foo/ping + new: https://taskcluster.example.com/api/auth/v1/foo/ping + invalid: 12345/api/auth/v1/foo/ping + empty: /api/auth/v1/foo/ping +- function: docs + argSets: + - [something/in/docs] + - [/something/in/docs] + - [//something/in/docs] + expected: + old: https://docs.taskcluster.net/something/in/docs + new: https://taskcluster.example.com/docs/something/in/docs + invalid: 12345/docs/something/in/docs + empty: /docs/something/in/docs +- function: schema + argSets: + - [auth, v1/something.json] + - [auth, /v1/something.json] + - [auth, //v1/something.json] + expected: + old: https://schemas.taskcluster.net/auth/v1/something.json + new: https://taskcluster.example.com/schemas/auth/v1/something.json + invalid: 12345/schemas/auth/v1/something.json + empty: /schemas/auth/v1/something.json +- function: schema + argSets: + - [auth, v2/something.json] + - [auth, /v2/something.json] + - [auth, //v2/something.json] + expected: + old: https://schemas.taskcluster.net/auth/v2/something.json + new: https://taskcluster.example.com/schemas/auth/v2/something.json + invalid: 12345/schemas/auth/v2/something.json + empty: /schemas/auth/v2/something.json +- function: apiReferenceSchema + argSets: + - [v1] + 
expected: + old: https://schemas.taskcluster.net/common/api-reference-v1.json + new: https://taskcluster.example.com/schemas/common/api-reference-v1.json + invalid: 12345/schemas/common/api-reference-v1.json + empty: /schemas/common/api-reference-v1.json +- function: exchangesReferenceSchema + argSets: + - [v1] + expected: + old: https://schemas.taskcluster.net/common/exchanges-reference-v1.json + new: https://taskcluster.example.com/schemas/common/exchanges-reference-v1.json + invalid: 12345/schemas/common/exchanges-reference-v1.json + empty: /schemas/common/exchanges-reference-v1.json +- function: apiManifestSchema + argSets: + - [v1] + expected: + old: https://schemas.taskcluster.net/common/manifest-v1.json + new: https://taskcluster.example.com/schemas/common/manifest-v1.json + invalid: 12345/schemas/common/manifest-v1.json + empty: /schemas/common/manifest-v1.json +- function: metadataMetaschema + argSets: + - [v1] + expected: + old: https://schemas.taskcluster.net/common/metadata-metaschema.json + new: https://taskcluster.example.com/schemas/common/metadata-metaschema.json + invalid: 12345/schemas/common/metadata-metaschema.json + empty: /schemas/common/metadata-metaschema.json +- function: apiReference + argSets: + - [auth, v1] + expected: + old: https://references.taskcluster.net/auth/v1/api.json + new: https://taskcluster.example.com/references/auth/v1/api.json + invalid: 12345/references/auth/v1/api.json + empty: /references/auth/v1/api.json +- function: exchangeReference + argSets: + - [auth, v1] + expected: + old: https://references.taskcluster.net/auth/v1/exchanges.json + new: https://taskcluster.example.com/references/auth/v1/exchanges.json + invalid: 12345/references/auth/v1/exchanges.json + empty: /references/auth/v1/exchanges.json +- function: ui + argSets: + - [something] + - [/something] + - [//something] + expected: + old: https://tools.taskcluster.net/something + new: https://taskcluster.example.com/something + invalid: 12345/something + empty: 
/something +- function: ui + argSets: + - [''] + - [/] + - [//] + expected: + old: https://tools.taskcluster.net/ + new: https://taskcluster.example.com/ + invalid: 12345/ + empty: / +- function: apiManifest + argSets: + - [] + expected: + old: https://references.taskcluster.net/manifest.json + new: https://taskcluster.example.com/references/manifest.json + invalid: 12345/references/manifest.json + empty: /references/manifest.json diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/tox.ini b/vendor/github.com/taskcluster/taskcluster-lib-urls/tox.ini new file mode 100644 index 0000000..dd2dc18 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/tox.ini @@ -0,0 +1,11 @@ +[tox] +envlist = py27,py36 + +[testenv] +deps = + pyyaml + pytest + pytest-cov + pytest-flake8 +commands = + pytest -v --flake8 --cov=taskcluster_urls {posargs:test} diff --git a/vendor/github.com/taskcluster/taskcluster-lib-urls/yarn.lock b/vendor/github.com/taskcluster/taskcluster-lib-urls/yarn.lock new file mode 100644 index 0000000..f9b8d35 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster-lib-urls/yarn.lock @@ -0,0 +1,1005 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 
+# yarn lockfile v1 + + +acorn-jsx@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/acorn-jsx/-/acorn-jsx-3.0.1.tgz#afdf9488fb1ecefc8348f6fb22f464e32a58b36b" + integrity sha1-r9+UiPsezvyDSPb7IvRk4ypYs2s= + dependencies: + acorn "^3.0.4" + +acorn@^3.0.4: + version "3.3.0" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-3.3.0.tgz#45e37fb39e8da3f25baee3ff5369e2bb5f22017a" + integrity sha1-ReN/s56No/JbruP/U2niu18iAXo= + +acorn@^5.5.0: + version "5.5.3" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-5.5.3.tgz#f473dd47e0277a08e28e9bec5aeeb04751f0b8c9" + integrity sha512-jd5MkIUlbbmb07nXH0DT3y7rDVtkzDi4XZOUVWAer8ajmF/DTSSbl5oNFyDOl/OXA33Bl79+ypHhl2pN20VeOQ== + +ajv-keywords@^2.1.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ajv-keywords/-/ajv-keywords-2.1.1.tgz#617997fc5f60576894c435f940d819e135b80762" + integrity sha1-YXmX/F9gV2iUxDX5QNgZ4TW4B2I= + +ajv@^5.2.3, ajv@^5.3.0: + version "5.5.2" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-5.5.2.tgz#73b5eeca3fab653e3d3f9422b341ad42205dc965" + integrity sha1-c7Xuyj+rZT49P5Qis0GtQiBdyWU= + dependencies: + co "^4.6.0" + fast-deep-equal "^1.0.0" + fast-json-stable-stringify "^2.0.0" + json-schema-traverse "^0.3.0" + +ansi-escapes@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-3.1.0.tgz#f73207bb81207d75fd6c83f125af26eea378ca30" + integrity sha512-UgAb8H9D41AQnu/PbWlCofQVcnV4Gs2bBJi9eZPxfU/hgglFh3SMDMENRIqdr7H6XFnXdoknctFByVsCOotTVw== + +ansi-regex@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df" + integrity sha1-w7M6te42DYbg5ijwRorn7yfWVN8= + +ansi-regex@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-3.0.0.tgz#ed0317c322064f79466c02966bddb605ab37d998" + integrity sha1-7QMXwyIGT3lGbAKWa922Bas32Zg= + +ansi-styles@^2.2.1: + version "2.2.1" + resolved 
"https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe" + integrity sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4= + +ansi-styles@^3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" + integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA== + dependencies: + color-convert "^1.9.0" + +argparse@^1.0.7: + version "1.0.10" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" + integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg== + dependencies: + sprintf-js "~1.0.2" + +array-union@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/array-union/-/array-union-1.0.2.tgz#9a34410e4f4e3da23dea375be5be70f24778ec39" + integrity sha1-mjRBDk9OPaI96jdb5b5w8kd47Dk= + dependencies: + array-uniq "^1.0.1" + +array-uniq@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/array-uniq/-/array-uniq-1.0.3.tgz#af6ac877a25cc7f74e058894753858dfdb24fdb6" + integrity sha1-r2rId6Jcx/dOBYiUdThY39sk/bY= + +arrify@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d" + integrity sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0= + +babel-code-frame@^6.22.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-code-frame/-/babel-code-frame-6.26.0.tgz#63fd43f7dc1e3bb7ce35947db8fe369a3f58c74b" + integrity sha1-Y/1D99weO7fONZR9uP42mj9Yx0s= + dependencies: + chalk "^1.1.3" + esutils "^2.0.2" + js-tokens "^3.0.2" + +balanced-match@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767" + integrity sha1-ibTRmasr7kneFk6gK4nORi1xt2c= + +brace-expansion@^1.1.7: + version "1.1.11" + resolved 
"https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" + integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA== + dependencies: + balanced-match "^1.0.0" + concat-map "0.0.1" + +browser-stdout@1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/browser-stdout/-/browser-stdout-1.3.1.tgz#baa559ee14ced73452229bad7326467c61fabd60" + integrity sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw== + +buffer-from@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.0.0.tgz#4cb8832d23612589b0406e9e2956c17f06fdf531" + integrity sha512-83apNb8KK0Se60UE1+4Ukbe3HbfELJ6UlI4ldtOGs7So4KD26orJM8hIY9lxdzP+UpItH1Yh/Y8GUvNFWFFRxA== + +caller-path@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/caller-path/-/caller-path-0.1.0.tgz#94085ef63581ecd3daa92444a8fe94e82577751f" + integrity sha1-lAhe9jWB7NPaqSREqP6U6CV3dR8= + dependencies: + callsites "^0.2.0" + +callsites@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/callsites/-/callsites-0.2.0.tgz#afab96262910a7f33c19a5775825c69f34e350ca" + integrity sha1-r6uWJikQp/M8GaV3WCXGnzTjUMo= + +chalk@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98" + integrity sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg= + dependencies: + ansi-styles "^2.2.1" + escape-string-regexp "^1.0.2" + has-ansi "^2.0.0" + strip-ansi "^3.0.0" + supports-color "^2.0.0" + +chalk@^2.0.0, chalk@^2.1.0: + version "2.4.1" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.1.tgz#18c49ab16a037b6eb0152cc83e3471338215b66e" + integrity sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ== + dependencies: + ansi-styles "^3.2.1" + escape-string-regexp "^1.0.5" + supports-color "^5.3.0" + +chardet@^0.4.0: + version "0.4.2" 
+ resolved "https://registry.yarnpkg.com/chardet/-/chardet-0.4.2.tgz#b5473b33dc97c424e5d98dc87d55d4d8a29c8bf2" + integrity sha1-tUc7M9yXxCTl2Y3IfVXU2KKci/I= + +circular-json@^0.3.1: + version "0.3.3" + resolved "https://registry.yarnpkg.com/circular-json/-/circular-json-0.3.3.tgz#815c99ea84f6809529d2f45791bdf82711352d66" + integrity sha512-UZK3NBx2Mca+b5LsG7bY183pHWt5Y1xts4P3Pz7ENTwGVnJOUWbRb3ocjvX7hx9tq/yTAdclXm9sZ38gNuem4A== + +cli-cursor@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-2.1.0.tgz#b35dac376479facc3e94747d41d0d0f5238ffcb5" + integrity sha1-s12sN2R5+sw+lHR9QdDQ9SOP/LU= + dependencies: + restore-cursor "^2.0.0" + +cli-width@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/cli-width/-/cli-width-2.2.0.tgz#ff19ede8a9a5e579324147b0c11f0fbcbabed639" + integrity sha1-/xnt6Kml5XkyQUewwR8PvLq+1jk= + +co@^4.6.0: + version "4.6.0" + resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184" + integrity sha1-bqa989hTrlTMuOR7+gvz+QMfsYQ= + +color-convert@^1.9.0: + version "1.9.1" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.1.tgz#c1261107aeb2f294ebffec9ed9ecad529a6097ed" + integrity sha512-mjGanIiwQJskCC18rPR6OmrZ6fm2Lc7PeGFYwCmy5J34wC6F1PzdGL6xeMfmgicfYcNLGuVFA3WzXtIDCQSZxQ== + dependencies: + color-name "^1.1.1" + +color-name@^1.1.1: + version "1.1.3" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" + integrity sha1-p9BVi9icQveV3UIyj3QIMcpTvCU= + +commander@2.11.0: + version "2.11.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-2.11.0.tgz#157152fd1e7a6c8d98a5b715cf376df928004563" + integrity sha512-b0553uYA5YAEGgyYIGYROzKQ7X5RAqedkfjiZxwi0kL1g3bOaBNNZfYkzt/CL0umgD5wc9Jec2FbB98CjkMRvQ== + +concat-map@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" + 
integrity sha1-2Klr13/Wjfd5OnMDajug1UBdR3s= + +concat-stream@^1.6.0: + version "1.6.2" + resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.6.2.tgz#904bdf194cd3122fc675c77fc4ac3d4ff0fd1a34" + integrity sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw== + dependencies: + buffer-from "^1.0.0" + inherits "^2.0.3" + readable-stream "^2.2.2" + typedarray "^0.0.6" + +core-util-is@~1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" + integrity sha1-tf1UIgqivFq1eqtxQMlAdUUDwac= + +cross-spawn@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-5.1.0.tgz#e8bd0efee58fcff6f8f94510a0a554bbfa235449" + integrity sha1-6L0O/uWPz/b4+UUQoKVUu/ojVEk= + dependencies: + lru-cache "^4.0.1" + shebang-command "^1.2.0" + which "^1.2.9" + +debug@3.1.0, debug@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/debug/-/debug-3.1.0.tgz#5bb5a0672628b64149566ba16819e61518c67261" + integrity sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g== + dependencies: + ms "2.0.0" + +deep-is@~0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.3.tgz#b369d6fb5dbc13eecf524f91b070feedc357cf34" + integrity sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ= + +del@^2.0.2: + version "2.2.2" + resolved "https://registry.yarnpkg.com/del/-/del-2.2.2.tgz#c12c981d067846c84bcaf862cff930d907ffd1a8" + integrity sha1-wSyYHQZ4RshLyvhiz/kw2Qf/0ag= + dependencies: + globby "^5.0.0" + is-path-cwd "^1.0.0" + is-path-in-cwd "^1.0.0" + object-assign "^4.0.1" + pify "^2.0.0" + pinkie-promise "^2.0.0" + rimraf "^2.2.8" + +diff@3.5.0: + version "3.5.0" + resolved "https://registry.yarnpkg.com/diff/-/diff-3.5.0.tgz#800c0dd1e0a8bfbc95835c202ad220fe317e5a12" + integrity 
sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA== + +doctrine@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-2.1.0.tgz#5cd01fc101621b42c4cd7f5d1a66243716d3f39d" + integrity sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw== + dependencies: + esutils "^2.0.2" + +escape-string-regexp@1.0.5, escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" + integrity sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ= + +eslint-config-taskcluster@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/eslint-config-taskcluster/-/eslint-config-taskcluster-3.1.0.tgz#a8ec7efd59a88f33abd9c8256adb69ee60775007" + integrity sha512-xemOAkCVbkbMOmOZ9xHR134qzpJCjlWC2ymzNTVLdW02T7/vm0/38ADC1WQeHAWXNapLq8VrunXbk220ROrJqw== + dependencies: + eslint "^4.10.0" + +eslint-scope@^3.7.1: + version "3.7.1" + resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-3.7.1.tgz#3d63c3edfda02e06e01a452ad88caacc7cdcb6e8" + integrity sha1-PWPD7f2gLgbgGkUq2IyqzHzctug= + dependencies: + esrecurse "^4.1.0" + estraverse "^4.1.1" + +eslint-visitor-keys@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-1.0.0.tgz#3f3180fb2e291017716acb4c9d6d5b5c34a6a81d" + integrity sha512-qzm/XxIbxm/FHyH341ZrbnMUpe+5Bocte9xkmFMzPMjRaZMcXww+MpBptFvtU+79L362nqiLhekCxCxDPaUMBQ== + +eslint@^4.10.0: + version "4.19.1" + resolved "https://registry.yarnpkg.com/eslint/-/eslint-4.19.1.tgz#32d1d653e1d90408854bfb296f076ec7e186a300" + integrity sha512-bT3/1x1EbZB7phzYu7vCr1v3ONuzDtX8WjuM9c0iYxe+cq+pwcKEoQjl7zd3RpC6YOLgnSy3cTN58M2jcoPDIQ== + dependencies: + ajv "^5.3.0" + babel-code-frame "^6.22.0" + chalk "^2.1.0" + concat-stream "^1.6.0" + cross-spawn "^5.1.0" + debug "^3.1.0" + doctrine 
"^2.1.0" + eslint-scope "^3.7.1" + eslint-visitor-keys "^1.0.0" + espree "^3.5.4" + esquery "^1.0.0" + esutils "^2.0.2" + file-entry-cache "^2.0.0" + functional-red-black-tree "^1.0.1" + glob "^7.1.2" + globals "^11.0.1" + ignore "^3.3.3" + imurmurhash "^0.1.4" + inquirer "^3.0.6" + is-resolvable "^1.0.0" + js-yaml "^3.9.1" + json-stable-stringify-without-jsonify "^1.0.1" + levn "^0.3.0" + lodash "^4.17.4" + minimatch "^3.0.2" + mkdirp "^0.5.1" + natural-compare "^1.4.0" + optionator "^0.8.2" + path-is-inside "^1.0.2" + pluralize "^7.0.0" + progress "^2.0.0" + regexpp "^1.0.1" + require-uncached "^1.0.3" + semver "^5.3.0" + strip-ansi "^4.0.0" + strip-json-comments "~2.0.1" + table "4.0.2" + text-table "~0.2.0" + +espree@^3.5.4: + version "3.5.4" + resolved "https://registry.yarnpkg.com/espree/-/espree-3.5.4.tgz#b0f447187c8a8bed944b815a660bddf5deb5d1a7" + integrity sha512-yAcIQxtmMiB/jL32dzEp2enBeidsB7xWPLNiw3IIkpVds1P+h7qF9YwJq1yUNzp2OKXgAprs4F61ih66UsoD1A== + dependencies: + acorn "^5.5.0" + acorn-jsx "^3.0.0" + +esprima@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.0.tgz#4499eddcd1110e0b218bacf2fa7f7f59f55ca804" + integrity sha512-oftTcaMu/EGrEIu904mWteKIv8vMuOgGYo7EhVJJN00R/EED9DCua/xxHRdYnKtcECzVg7xOWhflvJMnqcFZjw== + +esquery@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/esquery/-/esquery-1.0.1.tgz#406c51658b1f5991a5f9b62b1dc25b00e3e5c708" + integrity sha512-SmiyZ5zIWH9VM+SRUReLS5Q8a7GxtRdxEBVZpm98rJM7Sb+A9DVCndXfkeFUd3byderg+EbDkfnevfCwynWaNA== + dependencies: + estraverse "^4.0.0" + +esrecurse@^4.1.0: + version "4.2.1" + resolved "https://registry.yarnpkg.com/esrecurse/-/esrecurse-4.2.1.tgz#007a3b9fdbc2b3bb87e4879ea19c92fdbd3942cf" + integrity sha512-64RBB++fIOAXPw3P9cy89qfMlvZEXZkqqJkjqqXIvzP5ezRZjW+lPWjw35UX/3EhUPFYbg5ER4JYgDw4007/DQ== + dependencies: + estraverse "^4.1.0" + +estraverse@^4.0.0, estraverse@^4.1.0, estraverse@^4.1.1: + version "4.2.0" + resolved 
"https://registry.yarnpkg.com/estraverse/-/estraverse-4.2.0.tgz#0dee3fed31fcd469618ce7342099fc1afa0bdb13" + integrity sha1-De4/7TH81GlhjOc0IJn8GvoL2xM= + +esutils@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.2.tgz#0abf4f1caa5bcb1f7a9d8acc6dea4faaa04bac9b" + integrity sha1-Cr9PHKpbyx96nYrMbepPqqBLrJs= + +external-editor@^2.0.4: + version "2.2.0" + resolved "https://registry.yarnpkg.com/external-editor/-/external-editor-2.2.0.tgz#045511cfd8d133f3846673d1047c154e214ad3d5" + integrity sha512-bSn6gvGxKt+b7+6TKEv1ZycHleA7aHhRHyAqJyp5pbUFuYYNIzpZnQDk7AsYckyWdEnTeAnay0aCy2aV6iTk9A== + dependencies: + chardet "^0.4.0" + iconv-lite "^0.4.17" + tmp "^0.0.33" + +fast-deep-equal@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-1.1.0.tgz#c053477817c86b51daa853c81e059b733d023614" + integrity sha1-wFNHeBfIa1HaqFPIHgWbcz0CNhQ= + +fast-json-stable-stringify@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz#d5142c0caee6b1189f87d3a76111064f86c8bbf2" + integrity sha1-1RQsDK7msRifh9OnYREGT4bIu/I= + +fast-levenshtein@~2.0.4: + version "2.0.6" + resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" + integrity sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc= + +figures@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/figures/-/figures-2.0.0.tgz#3ab1a2d2a62c8bfb431a0c94cb797a2fce27c962" + integrity sha1-OrGi0qYsi/tDGgyUy3l6L84nyWI= + dependencies: + escape-string-regexp "^1.0.5" + +file-entry-cache@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/file-entry-cache/-/file-entry-cache-2.0.0.tgz#c392990c3e684783d838b8c84a45d8a048458361" + integrity sha1-w5KZDD5oR4PYOLjISkXYoEhFg2E= + dependencies: + flat-cache "^1.2.1" + object-assign "^4.0.1" + +flat-cache@^1.2.1: + version "1.3.0" + resolved 
"https://registry.yarnpkg.com/flat-cache/-/flat-cache-1.3.0.tgz#d3030b32b38154f4e3b7e9c709f490f7ef97c481" + integrity sha1-0wMLMrOBVPTjt+nHCfSQ9++XxIE= + dependencies: + circular-json "^0.3.1" + del "^2.0.2" + graceful-fs "^4.1.2" + write "^0.2.1" + +fs.realpath@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" + integrity sha1-FQStJSMVjKpA20onh8sBQRmU6k8= + +functional-red-black-tree@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz#1b0ab3bd553b2a0d6399d29c0e3ea0b252078327" + integrity sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc= + +glob@7.1.2, glob@^7.0.3, glob@^7.0.5, glob@^7.1.2: + version "7.1.2" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.2.tgz#c19c9df9a028702d678612384a6552404c636d15" + integrity sha512-MJTUg1kjuLeQCJ+ccE4Vpa6kKVXkPYJ2mOCQyUuKLcLQsdrMCpBPUi8qVE6+YuaJkozeA9NusTAw3hLr8Xe5EQ== + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.0.4" + once "^1.3.0" + path-is-absolute "^1.0.0" + +globals@^11.0.1: + version "11.5.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-11.5.0.tgz#6bc840de6771173b191f13d3a9c94d441ee92642" + integrity sha512-hYyf+kI8dm3nORsiiXUQigOU62hDLfJ9G01uyGMxhc6BKsircrUhC4uJPQPUSuq2GrTmiiEt7ewxlMdBewfmKQ== + +globby@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/globby/-/globby-5.0.0.tgz#ebd84667ca0dbb330b99bcfc68eac2bc54370e0d" + integrity sha1-69hGZ8oNuzMLmbz8aOrCvFQ3Dg0= + dependencies: + array-union "^1.0.1" + arrify "^1.0.0" + glob "^7.0.3" + object-assign "^4.0.1" + pify "^2.0.0" + pinkie-promise "^2.0.0" + +graceful-fs@^4.1.2: + version "4.1.11" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658" + integrity sha1-Dovf5NHduIVNZOBOp8AOKgJuVlg= + +growl@1.10.3: + version "1.10.3" + resolved 
"https://registry.yarnpkg.com/growl/-/growl-1.10.3.tgz#1926ba90cf3edfe2adb4927f5880bc22c66c790f" + integrity sha512-hKlsbA5Vu3xsh1Cg3J7jSmX/WaW6A5oBeqzM88oNbCRQFz+zUaXm6yxS4RVytp1scBoJzSYl4YAEOQIt6O8V1Q== + +has-ansi@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91" + integrity sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE= + dependencies: + ansi-regex "^2.0.0" + +has-flag@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-2.0.0.tgz#e8207af1cc7b30d446cc70b734b5e8be18f88d51" + integrity sha1-6CB68cx7MNRGzHC3NLXovhj4jVE= + +has-flag@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" + integrity sha1-tdRU3CGZriJWmfNGfloH87lVuv0= + +he@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/he/-/he-1.1.1.tgz#93410fd21b009735151f8868c2f271f3427e23fd" + integrity sha1-k0EP0hsAlzUVH4howvJx80J+I/0= + +iconv-lite@^0.4.17: + version "0.4.21" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.21.tgz#c47f8733d02171189ebc4a400f3218d348094798" + integrity sha512-En5V9za5mBt2oUA03WGD3TwDv0MKAruqsuxstbMUZaj9W9k/m1CV/9py3l0L5kw9Bln8fdHQmzHSYtvpvTLpKw== + dependencies: + safer-buffer "^2.1.0" + +ignore@^3.3.3: + version "3.3.8" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-3.3.8.tgz#3f8e9c35d38708a3a7e0e9abb6c73e7ee7707b2b" + integrity sha512-pUh+xUQQhQzevjRHHFqqcTy0/dP/kS9I8HSrUydhihjuD09W6ldVWFtIrwhXdUJHis3i2rZNqEHpZH/cbinFbg== + +imurmurhash@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" + integrity sha1-khi5srkoojixPcT7a21XbyMUU+o= + +inflight@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" + integrity sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk= + dependencies: + once 
"^1.3.0" + wrappy "1" + +inherits@2, inherits@^2.0.3, inherits@~2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" + integrity sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4= + +inquirer@^3.0.6: + version "3.3.0" + resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-3.3.0.tgz#9dd2f2ad765dcab1ff0443b491442a20ba227dc9" + integrity sha512-h+xtnyk4EwKvFWHrUYsWErEVR+igKtLdchu+o0Z1RL7VU/jVMFbYir2bp6bAj8efFNxWqHX0dIss6fJQ+/+qeQ== + dependencies: + ansi-escapes "^3.0.0" + chalk "^2.0.0" + cli-cursor "^2.1.0" + cli-width "^2.0.0" + external-editor "^2.0.4" + figures "^2.0.0" + lodash "^4.3.0" + mute-stream "0.0.7" + run-async "^2.2.0" + rx-lite "^4.0.8" + rx-lite-aggregates "^4.0.8" + string-width "^2.1.0" + strip-ansi "^4.0.0" + through "^2.3.6" + +is-fullwidth-code-point@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz#a3b30a5c4f199183167aaab93beefae3ddfb654f" + integrity sha1-o7MKXE8ZkYMWeqq5O+764937ZU8= + +is-path-cwd@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-path-cwd/-/is-path-cwd-1.0.0.tgz#d225ec23132e89edd38fda767472e62e65f1106d" + integrity sha1-0iXsIxMuie3Tj9p2dHLmLmXxEG0= + +is-path-in-cwd@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-path-in-cwd/-/is-path-in-cwd-1.0.1.tgz#5ac48b345ef675339bd6c7a48a912110b241cf52" + integrity sha512-FjV1RTW48E7CWM7eE/J2NJvAEEVektecDBVBE5Hh3nM1Jd0kvhHtX68Pr3xsDf857xt3Y4AkwVULK1Vku62aaQ== + dependencies: + is-path-inside "^1.0.0" + +is-path-inside@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-path-inside/-/is-path-inside-1.0.1.tgz#8ef5b7de50437a3fdca6b4e865ef7aa55cb48036" + integrity sha1-jvW33lBDej/cprToZe96pVy0gDY= + dependencies: + path-is-inside "^1.0.1" + +is-promise@^2.1.0: + version "2.1.0" + resolved 
"https://registry.yarnpkg.com/is-promise/-/is-promise-2.1.0.tgz#79a2a9ece7f096e80f36d2b2f3bc16c1ff4bf3fa" + integrity sha1-eaKp7OfwlugPNtKy87wWwf9L8/o= + +is-resolvable@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-resolvable/-/is-resolvable-1.1.0.tgz#fb18f87ce1feb925169c9a407c19318a3206ed88" + integrity sha512-qgDYXFSR5WvEfuS5dMj6oTMEbrrSaM0CrFk2Yiq/gXnBvD9pMa2jGXxyhGLfvhZpuMZe18CJpFxAt3CRs42NMg== + +isarray@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" + integrity sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE= + +isexe@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" + integrity sha1-6PvzdNxVb/iUehDcsFctYz8s+hA= + +js-tokens@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.2.tgz#9866df395102130e38f7f996bceb65443209c25b" + integrity sha1-mGbfOVECEw449/mWvOtlRDIJwls= + +js-yaml@^3.11.0, js-yaml@^3.9.1: + version "3.11.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.11.0.tgz#597c1a8bd57152f26d622ce4117851a51f5ebaef" + integrity sha512-saJstZWv7oNeOyBh3+Dx1qWzhW0+e6/8eDzo7p5rDFqxntSztloLtuKu+Ejhtq82jsilwOIZYsCz+lIjthg1Hw== + dependencies: + argparse "^1.0.7" + esprima "^4.0.0" + +json-schema-traverse@^0.3.0: + version "0.3.1" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.3.1.tgz#349a6d44c53a51de89b40805c5d5e59b417d3340" + integrity sha1-NJptRMU6Ud6JtAgFxdXlm0F9M0A= + +json-stable-stringify-without-jsonify@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz#9db7b59496ad3f3cfef30a75142d2d930ad72651" + integrity sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE= + +levn@^0.3.0, levn@~0.3.0: + version "0.3.0" + resolved 
"https://registry.yarnpkg.com/levn/-/levn-0.3.0.tgz#3b09924edf9f083c0490fdd4c0bc4421e04764ee" + integrity sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4= + dependencies: + prelude-ls "~1.1.2" + type-check "~0.3.2" + +lodash@^4.17.4, lodash@^4.3.0: + version "4.17.10" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.10.tgz#1b7793cf7259ea38fb3661d4d38b3260af8ae4e7" + integrity sha512-UejweD1pDoXu+AD825lWwp4ZGtSwgnpZxb3JDViD7StjQz+Nb/6l093lx4OQ0foGWNRoc19mWy7BzL+UAK2iVg== + +lru-cache@^4.0.1: + version "4.1.2" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-4.1.2.tgz#45234b2e6e2f2b33da125624c4664929a0224c3f" + integrity sha512-wgeVXhrDwAWnIF/yZARsFnMBtdFXOg1b8RIrhilp+0iDYN4mdQcNZElDZ0e4B64BhaxeQ5zN7PMyvu7we1kPeQ== + dependencies: + pseudomap "^1.0.2" + yallist "^2.1.2" + +mimic-fn@^1.0.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-1.2.0.tgz#820c86a39334640e99516928bd03fca88057d022" + integrity sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ== + +minimatch@3.0.4, minimatch@^3.0.2, minimatch@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" + integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA== + dependencies: + brace-expansion "^1.1.7" + +minimist@0.0.8: + version "0.0.8" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d" + integrity sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0= + +mkdirp@0.5.1, mkdirp@^0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903" + integrity sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM= + dependencies: + minimist "0.0.8" + +mocha@^5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/mocha/-/mocha-5.1.1.tgz#b774c75609dac05eb48f4d9ba1d827b97fde8a7b" + integrity 
sha512-kKKs/H1KrMMQIEsWNxGmb4/BGsmj0dkeyotEvbrAuQ01FcWRLssUNXCEUZk6SZtyJBi6EE7SL0zDDtItw1rGhw== + dependencies: + browser-stdout "1.3.1" + commander "2.11.0" + debug "3.1.0" + diff "3.5.0" + escape-string-regexp "1.0.5" + glob "7.1.2" + growl "1.10.3" + he "1.1.1" + minimatch "3.0.4" + mkdirp "0.5.1" + supports-color "4.4.0" + +ms@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" + integrity sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g= + +mute-stream@0.0.7: + version "0.0.7" + resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.7.tgz#3075ce93bc21b8fab43e1bc4da7e8115ed1e7bab" + integrity sha1-MHXOk7whuPq0PhvE2n6BFe0ee6s= + +natural-compare@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7" + integrity sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc= + +object-assign@^4.0.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + integrity sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM= + +once@^1.3.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" + integrity sha1-WDsap3WWHUsROsF9nFC6753Xa9E= + dependencies: + wrappy "1" + +onetime@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/onetime/-/onetime-2.0.1.tgz#067428230fd67443b2794b22bba528b6867962d4" + integrity sha1-BnQoIw/WdEOyeUsiu6UotoZ5YtQ= + dependencies: + mimic-fn "^1.0.0" + +optionator@^0.8.2: + version "0.8.2" + resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.8.2.tgz#364c5e409d3f4d6301d6c0b4c05bba50180aeb64" + integrity sha1-NkxeQJ0/TWMB1sC0wFu6UBgK62Q= + dependencies: + deep-is "~0.1.3" + fast-levenshtein "~2.0.4" + levn "~0.3.0" + prelude-ls "~1.1.2" + type-check "~0.3.2" + wordwrap "~1.0.0" + +os-tmpdir@~1.0.2: + version "1.0.2" + resolved 
"https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" + integrity sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ= + +path-is-absolute@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" + integrity sha1-F0uSaHNVNP+8es5r9TpanhtcX18= + +path-is-inside@^1.0.1, path-is-inside@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/path-is-inside/-/path-is-inside-1.0.2.tgz#365417dede44430d1c11af61027facf074bdfc53" + integrity sha1-NlQX3t5EQw0cEa9hAn+s8HS9/FM= + +pify@^2.0.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c" + integrity sha1-7RQaasBDqEnqWISY59yosVMw6Qw= + +pinkie-promise@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa" + integrity sha1-ITXW36ejWMBprJsXh3YogihFD/o= + dependencies: + pinkie "^2.0.0" + +pinkie@^2.0.0: + version "2.0.4" + resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870" + integrity sha1-clVrgM+g1IqXToDnckjoDtT3+HA= + +pluralize@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pluralize/-/pluralize-7.0.0.tgz#298b89df8b93b0221dbf421ad2b1b1ea23fc6777" + integrity sha512-ARhBOdzS3e41FbkW/XWrTEtukqqLoK5+Z/4UeDaLuSW+39JPeFgs4gCGqsrJHVZX0fUrx//4OF0K1CUGwlIFow== + +prelude-ls@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.1.2.tgz#21932a549f5e52ffd9a827f570e04be62a97da54" + integrity sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ= + +process-nextick-args@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.0.tgz#a37d732f4271b4ab1ad070d35508e8290788ffaa" + integrity sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw== + 
+progress@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/progress/-/progress-2.0.0.tgz#8a1be366bf8fc23db2bd23f10c6fe920b4389d1f" + integrity sha1-ihvjZr+Pwj2yvSPxDG/pILQ4nR8= + +pseudomap@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/pseudomap/-/pseudomap-1.0.2.tgz#f052a28da70e618917ef0a8ac34c1ae5a68286b3" + integrity sha1-8FKijacOYYkX7wqKw0wa5aaChrM= + +readable-stream@^2.2.2: + version "2.3.6" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.6.tgz#b11c27d88b8ff1fbe070643cf94b0c79ae1b0aaf" + integrity sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw== + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.3" + isarray "~1.0.0" + process-nextick-args "~2.0.0" + safe-buffer "~5.1.1" + string_decoder "~1.1.1" + util-deprecate "~1.0.1" + +regexpp@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/regexpp/-/regexpp-1.1.0.tgz#0e3516dd0b7904f413d2d4193dce4618c3a689ab" + integrity sha512-LOPw8FpgdQF9etWMaAfG/WRthIdXJGYp4mJ2Jgn/2lpkbod9jPn0t9UqN7AxBOKNfzRbYyVfgc7Vk4t/MpnXgw== + +require-uncached@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/require-uncached/-/require-uncached-1.0.3.tgz#4e0d56d6c9662fd31e43011c4b95aa49955421d3" + integrity sha1-Tg1W1slmL9MeQwEcS5WqSZVUIdM= + dependencies: + caller-path "^0.1.0" + resolve-from "^1.0.0" + +resolve-from@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-1.0.1.tgz#26cbfe935d1aeeeabb29bc3fe5aeb01e93d44226" + integrity sha1-Jsv+k10a7uq7Kbw/5a6wHpPUQiY= + +restore-cursor@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-2.0.0.tgz#9f7ee287f82fd326d4fd162923d62129eee0dfaf" + integrity sha1-n37ih/gv0ybU/RYpI9YhKe7g368= + dependencies: + onetime "^2.0.0" + signal-exit "^3.0.2" + +rimraf@^2.2.8: + version "2.6.2" + resolved 
"https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.2.tgz#2ed8150d24a16ea8651e6d6ef0f47c4158ce7a36" + integrity sha512-lreewLK/BlghmxtfH36YYVg1i8IAce4TI7oao75I1g245+6BctqTVQiBP3YUJ9C6DQOXJmkYR9X9fCLtCOJc5w== + dependencies: + glob "^7.0.5" + +run-async@^2.2.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/run-async/-/run-async-2.3.0.tgz#0371ab4ae0bdd720d4166d7dfda64ff7a445a6c0" + integrity sha1-A3GrSuC91yDUFm19/aZP96RFpsA= + dependencies: + is-promise "^2.1.0" + +rx-lite-aggregates@^4.0.8: + version "4.0.8" + resolved "https://registry.yarnpkg.com/rx-lite-aggregates/-/rx-lite-aggregates-4.0.8.tgz#753b87a89a11c95467c4ac1626c4efc4e05c67be" + integrity sha1-dTuHqJoRyVRnxKwWJsTvxOBcZ74= + dependencies: + rx-lite "*" + +rx-lite@*, rx-lite@^4.0.8: + version "4.0.8" + resolved "https://registry.yarnpkg.com/rx-lite/-/rx-lite-4.0.8.tgz#0b1e11af8bc44836f04a6407e92da42467b79444" + integrity sha1-Cx4Rr4vESDbwSmQH6S2kJGe3lEQ= + +safe-buffer@~5.1.0, safe-buffer@~5.1.1: + version "5.1.2" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" + integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== + +safer-buffer@^2.1.0: + version "2.1.2" + resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" + integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== + +semver@^5.3.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-5.5.0.tgz#dc4bbc7a6ca9d916dee5d43516f0092b58f7b8ab" + integrity sha512-4SJ3dm0WAwWy/NVeioZh5AntkdJoWKxHxcmyP622fOkgHa4z3R0TdBJICINyaSDE6uNwVc8gZr+ZinwZAH4xIA== + +shebang-command@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-1.2.0.tgz#44aac65b695b03398968c39f363fee5deafdf1ea" + integrity sha1-RKrGW2lbAzmJaMOfNj/uXer98eo= + dependencies: + shebang-regex 
"^1.0.0" + +shebang-regex@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-1.0.0.tgz#da42f49740c0b42db2ca9728571cb190c98efea3" + integrity sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM= + +signal-exit@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d" + integrity sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0= + +slice-ansi@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/slice-ansi/-/slice-ansi-1.0.0.tgz#044f1a49d8842ff307aad6b505ed178bd950134d" + integrity sha512-POqxBK6Lb3q6s047D/XsDVNPnF9Dl8JSaqe9h9lURl0OdNqy/ujDrOiIHtsqXMGbWWTIomRzAMaTyawAU//Reg== + dependencies: + is-fullwidth-code-point "^2.0.0" + +sprintf-js@~1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" + integrity sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw= + +string-width@^2.1.0, string-width@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-2.1.1.tgz#ab93f27a8dc13d28cac815c462143a6d9012ae9e" + integrity sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw== + dependencies: + is-fullwidth-code-point "^2.0.0" + strip-ansi "^4.0.0" + +string_decoder@~1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.1.1.tgz#9cf1611ba62685d7030ae9e4ba34149c3af03fc8" + integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg== + dependencies: + safe-buffer "~5.1.0" + +strip-ansi@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf" + integrity sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8= + dependencies: + ansi-regex "^2.0.0" + +strip-ansi@^4.0.0: + version "4.0.0" + resolved 
"https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-4.0.0.tgz#a8479022eb1ac368a871389b635262c505ee368f" + integrity sha1-qEeQIusaw2iocTibY1JixQXuNo8= + dependencies: + ansi-regex "^3.0.0" + +strip-json-comments@~2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" + integrity sha1-PFMZQukIwml8DsNEhYwobHygpgo= + +supports-color@4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-4.4.0.tgz#883f7ddabc165142b2a61427f3352ded195d1a3e" + integrity sha512-rKC3+DyXWgK0ZLKwmRsrkyHVZAjNkfzeehuFWdGGcqGDTZFH73+RH6S/RDAAxl9GusSjZSUWYLmT9N5pzXFOXQ== + dependencies: + has-flag "^2.0.0" + +supports-color@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7" + integrity sha1-U10EXOa2Nj+kARcIRimZXp3zJMc= + +supports-color@^5.3.0: + version "5.4.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.4.0.tgz#1c6b337402c2137605efe19f10fec390f6faab54" + integrity sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w== + dependencies: + has-flag "^3.0.0" + +table@4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/table/-/table-4.0.2.tgz#a33447375391e766ad34d3486e6e2aedc84d2e36" + integrity sha512-UUkEAPdSGxtRpiV9ozJ5cMTtYiqz7Ni1OGqLXRCynrvzdtR1p+cfOWe2RJLwvUG8hNanaSRjecIqwOjqeatDsA== + dependencies: + ajv "^5.2.3" + ajv-keywords "^2.1.0" + chalk "^2.1.0" + lodash "^4.17.4" + slice-ansi "1.0.0" + string-width "^2.1.1" + +text-table@~0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4" + integrity sha1-f17oI66AUgfACvLfSoTsP8+lcLQ= + +through@^2.3.6: + version "2.3.8" + resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5" + 
integrity sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU= + +tmp@^0.0.33: + version "0.0.33" + resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.33.tgz#6d34335889768d21b2bcda0aa277ced3b1bfadf9" + integrity sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw== + dependencies: + os-tmpdir "~1.0.2" + +type-check@~0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.3.2.tgz#5884cab512cf1d355e3fb784f30804b2b520db72" + integrity sha1-WITKtRLPHTVeP7eE8wgEsrUg23I= + dependencies: + prelude-ls "~1.1.2" + +typedarray@^0.0.6: + version "0.0.6" + resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777" + integrity sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c= + +util-deprecate@~1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" + integrity sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8= + +which@^1.2.9: + version "1.3.0" + resolved "https://registry.yarnpkg.com/which/-/which-1.3.0.tgz#ff04bdfc010ee547d780bec38e1ac1c2777d253a" + integrity sha512-xcJpopdamTuY5duC/KnTTNBraPK54YwpenP4lzxU8H91GudWpFv38u0CKjclE1Wi2EH2EDz5LRcHcKbCIzqGyg== + dependencies: + isexe "^2.0.0" + +wordwrap@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-1.0.0.tgz#27584810891456a4171c8d0226441ade90cbcaeb" + integrity sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus= + +wrappy@1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" + integrity sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8= + +write@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/write/-/write-0.2.1.tgz#5fc03828e264cea3fe91455476f7a3c566cb0757" + integrity sha1-X8A4KOJkzqP+kUVUdvejxWbLB1c= + dependencies: + mkdirp "^0.5.1" + +yallist@^2.1.2: + version "2.1.2" + resolved 
"https://registry.yarnpkg.com/yallist/-/yallist-2.1.2.tgz#1c11f9218f076089a47dd512f93c6699a6a81d52" + integrity sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI= diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/.gitignore b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/.gitignore new file mode 100644 index 0000000..e1bda13 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/.gitignore @@ -0,0 +1,27 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# Coverage report +coverage.report diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/LICENSE b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/LICENSE new file mode 100644 index 0000000..a612ad9 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. 
For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/README.md b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/README.md new file mode 100644 index 0000000..b72cbd2 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/README.md @@ -0,0 +1,591 @@ +# Taskcluster Client Go + +[![GoDoc](https://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23?status.svg)](https://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23) +[![Coverage Status](https://coveralls.io/repos/taskcluster/taskcluster/clients/client-go/badge.svg?branch=master&service=github)](https://coveralls.io/github/taskcluster/taskcluster/clients/client-go?branch=master) +[![License](https://img.shields.io/badge/license-MPL%202.0-orange.svg)](http://mozilla.org/MPL/2.0) + +A go (golang) port of taskcluster-client. + +Complete godoc documentation [here](https://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23). 
+ +This library provides the following packages to interface with Taskcluster: + +### HTTP APIs +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcauth +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcawsprovisioner +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcec2manager +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcgithub +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tchooks +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcindex +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tclogin +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcnotify +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcpurgecache +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueue +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcsecrets + +### AMQP APIs +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcauthevents +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcawsprovisionerevents +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcgithubevents +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcpurgecacheevents +* http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueueevents + +## Example programs + +To get you started quickly, some example programs are included that use both the HTTP APIs and the AMQP APIs: + +* This [HTTP example program](http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcauth#example-package--Scopes) demonstrates the use of the [tcauth](http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcauth) package to query the expiry and expanded scopes of a given 
clientId. +* This [HTTP example program](http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcauth#example-package--UpdateClient) demonstrates the use of the [tcauth](http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcauth) package to update an existing clientId with a new description and expiry. +* The [AMQP example program](http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueueevents#example-package--TaskclusterSniffer) demonstrates the use of the [tcqueueevents](http://godoc.org/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueueevents) package to listen in on Taskcluster tasks being defined and executed. + +## Calling API End-Points + +To invoke an API end-point, instantiate one of the HTTP API classes (from +section [HTTP APIs](#http-apis)). In the following example we instantiate an +instance of the `Queue` client class and use it to create a task. + +```go +package main + +import ( + "encoding/json" + "fmt" + "log" + "time" + + "github.com/taskcluster/slugid-go/slugid" + tcclient "github.com/taskcluster/taskcluster/clients/client-go/v23" + "github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueue" +) + +// ********************************************************* +// These type definitions are copied from: +// https://github.com/taskcluster/generic-worker/blob/5cb2876624ce43974b1e1f96205535b037d63953/generated_windows.go#L11-L377 +// ********************************************************* +type ( + Artifact struct { + + // Explicitly set the value of the HTTP `Content-Type` response header when the artifact(s) + // is/are served over HTTP(S). If not provided (this property is optional) the worker will + // guess the content type of artifacts based on the filename extension of the file storing + // the artifact content. It does this by looking at the system filename-to-mimetype mappings + // defined in the Windows registry. 
Note, setting `contentType` on a directory artifact will + // apply the same contentType to all files contained in the directory. + // + // See [mime.TypeByExtension](https://godoc.org/mime#TypeByExtension). + // + // Since: generic-worker 10.4.0 + ContentType string `json:"contentType,omitempty"` + + // Date when artifact should expire must be in the future, no earlier than task deadline, but + // no later than task expiry. If not set, defaults to task expiry. + // + // Since: generic-worker 1.0.0 + Expires tcclient.Time `json:"expires,omitempty"` + + // Name of the artifact, as it will be published. If not set, `path` will be used. + // Conventionally (although not enforced) path elements are forward slash separated. Example: + // `public/build/a/house`. Note, no scopes are required to read artifacts beginning `public/`. + // Artifact names not beginning `public/` are scope-protected (caller requires scopes to + // download the artifact). See the Queue documentation for more information. + // + // Since: generic-worker 8.1.0 + Name string `json:"name,omitempty"` + + // Relative path of the file/directory from the task directory. Note this is not an absolute + // path as is typically used in docker-worker, since the absolute task directory name is not + // known when the task is submitted. Example: `dist\regedit.exe`. It doesn't matter if + // forward slashes or backslashes are used. + // + // Since: generic-worker 1.0.0 + Path string `json:"path"` + + // Artifacts can be either an individual `file` or a `directory` containing + // potentially multiple files with recursively included subdirectories. + // + // Since: generic-worker 1.0.0 + // + // Possible values: + // * "file" + // * "directory" + Type string `json:"type"` + } + + // Requires scope `queue:get-artifact:`. + // + // Since: generic-worker 5.4.0 + ArtifactContent struct { + + // Max length: 1024 + Artifact string `json:"artifact"` + + // The required SHA 256 of the content body. 
+ // + // Since: generic-worker 10.8.0 + // + // Syntax: ^[a-f0-9]{64}$ + Sha256 string `json:"sha256,omitempty"` + + // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ + TaskID string `json:"taskId"` + } + + // Base64 encoded content of file/archive, up to 64KB (encoded) in size. + // + // Since: generic-worker 11.1.0 + Base64Content struct { + + // Base64 encoded content of file/archive, up to 64KB (encoded) in size. + // + // Since: generic-worker 11.1.0 + // + // Syntax: ^[A-Za-z0-9/+]+[=]{0,2}$ + // Max length: 65536 + Base64 string `json:"base64"` + } + + // By default tasks will be resolved with `state/reasonResolved`: `completed/completed` + // if all task commands have a zero exit code, or `failed/failed` if any command has a + // non-zero exit code. This payload property allows customsation of the task resolution + // based on exit code of task commands. + ExitCodeHandling struct { + + // Exit codes for any command in the task payload to cause this task to + // be resolved as `exception/intermittent-task`. Typically the Queue + // will then schedule a new run of the existing `taskId` (rerun) if not + // all task runs have been exhausted. + // + // See [itermittent tasks](https://docs.taskcluster.net/docs/reference/platform/taskcluster-queue/docs/worker-interaction#intermittent-tasks) for more detail. + // + // Since: generic-worker 10.10.0 + // + // Array items: + // Mininum: 1 + Retry []int64 `json:"retry,omitempty"` + } + + // Feature flags enable additional functionality. + // + // Since: generic-worker 5.3.0 + FeatureFlags struct { + + // An artifact named `public/chainOfTrust.json.asc` should be generated + // which will include information for downstream tasks to build a level + // of trust for the artifacts produced by the task and the environment + // it ran in. + // + // Since: generic-worker 5.3.0 + ChainOfTrust bool `json:"chainOfTrust,omitempty"` + + // Runs commands with UAC elevation. 
Only set to true when UAC is + // enabled on the worker and Administrative privileges are required by + // task commands. When UAC is disabled on the worker, task commands will + // already run with full user privileges, and therefore a value of true + // will result in a malformed-payload task exception. + // + // A value of true does not add the task user to the `Administrators` + // group - see the `osGroups` property for that. Typically + // `task.payload.osGroups` should include an Administrative group, such + // as `Administrators`, when setting to true. + // + // For security, `runAsAdministrator` feature cannot be used in + // conjunction with `chainOfTrust` feature. + // + // Requires scope + // `generic-worker:run-as-administrator:/`. + // + // Since: generic-worker 10.11.0 + RunAsAdministrator bool `json:"runAsAdministrator,omitempty"` + + // The taskcluster proxy provides an easy and safe way to make authenticated + // taskcluster requests within the scope(s) of a particular task. See + // [the github project](https://github.com/taskcluster/taskcluster-proxy) for more information. + // + // Since: generic-worker 10.6.0 + TaskclusterProxy bool `json:"taskclusterProxy,omitempty"` + } + + FileMount struct { + + // One of: + // * ArtifactContent + // * URLContent + // * RawContent + // * Base64Content + Content json.RawMessage `json:"content"` + + // The filesystem location to mount the file. + // + // Since: generic-worker 5.4.0 + File string `json:"file"` + } + + // This schema defines the structure of the `payload` property referred to in a + // Taskcluster Task definition. + GenericWorkerPayload struct { + + // Artifacts to be published. + // + // Since: generic-worker 1.0.0 + Artifacts []Artifact `json:"artifacts,omitempty"` + + // One entry per command (consider each entry to be interpreted as a full line of + // a Windows™ .bat file). 
For example: + // ``` + // [ + // "set", + // "echo hello world > hello_world.txt", + // "set GOPATH=C:\\Go" + // ] + // ``` + // + // Since: generic-worker 0.0.1 + // + // Array items: + Command []string `json:"command"` + + // Env vars must be string to __string__ mappings (not number or boolean). For example: + // ``` + // { + // "PATH": "C:\\Windows\\system32;C:\\Windows", + // "GOOS": "windows", + // "FOO_ENABLE": "true", + // "BAR_TOTAL": "3" + // } + // ``` + // + // Since: generic-worker 0.0.1 + // + // Map entries: + Env map[string]string `json:"env,omitempty"` + + // Feature flags enable additional functionality. + // + // Since: generic-worker 5.3.0 + Features FeatureFlags `json:"features,omitempty"` + + // Maximum time the task container can run in seconds. + // + // Since: generic-worker 0.0.1 + // + // Mininum: 1 + // Maximum: 86400 + MaxRunTime int64 `json:"maxRunTime"` + + // Directories and/or files to be mounted. + // + // Since: generic-worker 5.4.0 + // + // Array items: + // One of: + // * FileMount + // * WritableDirectoryCache + // * ReadOnlyDirectory + Mounts []json.RawMessage `json:"mounts,omitempty"` + + // By default tasks will be resolved with `state/reasonResolved`: `completed/completed` + // if all task commands have a zero exit code, or `failed/failed` if any command has a + // non-zero exit code. This payload property allows customsation of the task resolution + // based on exit code of task commands. + OnExitStatus ExitCodeHandling `json:"onExitStatus,omitempty"` + + // A list of OS Groups that the task user should be a member of. Requires scope + // `generic-worker:os-group://` for each + // group listed. + // + // Since: generic-worker 6.0.0 + // + // Array items: + OSGroups []string `json:"osGroups,omitempty"` + + // Specifies an artifact name for publishing RDP connection information. 
+ // + // Since this is potentially sensitive data, care should be taken to publish + // to a suitably locked down path, such as + // `login-identity//rdpinfo.json` which is only readable for + // the given login identity (for example + // `login-identity/mozilla-ldap/pmoore@mozilla.com/rdpinfo.json`). See the + // [artifact namespace guide](https://docs.taskcluster.net/manual/design/namespaces#artifacts) for more information. + // + // Use of this feature requires scope + // `generic-worker:allow-rdp:/` which must be + // declared as a task scope. + // + // The RDP connection data is published during task startup so that a user + // may interact with the running task. + // + // The task environment will be retained for 12 hours after the task + // completes, to enable an interactive user to perform investigative tasks. + // After these 12 hours, the worker will delete the task's Windows user + // account, and then continue with other tasks. + // + // No guarantees are given about the resolution status of the interactive + // task, since the task is inherently non-reproducible and no automation + // should rely on this value. + // + // Since: generic-worker 10.5.0 + RdpInfo string `json:"rdpInfo,omitempty"` + + // URL of a service that can indicate tasks superseding this one; the current `taskId` + // will be appended as a query argument `taskId`. The service should return an object with + // a `supersedes` key containing a list of `taskId`s, including the supplied `taskId`. The + // tasks should be ordered such that each task supersedes all tasks appearing later in the + // list. + // + // See [superseding](https://docs.taskcluster.net/reference/platform/taskcluster-queue/docs/superseding) for more detail. + // + // Since: generic-worker 10.2.2 + SupersederURL string `json:"supersederUrl,omitempty"` + } + + // Byte-for-byte literal inline content of file/archive, up to 64KB in size. 
+ // + // Since: generic-worker 11.1.0 + RawContent struct { + + // Byte-for-byte literal inline content of file/archive, up to 64KB in size. + // + // Since: generic-worker 11.1.0 + // + // Max length: 65536 + Raw string `json:"raw"` + } + + ReadOnlyDirectory struct { + + // One of: + // * ArtifactContent + // * URLContent + // * RawContent + // * Base64Content + Content json.RawMessage `json:"content"` + + // The filesystem location to mount the directory volume. + // + // Since: generic-worker 5.4.0 + Directory string `json:"directory"` + + // Archive format of content for read only directory. + // + // Since: generic-worker 5.4.0 + // + // Possible values: + // * "rar" + // * "tar.bz2" + // * "tar.gz" + // * "zip" + Format string `json:"format"` + } + + // URL to download content from. + // + // Since: generic-worker 5.4.0 + URLContent struct { + + // The required SHA 256 of the content body. + // + // Since: generic-worker 10.8.0 + // + // Syntax: ^[a-f0-9]{64}$ + Sha256 string `json:"sha256,omitempty"` + + // URL to download content from. + // + // Since: generic-worker 5.4.0 + URL string `json:"url"` + } + + WritableDirectoryCache struct { + + // Implies a read/write cache directory volume. A unique name for the + // cache volume. Requires scope `generic-worker:cache:`. + // Note if this cache is loaded from an artifact, you will also require + // scope `queue:get-artifact:` to use this cache. + // + // Since: generic-worker 5.4.0 + CacheName string `json:"cacheName"` + + // One of: + // * ArtifactContent + // * URLContent + // * RawContent + // * Base64Content + Content json.RawMessage `json:"content,omitempty"` + + // The filesystem location to mount the directory volume. + // + // Since: generic-worker 5.4.0 + Directory string `json:"directory"` + + // Archive format of the preloaded content (if `content` provided). 
+ // + // Since: generic-worker 5.4.0 + // + // Possible values: + // * "rar" + // * "tar.bz2" + // * "tar.gz" + // * "zip" + Format string `json:"format,omitempty"` + } +) + +func fatalOnError(err error) { + if err != nil { + log.Fatalf("Error:\n%v", err) + } +} + +func mustCompileToRawMessage(data interface{}) *json.RawMessage { + bytes, err := json.Marshal(data) + fatalOnError(err) + var JSON json.RawMessage + err = json.Unmarshal(bytes, &JSON) + fatalOnError(err) + return &JSON +} + +func main() { + myQueue := tcqueue.NewFromEnv() + taskID := slugid.Nice() + created := time.Now() + + env := map[string]string{} + + payload := GenericWorkerPayload{ + Artifacts: []Artifact{}, + Command: []string{ + `echo Hello World!`, + }, + Env: env, + Features: FeatureFlags{ + ChainOfTrust: false, + }, + MaxRunTime: 60, + Mounts: []json.RawMessage{}, + OSGroups: []string{}, + } + + payloadJSON := mustCompileToRawMessage(payload) + + taskDef := &tcqueue.TaskDefinitionRequest{ + Created: tcclient.Time(created), + Deadline: tcclient.Time(created.Add(time.Hour * 3)), + Dependencies: []string{}, + Expires: tcclient.Time(created.Add(time.Hour * 24)), + Extra: json.RawMessage("{}"), + Metadata: tcqueue.TaskMetadata{ + Description: "xxxx", + Name: "xxxx", + Owner: "pmoore@mozilla.com", + Source: "https://hg.mozilla.org/try/file/xxxx", + }, + Payload: *payloadJSON, + Priority: "normal", + ProvisionerID: "some-provisioner-id", + Requires: "all-completed", + Retries: 5, + Routes: []string{}, + SchedulerID: "-", + Scopes: []string{}, + Tags: map[string]string{}, + TaskGroupID: taskID, + WorkerType: "some-worker-type", + } + + tsr, err := myQueue.CreateTask(taskID, taskDef) + fatalOnError(err) + + respJSON, err := json.MarshalIndent(tsr, "", " ") + fatalOnError(err) + + fmt.Println(string(respJSON)) + fmt.Println("") + fmt.Printf("curl -L https://queue.taskcluster.net/v1/task/%v/runs/0/artifacts/public/logs/live.log | gunzip\n", taskID) +} +``` + +## Temporary credentials + +You can 
generate temporary credentials from permanent credentials using the +go client. This may be useful if you wish to issue credentials to a third +party. See https://docs.taskcluster.net/docs/manual/design/apis/hawk/temporary-credentials for +more information. Both named and unnamed temporary credentials are supported, +although named credentials are preferred if you are not sure which type to use. + +### Example + +```go +package main + +import ( + "fmt" + "log" + "os" + "strconv" + "time" + + tcclient "github.com/taskcluster/taskcluster/clients/client-go/v23" + "github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueue" +) + +const ( + taskID = "VESwp9JaRo-XkFN_bemBhw" + runID = 0 + rootURL = "https://tc.example.com" +) + +// This simple demo lists the artifacts in run 0 of task +// VESwp9JaRo-XkFN_bemBhw. It creates permanent credentials from environment +// variables TASKCLUSTER_CLIENT_ID and TASKCLUSTER_ACCESS_TOKEN, and then +// creates temporary credentials, valid for 24 hours, from these permanent +// credentials. It queries the Queue using the temporary credentials, and with +// limited authorized scopes. +// +// Note, the queueClient.ListArtifacts(...) call doesn't require any scopes. +// The generation of temporary credentials, and limiting via authorized scopes +// is purely illustrative. The TASKCLUSTER_CLIENT_ID must satisfy +// auth:create-client:demo-client/taskcluster/clients/client-go, though. 
+func main() { + permCreds := &tcclient.Credentials{ + ClientID: os.Getenv("TASKCLUSTER_CLIENT_ID"), + AccessToken: os.Getenv("TASKCLUSTER_ACCESS_TOKEN"), + } + tempCreds, err := permCreds.CreateNamedTemporaryCredentials( + "demo-client/taskcluster/clients/client-go", + time.Hour*24, + "assume:legacy-permacred", + ) + if err != nil { + log.Fatalf("Could not create temporary credentials: %v", err) + } + tempCreds.AuthorizedScopes = []string{ + "queue:get-artifact:private/build/*", + } + queueClient, err := tcqueue.New(tempCreds, rootURL) + if err != nil { + // bug in code + log.Fatalf("SERIOUS BUG! Could not create client from generated temporary credentials: %v", err) + } + listArtifactsResponse, err := queueClient.ListArtifacts(taskID, strconv.Itoa(runID), "", "") + if err != nil { + log.Fatalf("Could not call queue.listArtifacts endpoint: %v", err) + } + fmt.Printf("Task %v run %v artifacts:\n", taskID, runID) + for _, artifact := range listArtifactsResponse.Artifacts { + fmt.Printf(" * %v\n", artifact.Name) + } + fmt.Println("Done") +} +``` + +See the [HTTP API godocs](#http-apis) for more information, or browse the [integration +tests](https://github.com/taskcluster/taskcluster/tree/master/clients/client-go/integrationtest) +for further examples. + +## Generating +The libraries provided by this client are auto-generated based on the schema references in this repository. +This is done with the `yarn generate` command, run from the top level of the repository. + +The code which generates the library can all be found under the top level [codegenerator](https://github.com/taskcluster/taskcluster/tree/master/clients/client-go/codegenerator) +directory. 
diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/creds.go b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/creds.go new file mode 100644 index 0000000..d47a984 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/creds.go @@ -0,0 +1,233 @@ +package tcclient + +import ( + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + "strconv" + "strings" + "time" + + "github.com/taskcluster/jsonschema2go/text" + "github.com/taskcluster/slugid-go/slugid" +) + +// Credentials represents the set of credentials required to access protected +// Taskcluster HTTP APIs. +type Credentials struct { + // ClientID + ClientID string `json:"clientId"` + // AccessToken + AccessToken string `json:"accessToken"` + // Certificate used only for temporary credentials + Certificate string `json:"certificate"` + // AuthorizedScopes if set to nil, is ignored. Otherwise, it should be a + // subset of the scopes that the ClientId already has, and restricts the + // Credentials to only having these scopes. This is useful when performing + // actions on behalf of a client which has more restricted scopes. Setting + // to nil is not the same as setting to an empty array. If AuthorizedScopes + // is set to an empty array rather than nil, this is equivalent to having + // no scopes at all. + // See https://docs.taskcluster.net/docs/manual/design/apis/hawk/authorized-scopes + AuthorizedScopes []string `json:"authorizedScopes"` +} + +func (creds *Credentials) String() string { + return fmt.Sprintf( + "ClientId: %q\nAccessToken: %q\nCertificate: %q\nAuthorizedScopes: %q", + creds.ClientID, + text.StarOut(creds.AccessToken), + creds.Certificate, + creds.AuthorizedScopes, + ) +} + +// Client is the entry point into all the functionality in this package. It +// contains authentication credentials, and a service endpoint, which are +// required for all HTTP operations. 
+type Client struct { + Credentials *Credentials + // The Root URL of the Taskcluster deployment + RootURL string + // The (short) name of the service being accessed + ServiceName string + // The API version of the service being accessed + APIVersion string + // Whether authentication is enabled (e.g. set to 'false' when using taskcluster-proxy) + Authenticate bool + // HTTPClient is a ReducedHTTPClient to be used for the http call instead of + // the DefaultHTTPClient. + HTTPClient ReducedHTTPClient + // Context that aborts all requests with this client + Context context.Context +} + +// Certificate represents the certificate used in Temporary Credentials. See +// https://docs.taskcluster.net/docs/manual/design/apis/hawk/temporary-credentials +type Certificate struct { + Version int `json:"version"` + Scopes []string `json:"scopes"` + Start int64 `json:"start"` + Expiry int64 `json:"expiry"` + Seed string `json:"seed"` + Signature string `json:"signature"` + Issuer string `json:"issuer,omitempty"` +} + +// CreateNamedTemporaryCredentials generates temporary credentials from permanent +// credentials, valid for the given duration, starting immediately. The +// temporary credentials' scopes must be a subset of the permanent credentials' +// scopes. The duration may not be more than 31 days. Any authorized scopes of +// the permanent credentials will be passed through as authorized scopes to the +// temporary credentials, but will not be restricted via the certificate. +// +// Note that the auth service already applies a 5 minute clock skew to the +// start and expiry times in +// https://github.com/taskcluster/taskcluster-auth/pull/117 so no clock skew is +// applied in this method, nor should be applied by the caller. 
+// +// See https://docs.taskcluster.net/docs/manual/design/apis/hawk/temporary-credentials +func (permaCreds *Credentials) CreateNamedTemporaryCredentials(tempClientID string, duration time.Duration, scopes ...string) (tempCreds *Credentials, err error) { + if duration > 31*24*time.Hour { + return nil, errors.New("Temporary credentials must expire within 31 days; however a duration of " + duration.String() + " was specified to (*tcclient.Client).CreateTemporaryCredentials(...) method") + } + + now := time.Now() + start := now + expiry := now.Add(duration) + + if permaCreds.ClientID == "" { + return nil, errors.New("Temporary credentials cannot be created from credentials that have an empty ClientId") + } + if permaCreds.AccessToken == "" { + return nil, errors.New("Temporary credentials cannot be created from credentials that have an empty AccessToken") + } + if permaCreds.Certificate != "" { + return nil, errors.New("Temporary credentials cannot be created from temporary credentials, only from permanent credentials") + } + + cert := &Certificate{ + Version: 1, + Scopes: scopes, + Start: start.UnixNano() / 1e6, + Expiry: expiry.UnixNano() / 1e6, + Seed: slugid.V4() + slugid.V4(), + Signature: "", // gets set in Sign() method below + } + // include the issuer iff this is a named credential + if tempClientID != "" { + cert.Issuer = permaCreds.ClientID + } + + cert.Sign(permaCreds.AccessToken, tempClientID) + + certBytes, err := json.Marshal(cert) + if err != nil { + return + } + + tempAccessToken, err := generateTemporaryAccessToken(permaCreds.AccessToken, cert.Seed) + if err != nil { + return + } + + tempCreds = &Credentials{ + ClientID: permaCreds.ClientID, + AccessToken: tempAccessToken, + Certificate: string(certBytes), + AuthorizedScopes: permaCreds.AuthorizedScopes, + } + if tempClientID != "" { + tempCreds.ClientID = tempClientID + } + + return +} + +// CreateTemporaryCredentials is an alias for CreateNamedTemporaryCredentials +// with an empty name. 
+func (permaCreds *Credentials) CreateTemporaryCredentials(duration time.Duration, scopes ...string) (tempCreds *Credentials, err error) { + return permaCreds.CreateNamedTemporaryCredentials("", duration, scopes...) +} + +func (cert *Certificate) Sign(accessToken string, tempClientID string) (err error) { + lines := []string{"version:" + strconv.Itoa(cert.Version)} + // iff this is a named credential, include clientId and issuer + if cert.Issuer != "" { + lines = append(lines, + "clientId:"+tempClientID, + "issuer:"+cert.Issuer, + ) + } + lines = append(lines, + "seed:"+cert.Seed, + "start:"+strconv.FormatInt(cert.Start, 10), + "expiry:"+strconv.FormatInt(cert.Expiry, 10), + "scopes:", + ) + lines = append(lines, cert.Scopes...) + hash := hmac.New(sha256.New, []byte(accessToken)) + text := strings.Join(lines, "\n") + _, err = hash.Write([]byte(text)) + if err != nil { + return err + } + cert.Signature = base64.StdEncoding.EncodeToString(hash.Sum([]byte{})) + return +} + +func generateTemporaryAccessToken(permAccessToken, seed string) (tempAccessToken string, err error) { + hash := hmac.New(sha256.New, []byte(permAccessToken)) + _, err = hash.Write([]byte(seed)) + if err != nil { + return "", err + } + tempAccessToken = strings.TrimRight(base64.URLEncoding.EncodeToString(hash.Sum([]byte{})), "=") + return +} + +// Cert attempts to parse the certificate string to return it as an object. If +// the certificate is an empty string (e.g. in the case of permanent +// credentials) then a nil pointer is returned for the certificate. If a +// certificate has been specified but cannot be parsed, an error is returned, +// and cert is an empty certificate (rather than nil). 
+func (creds *Credentials) Cert() (cert *Certificate, err error) { + if creds.Certificate == "" { + return + } + cert = new(Certificate) + err = json.Unmarshal([]byte(creds.Certificate), cert) + return +} + +// CredentialsFromEnvVars creates and returns Taskcluster credentials +// initialised from the values of environment variables: +// +// TASKCLUSTER_CLIENT_ID +// TASKCLUSTER_ACCESS_TOKEN +// TASKCLUSTER_CERTIFICATE +// +// No validation is performed on the assigned values, and unset environment +// variables will result in empty string values. +func CredentialsFromEnvVars() *Credentials { + return &Credentials{ + ClientID: os.Getenv("TASKCLUSTER_CLIENT_ID"), + AccessToken: os.Getenv("TASKCLUSTER_ACCESS_TOKEN"), + Certificate: os.Getenv("TASKCLUSTER_CERTIFICATE"), + } +} + +// RootURLFromEnvVars returns the value of environment variable +// TASKCLUSTER_PROXY_URL if set to a non-empty string, otherwise the value of +// TASKCLUSTER_ROOT_URL if set, otherwise the empty string. +func RootURLFromEnvVars() string { + if proxyURL := os.Getenv("TASKCLUSTER_PROXY_URL"); proxyURL != "" { + return proxyURL + } + return os.Getenv("TASKCLUSTER_ROOT_URL") +} diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/go.mod b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/go.mod new file mode 100644 index 0000000..40b753c --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/go.mod @@ -0,0 +1,26 @@ +module github.com/taskcluster/taskcluster/clients/client-go/v23 + +go 1.12 + +require ( + github.com/fatih/camelcase v1.0.0 // indirect + github.com/ghodss/yaml v1.0.0 // indirect + github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 + github.com/taskcluster/httpbackoff/v3 v3.0.0 + github.com/taskcluster/jsonschema2go v1.0.0 + github.com/taskcluster/pulse-go v1.0.0 + github.com/taskcluster/slugid-go v1.1.0 + github.com/taskcluster/taskcluster-base-go v1.0.0 + github.com/taskcluster/taskcluster-lib-urls 
v12.0.0+incompatible + github.com/tent/hawk-go v0.0.0-20161026210932-d341ea318957 + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.1.0 // indirect + golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386 + gopkg.in/yaml.v2 v2.2.2 // indirect +) + +// https://bugzilla.mozilla.org/show_bug.cgi?id=1580513 +replace gopkg.in/yaml.v2 => github.com/go-yaml/yaml v0.0.0-20181115110504-51d6538a90f8 + +replace gopkg.in/check.v1 => github.com/go-check/check v0.0.0-20190902080502-41f04d3bba15 diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/go.sum b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/go.sum new file mode 100644 index 0000000..914730c --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/go.sum @@ -0,0 +1,73 @@ +github.com/cenkalti/backoff v0.0.0-20190506075156-2146c9339422 h1:+FKjzBIdfBHYDvxCv+djmDJdes/AoDtg8gpcxowBlF8= +github.com/cenkalti/backoff v0.0.0-20190506075156-2146c9339422/go.mod h1:b6Nc7NRH5C4aCISLry0tLnTjcuTEvoiqcWDdsU0sOGM= +github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY= +github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= +github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= 
+github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-check/check v0.0.0-20161208181325-20d25e280405/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-check/check v0.0.0-20190902080502-41f04d3bba15/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-yaml/yaml v0.0.0-20181115110504-51d6538a90f8 h1:lMJ4rStmFyGCSg/zzEO1iTNQ8oq7YKFXIQocfGdsrRc= +github.com/go-yaml/yaml v0.0.0-20181115110504-51d6538a90f8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94 h1:0ngsPmuP6XIjiFRNFYlvKwSr5zff2v+uPHaffZ6/M4k= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190819003559-eade30b20f1d h1:ciocj2R6f6p+WgiA3JlmM6/v9w3Ld/g6ENr9YgHBgxM= +github.com/streadway/amqp v0.0.0-20190819003559-eade30b20f1d/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827080102-edfb9018d271 h1:9ZltBYfOn7l3lpNgPb+vGbTc00IBlC+iJv7YII11rNo= +github.com/streadway/amqp v0.0.0-20190827080102-edfb9018d271/go.mod 
h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/taskcluster/httpbackoff v1.0.0 h1:bdh5txPv6geBVSEcx7Jy3kqiBaIrCZJwzCotPJKf9DU= +github.com/taskcluster/httpbackoff v1.0.0/go.mod h1:DEx05B3r52XQRbgzZ5y6XorMjVXBhtoHgc/ap+yLXgY= +github.com/taskcluster/httpbackoff/v3 v3.0.0 h1:Zh2BCW2iA3fzBBuZo2E4MvwyPSB6aimyI4EreeK3TRM= +github.com/taskcluster/httpbackoff/v3 v3.0.0/go.mod h1:99ubellEC0fmRj7wnGkPftT2xfCY7NmbjT3gzn2ZPUM= +github.com/taskcluster/jsonschema2go v1.0.0 h1:ZEDj2NKh8Sceq36zyLhSV6ann/aNXKZIe9cAXq7CDdk= +github.com/taskcluster/jsonschema2go v1.0.0/go.mod h1:jhsT3XPj3iLNRx0efJVfFzZBZgxeYE7IHfZAai8wuKQ= +github.com/taskcluster/pulse-go v1.0.0 h1:ys4ZUNp5TYiV5LSMxge4YF/AtuBUNH9StAA/bkno+r0= +github.com/taskcluster/pulse-go v1.0.0/go.mod h1:uuaqnRQj9XqouabCEKjnrlJiC6UT9Gurx2oSe6s+irM= +github.com/taskcluster/slugid-go v1.0.1 h1:qcuE5VYQ9a7IW5EUNQ63MoCC30LNJfzo56teJPIWKB8= +github.com/taskcluster/slugid-go v1.0.1/go.mod h1:a6qZo5JMShWvjDrupo8HLd+FVldMYW0Tc7YTrLT1ML4= +github.com/taskcluster/slugid-go v1.1.0 h1:SWsUplliyamdYzOKVM4+lDohZKuL63fKreGkvIKJ9aI= +github.com/taskcluster/slugid-go v1.1.0/go.mod h1:5sOAcPHjqso1UkKxSl77CkKgOwha0D9X0msBKBj0AOg= +github.com/taskcluster/taskcluster-base-go v1.0.0 h1:Jh2R/J7+a23LjtYEHQtkFV04QBxx6EVX8E0PrzUqJo4= +github.com/taskcluster/taskcluster-base-go v1.0.0/go.mod h1:ByyzyqqufsfZTrAHUw+0Grp8FwZAizZOKzVE1IpDXxQ= +github.com/taskcluster/taskcluster-lib-urls v12.0.0+incompatible h1:57WLzh7B04y6ahTOJ8wjvdkbwYqnyJkwLXQ1Tu4E/DU= +github.com/taskcluster/taskcluster-lib-urls v12.0.0+incompatible/go.mod h1:ALqTgi15AmJGEGubRKM0ydlLAFatlQPrQrmal9YZpQs= +github.com/tent/hawk-go v0.0.0-20161026210932-d341ea318957 h1:6Fre/uvwovW5YY4nfHZk66cAg9HjT9YdFSAJHUUgOyQ= +github.com/tent/hawk-go v0.0.0-20161026210932-d341ea318957/go.mod h1:dch7ywQEefE1ibFqBG1erFibrdUIwovcwQjksYuHuP4= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer 
v0.0.0-20190812004523-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190624190245-7f2218787638 h1:uIfBkD8gLczr4XDgYpt/qJYds2YJwZRNw4zs7wSnNhk= +golang.org/x/tools v0.0.0-20190624190245-7f2218787638/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190701013530-fb37f6ba8261 h1:sZzBoJ2n8UwkIuiimaNrJkotv5wGSBI+QzjunXUjLQE= +golang.org/x/tools v0.0.0-20190701013530-fb37f6ba8261/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190708122732-72ffa07ba3db h1:A+9p+kHB/5+SP59X7Zyu7j90kc60c6G//CUYAAayKEE= +golang.org/x/tools v0.0.0-20190708122732-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190715045107-607ca053a137 h1:fTnKBCUDrF9O76dW88IygY9ki9DkVkHaY2TXgzHcyes= +golang.org/x/tools v0.0.0-20190715045107-607ca053a137/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386 
h1:W/t3IYUOQPd8DK2ssOWA8sjulHHMxzTgiQkSx0z5sRQ= +golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/http.go b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/http.go new file mode 100644 index 0000000..3e7ff8e --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/http.go @@ -0,0 +1,337 @@ +package tcclient + +import ( + "bytes" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + + // "net/http/httputil" + "net/url" + "reflect" + "time" + + "github.com/taskcluster/httpbackoff/v3" + hawk "github.com/tent/hawk-go" + tcurls "github.com/taskcluster/taskcluster-lib-urls" +) + +var debug = false + +func init() { + if _, ok := os.LookupEnv("TASKCLUSTER_DEBUG"); ok { + debug = true + } +} + +// CallSummary provides information about the underlying http request and +// response issued for a given API call. +type CallSummary struct { + HTTPRequest *http.Request + // Keep a copy of request body in addition to the *http.Request, since + // accessing the Body via the *http.Request object, you get a io.ReadCloser + // - and after the request has been made, the body will have been read, and + // the data lost... This way, it is still available after the api call + // returns. + HTTPRequestBody string + // The Go Type which is marshaled into json and used as the http request + // body. 
+ HTTPRequestObject interface{} + HTTPResponse *http.Response + // Keep a copy of response body in addition to the *http.Response, since + // accessing the Body via the *http.Response object, you get a + // io.ReadCloser - and after the response has been read once (to unmarshal + // json into native go types) the data is lost... This way, it is still + // available after the api call returns. + HTTPResponseBody string + // Keep a record of how many http requests were attempted + Attempts int +} + +func (cs *CallSummary) String() string { + s := "\nCALL SUMMARY\n============\n" + if req := cs.HTTPRequest; req != nil { + s += fmt.Sprintf("Method: %v\n", req.Method) + if debug { + if req.URL != nil { + s += fmt.Sprintf("URL: %v\n", req.URL) + } + s += fmt.Sprintf("Request Headers:\n%#v\n", req.Header) + s += fmt.Sprintf("Request Body:\n%v\n", cs.HTTPRequestBody) + if resp := cs.HTTPResponse; resp != nil { + s += fmt.Sprintf("Response Headers:\n%#v\n", cs.HTTPResponse.Header) + } + } else { + if req.URL != nil { + s += fmt.Sprintf("Service: %s:%s\n", req.URL.Hostname(), req.URL.Port()) + } + } + } + s += fmt.Sprintf("Response Body:\n%v\n", cs.HTTPResponseBody) + s += fmt.Sprintf("Attempts: %v", cs.Attempts) + return s +} + +type APICall struct { + Client *Client + Route string + QueryString url.Values + Payload io.Reader +} + +// ReducedHTTPClient is the interface that wraps the functionality of +// http.Client that we actually use in Client.APICall. +type ReducedHTTPClient interface { + Do(req *http.Request) (*http.Response, error) +} + +// defaultHTTPClient is the HTTP Client used to make requests if none are +// defined in the client. +// A single object is created and used because http.Client is thread-safe when +// making multiple requests in various goroutines. 
+var defaultHTTPClient ReducedHTTPClient = &http.Client{} + +// utility function to create a URL object based on given data +func setURL(client *Client, route string, query url.Values) (u *url.URL, err error) { + URL := tcurls.API(client.RootURL, client.ServiceName, client.APIVersion, route) + u, err = url.Parse(URL) + if err != nil { + return nil, fmt.Errorf("Cannot parse url: '%v', is RootURL (%v) set correctly?\n%v\n", URL, client.RootURL, err) + } + if query != nil { + u.RawQuery = query.Encode() + } + return +} + +// Request is the underlying method that makes a raw API request, without +// performing any json marshaling/unmarshaling of requests/responses. It is +// useful if you wish to handle raw payloads and/or raw http response bodies, +// rather than calling APICall which translates []byte to/from go types. +func (client *Client) Request(rawPayload []byte, method, route string, query url.Values) (*CallSummary, error) { + callSummary := new(CallSummary) + callSummary.HTTPRequestBody = string(rawPayload) + + // function to perform http request - we call this using backoff library to + // have exponential backoff in case of intermittent failures (e.g. network + // blips or HTTP 5xx errors) + httpCall := func() (*http.Response, error, error) { + var ioReader io.Reader + ioReader = bytes.NewReader(rawPayload) + u, err := setURL(client, route, query) + if err != nil { + return nil, nil, fmt.Errorf("apiCall url cannot be parsed:\n%v\n", err) + } + callSummary.HTTPRequest, err = http.NewRequest(method, u.String(), ioReader) + if err != nil { + return nil, nil, fmt.Errorf("Internal error: apiCall url cannot be parsed although thought to be valid: '%v', is the RootURL (%v) set correctly?\n%v\n", u.String(), client.RootURL, err) + } + if len(rawPayload) > 0 { + callSummary.HTTPRequest.Header.Set("Content-Type", "application/json") + } + // Refresh Authorization header with each call... + // Only authenticate if client library user wishes to. 
+ if client.Authenticate { + err = client.Credentials.SignRequest(callSummary.HTTPRequest) + if err != nil { + return nil, nil, err + } + } + // Set context if one is given + if client.Context != nil { + callSummary.HTTPRequest = callSummary.HTTPRequest.WithContext(client.Context) + } + var resp *http.Response + if client.HTTPClient != nil { + resp, err = client.HTTPClient.Do(callSummary.HTTPRequest) + } else { + resp, err = defaultHTTPClient.Do(callSummary.HTTPRequest) + } + // return cancelled error, if context was cancelled + if client.Context != nil && client.Context.Err() != nil { + return nil, nil, client.Context.Err() + } + // b, e := httputil.DumpResponse(resp, true) + // if e == nil { + // fmt.Println(string(b)) + // } + return resp, err, nil + } + + // Make HTTP API calls using an exponential backoff algorithm... + var err error + callSummary.HTTPResponse, callSummary.Attempts, err = httpbackoff.Retry(httpCall) + + // read response into memory, so that we can return the body + if callSummary.HTTPResponse != nil { + body, err2 := ioutil.ReadAll(callSummary.HTTPResponse.Body) + if err2 == nil { + callSummary.HTTPResponseBody = string(body) + } + } + + return callSummary, err + +} + +// SignRequest will add an Authorization header +func (c *Credentials) SignRequest(req *http.Request) (err error) { + // s, err := c.SignHeader(req.Method, req.URL.String(), hash) + // req.Header.Set("Authorization", s) + // return err + + credentials := &hawk.Credentials{ + ID: c.ClientID, + Key: c.AccessToken, + Hash: sha256.New, + } + reqAuth := hawk.NewRequestAuth(req, credentials, 0) + reqAuth.Ext, err = getExtHeader(c) + if err != nil { + return fmt.Errorf("Internal error: was not able to generate hawk ext header from provided credentials:\n%s\n%s", c, err) + } + req.Header.Set("Authorization", reqAuth.RequestHeader()) + return nil +} + +type APICallException struct { + CallSummary *CallSummary + RootCause error +} + +func (err *APICallException) Error() string { + return 
err.CallSummary.String() + "\n" + err.RootCause.Error() +} + +// APICall is the generic REST API calling method which performs all REST API +// calls for this library. Each auto-generated REST API method simply is a +// wrapper around this method, calling it with specific specific arguments. +func (client *Client) APICall(payload interface{}, method, route string, result interface{}, query url.Values) (interface{}, *CallSummary, error) { + rawPayload := []byte{} + var err error + if reflect.ValueOf(payload).IsValid() && !reflect.ValueOf(payload).IsNil() { + rawPayload, err = json.Marshal(payload) + if err != nil { + cs := &CallSummary{ + HTTPRequestObject: payload, + } + return result, + cs, + &APICallException{ + CallSummary: cs, + RootCause: err, + } + } + } + callSummary, err := client.Request(rawPayload, method, route, query) + callSummary.HTTPRequestObject = payload + if err != nil { + // If context failed during this request, then we should just return that error + if client.Context != nil && client.Context.Err() != nil { + return result, callSummary, client.Context.Err() + } + return result, + callSummary, + &APICallException{ + CallSummary: callSummary, + RootCause: err, + } + } + // if result is passed in as nil, it means the API defines no response body + // json + if reflect.ValueOf(result).IsValid() && !reflect.ValueOf(result).IsNil() { + err = json.Unmarshal([]byte(callSummary.HTTPResponseBody), &result) + } + + if err != nil { + return result, + callSummary, + &APICallException{ + CallSummary: callSummary, + RootCause: err, + } + } + return result, callSummary, nil +} + +// SignedURL creates a signed URL using the given Client, where route is the +// url path relative to the RootURL stored in the Client, query is the set of +// query string parameters, if any, and duration is the amount of time that the +// signed URL should remain valid for. 
+func (client *Client) SignedURL(route string, query url.Values, duration time.Duration) (u *url.URL, err error) { + u, err = setURL(client, route, query) + if err != nil { + return + } + credentials := &hawk.Credentials{ + ID: client.Credentials.ClientID, + Key: client.Credentials.AccessToken, + Hash: sha256.New, + } + reqAuth, err := hawk.NewURLAuth(u.String(), credentials, duration) + if err != nil { + return + } + reqAuth.Ext, err = getExtHeader(client.Credentials) + if err != nil { + return + } + bewitSignature := reqAuth.Bewit() + if query == nil { + query = url.Values{} + } + query.Set("bewit", bewitSignature) + u.RawQuery = query.Encode() + return +} + +// getExtHeader generates the hawk ext header based on the authorizedScopes and +// the certificate used in the case of temporary credentials. The header is a +// base64 encoded json object with a "certificate" property set to the +// certificate of the temporary credentials and a "authorizedScopes" property +// set to the array of authorizedScopes, if provided. If either "certificate" +// or "authorizedScopes" is not supplied, they will be omitted from the json +// result. If neither are provided, an empty string is returned, rather than a +// base64 encoded representation of "null" or "{}". Hawk interprets the empty +// string as meaning the ext header is not needed. 
+// +// See: +// * https://docs.taskcluster.net/docs/manual/design/apis/hawk/authorized-scopes +// * https://docs.taskcluster.net/docs/manual/design/apis/hawk/temporary-credentials +func getExtHeader(credentials *Credentials) (header string, err error) { + ext := &ExtHeader{} + if credentials.Certificate != "" { + certObj := new(Certificate) + err = json.Unmarshal([]byte(credentials.Certificate), certObj) + if err != nil { + return "", err + } + ext.Certificate = certObj + } + + if credentials.AuthorizedScopes != nil { + ext.AuthorizedScopes = &credentials.AuthorizedScopes + } + extJSON, err := json.Marshal(ext) + if err != nil { + return "", err + } + if string(extJSON) != "{}" { + return base64.StdEncoding.EncodeToString(extJSON), nil + } + return "", nil +} + +// ExtHeader represents the authentication/authorization data that is contained +// in the ext field inside the base64 decoded `Authorization` HTTP header in +// outgoing Hawk HTTP requests. +type ExtHeader struct { + Certificate *Certificate `json:"certificate,omitempty"` + // use pointer to slice to distinguish between nil slice and empty slice + AuthorizedScopes *[]string `json:"authorizedScopes,omitempty"` +} diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueue/tcqueue.go b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueue/tcqueue.go new file mode 100644 index 0000000..7a15f05 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueue/tcqueue.go @@ -0,0 +1,964 @@ +// The following code is AUTO-GENERATED. Please DO NOT edit. 
+// To update this generated code, run the following command: +// in the /codegenerator/model subdirectory of this project, +// making sure that `${GOPATH}/bin` is in your `PATH`: +// +// go install && go generate +// +// This package was generated from the schema defined at +// /references/queue/v1/api.json + +// The queue service is responsible for accepting tasks and track their state +// as they are executed by workers. In order ensure they are eventually +// resolved. +// +// This document describes the API end-points offered by the queue. These +// end-points targets the following audience: +// * Schedulers, who create tasks to be executed, +// * Workers, who execute tasks, and +// * Tools, that wants to inspect the state of a task. +// +// See: +// +// How to use this package +// +// First create a Queue object: +// +// queue := tcqueue.New(nil) +// +// and then call one or more of queue's methods, e.g.: +// +// err := queue.Ping(.....) +// +// handling any errors... +// +// if err != nil { +// // handle error... +// } +// +// Taskcluster Schema +// +// The source code of this go package was auto-generated from the API definition at +// /references/queue/v1/api.json together with the input and output schemas it references, +package tcqueue + +import ( + "net/url" + "time" + + tcclient "github.com/taskcluster/taskcluster/clients/client-go/v23" +) + +type Queue tcclient.Client + +// New returns a Queue client, configured to run against production. Pass in +// nil credentials to create a client without authentication. The +// returned client is mutable, so returned settings can be altered. +// +// queue := tcqueue.New( +// nil, // client without authentication +// "http://localhost:1234/my/taskcluster", // taskcluster hosted at this root URL on local machine +// ) +// err := queue.Ping(.....) // for example, call the Ping(.....) API endpoint (described further down)... +// if err != nil { +// // handle errors... 
+// } +func New(credentials *tcclient.Credentials, rootURL string) *Queue { + return &Queue{ + Credentials: credentials, + RootURL: rootURL, + ServiceName: "queue", + APIVersion: "v1", + Authenticate: credentials != nil, + } +} + +// NewFromEnv returns a *Queue configured from environment variables. +// +// The root URL is taken from TASKCLUSTER_PROXY_URL if set to a non-empty +// string, otherwise from TASKCLUSTER_ROOT_URL if set, otherwise the empty +// string. +// +// The credentials are taken from environment variables: +// +// TASKCLUSTER_CLIENT_ID +// TASKCLUSTER_ACCESS_TOKEN +// TASKCLUSTER_CERTIFICATE +// +// If TASKCLUSTER_CLIENT_ID is empty/unset, authentication will be +// disabled. +func NewFromEnv() *Queue { + c := tcclient.CredentialsFromEnvVars() + rootURL := tcclient.RootURLFromEnvVars() + return &Queue{ + Credentials: c, + RootURL: rootURL, + ServiceName: "queue", + APIVersion: "v1", + Authenticate: c.ClientID != "", + } +} + +// Respond without doing anything. +// This endpoint is used to check that the service is up. +// +// See #ping +func (queue *Queue) Ping() error { + cd := tcclient.Client(*queue) + _, _, err := (&cd).APICall(nil, "GET", "/ping", nil, nil) + return err +} + +// This end-point will return the task-definition. Notice that the task +// definition may have been modified by queue, if an optional property is +// not specified the queue may provide a default value. 
+// +// See #task +func (queue *Queue) Task(taskId string) (*TaskDefinitionResponse, error) { + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(nil, "GET", "/task/"+url.QueryEscape(taskId), new(TaskDefinitionResponse), nil) + return responseObject.(*TaskDefinitionResponse), err +} + +// Get task status structure from `taskId` +// +// See #status +func (queue *Queue) Status(taskId string) (*TaskStatusResponse, error) { + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(nil, "GET", "/task/"+url.QueryEscape(taskId)+"/status", new(TaskStatusResponse), nil) + return responseObject.(*TaskStatusResponse), err +} + +// List tasks sharing the same `taskGroupId`. +// +// As a task-group may contain an unbounded number of tasks, this end-point +// may return a `continuationToken`. To continue listing tasks you must call +// the `listTaskGroup` again with the `continuationToken` as the +// query-string option `continuationToken`. +// +// By default this end-point will try to return up to 1000 members in one +// request. But it **may return less**, even if more tasks are available. +// It may also return a `continuationToken` even though there are no more +// results. However, you can only be sure to have seen all results if you +// keep calling `listTaskGroup` with the last `continuationToken` until you +// get a result without a `continuationToken`. +// +// If you are not interested in listing all the members at once, you may +// use the query-string option `limit` to return fewer. 
+// +// See #listTaskGroup +func (queue *Queue) ListTaskGroup(taskGroupId, continuationToken, limit string) (*ListTaskGroupResponse, error) { + v := url.Values{} + if continuationToken != "" { + v.Add("continuationToken", continuationToken) + } + if limit != "" { + v.Add("limit", limit) + } + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(nil, "GET", "/task-group/"+url.QueryEscape(taskGroupId)+"/list", new(ListTaskGroupResponse), v) + return responseObject.(*ListTaskGroupResponse), err +} + +// List tasks that depend on the given `taskId`. +// +// As many tasks from different task-groups may dependent on a single tasks, +// this end-point may return a `continuationToken`. To continue listing +// tasks you must call `listDependentTasks` again with the +// `continuationToken` as the query-string option `continuationToken`. +// +// By default this end-point will try to return up to 1000 tasks in one +// request. But it **may return less**, even if more tasks are available. +// It may also return a `continuationToken` even though there are no more +// results. However, you can only be sure to have seen all results if you +// keep calling `listDependentTasks` with the last `continuationToken` until +// you get a result without a `continuationToken`. +// +// If you are not interested in listing all the tasks at once, you may +// use the query-string option `limit` to return fewer. 
+// +// See #listDependentTasks +func (queue *Queue) ListDependentTasks(taskId, continuationToken, limit string) (*ListDependentTasksResponse, error) { + v := url.Values{} + if continuationToken != "" { + v.Add("continuationToken", continuationToken) + } + if limit != "" { + v.Add("limit", limit) + } + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(nil, "GET", "/task/"+url.QueryEscape(taskId)+"/dependents", new(ListDependentTasksResponse), v) + return responseObject.(*ListDependentTasksResponse), err +} + +// Create a new task, this is an **idempotent** operation, so repeat it if +// you get an internal server error or network connection is dropped. +// +// **Task `deadline`**: the deadline property can be no more than 5 days +// into the future. This is to limit the amount of pending tasks not being +// taken care of. Ideally, you should use a much shorter deadline. +// +// **Task expiration**: the `expires` property must be greater than the +// task `deadline`. If not provided it will default to `deadline` + one +// year. Notice, that artifacts created by task must expire before the task. +// +// **Task specific routing-keys**: using the `task.routes` property you may +// define task specific routing-keys. If a task has a task specific +// routing-key: ``, then when the AMQP message about the task is +// published, the message will be CC'ed with the routing-key: +// `route.`. This is useful if you want another component to listen +// for completed tasks you have posted. The caller must have scope +// `queue:route:` for each route. +// +// **Dependencies**: any tasks referenced in `task.dependencies` must have +// already been created at the time of this call. +// +// **Scopes**: Note that the scopes required to complete this API call depend +// on the content of the `scopes`, `routes`, `schedulerId`, `priority`, +// `provisionerId`, and `workerType` properties of the task definition. 
+// +// **Legacy Scopes**: The `queue:create-task:..` scope without a priority and +// the `queue:define-task:..` and `queue:task-group-id:..` scopes are considered +// legacy and should not be used. Note that the new, non-legacy scopes require +// a `queue:scheduler-id:..` scope as well as scopes for the proper priority. +// +// Required scopes: +// All of: +// * For scope in scopes each +// * For route in routes each queue:route: +// * Any of: +// - All of: +// * queue:scheduler-id: +// * For priority in priorities each queue:create-task::/ +// - If legacyScopes: +// Any of: +// - queue:create-task:/ +// - All of: +// * queue:define-task:/ +// * queue:task-group-id:/ +// * queue:schedule-task:// +// +// See #createTask +func (queue *Queue) CreateTask(taskId string, payload *TaskDefinitionRequest) (*TaskStatusResponse, error) { + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(payload, "PUT", "/task/"+url.QueryEscape(taskId), new(TaskStatusResponse), nil) + return responseObject.(*TaskStatusResponse), err +} + +// Stability: *** DEPRECATED *** +// +// **Deprecated**, this is the same as `createTask` with a **self-dependency**. +// This is only present for legacy. 
+// +// Required scopes: +// All of: +// * For scope in scopes each +// * For route in routes each queue:route: +// * Any of: +// - All of: +// * queue:scheduler-id: +// * For priority in priorities each queue:create-task::/ +// - If legacyScopes: +// Any of: +// - queue:define-task:/ +// - queue:create-task:/ +// - All of: +// * queue:define-task:/ +// * queue:task-group-id:/ +// +// See #defineTask +func (queue *Queue) DefineTask(taskId string, payload *TaskDefinitionRequest) (*TaskStatusResponse, error) { + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(payload, "POST", "/task/"+url.QueryEscape(taskId)+"/define", new(TaskStatusResponse), nil) + return responseObject.(*TaskStatusResponse), err +} + +// scheduleTask will schedule a task to be executed, even if it has +// unresolved dependencies. A task would otherwise only be scheduled if +// its dependencies were resolved. +// +// This is useful if you have defined a task that depends on itself or on +// some other task that has not been resolved, but you wish the task to be +// scheduled immediately. +// +// This will announce the task as pending and workers will be allowed to +// claim it and resolve the task. +// +// **Note** this operation is **idempotent** and will not fail or complain +// if called with a `taskId` that is already scheduled, or even resolved. +// To reschedule a task previously resolved, use `rerunTask`. 
+// +// Required scopes: +// Any of: +// - queue:schedule-task:// +// - All of: +// * queue:schedule-task +// * assume:scheduler-id:/ +// +// See #scheduleTask +func (queue *Queue) ScheduleTask(taskId string) (*TaskStatusResponse, error) { + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(nil, "POST", "/task/"+url.QueryEscape(taskId)+"/schedule", new(TaskStatusResponse), nil) + return responseObject.(*TaskStatusResponse), err +} + +// Stability: *** DEPRECATED *** +// +// This method _reruns_ a previously resolved task, even if it was +// _completed_. This is useful if your task completes unsuccessfully, and +// you just want to run it from scratch again. This will also reset the +// number of `retries` allowed. +// +// This method is deprecated in favour of creating a new task with the same +// task definition (but with a new taskId). +// +// Remember that `retries` in the task status counts the number of runs that +// the queue have started because the worker stopped responding, for example +// because a spot node died. +// +// **Remark** this operation is idempotent, if you try to rerun a task that +// is not either `failed` or `completed`, this operation will just return +// the current task status. +// +// Required scopes: +// Any of: +// - queue:rerun-task:// +// - All of: +// * queue:rerun-task +// * assume:scheduler-id:/ +// +// See #rerunTask +func (queue *Queue) RerunTask(taskId string) (*TaskStatusResponse, error) { + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(nil, "POST", "/task/"+url.QueryEscape(taskId)+"/rerun", new(TaskStatusResponse), nil) + return responseObject.(*TaskStatusResponse), err +} + +// This method will cancel a task that is either `unscheduled`, `pending` or +// `running`. It will resolve the current run as `exception` with +// `reasonResolved` set to `canceled`. If the task isn't scheduled yet, ie. 
+// it doesn't have any runs, an initial run will be added and resolved as
+// described above. Hence, after canceling a task, it cannot be scheduled
+// with `queue.scheduleTask`, but a new run can be created with
+// `queue.rerun`. These semantics are equivalent to calling
+// `queue.scheduleTask` immediately followed by `queue.cancelTask`.
+//
+// **Remark** this operation is idempotent, if you try to cancel a task that
+// isn't `unscheduled`, `pending` or `running`, this operation will just
+// return the current task status.
+//
+// Required scopes:
+//   Any of:
+//   - queue:cancel-task:<schedulerId>/<taskGroupId>/<taskId>
+//   - All of:
+//     * queue:cancel-task
+//     * assume:scheduler-id:<schedulerId>/<taskGroupId>
+//
+// See #cancelTask
+func (queue *Queue) CancelTask(taskId string) (*TaskStatusResponse, error) {
+	cd := tcclient.Client(*queue)
+	responseObject, _, err := (&cd).APICall(nil, "POST", "/task/"+url.QueryEscape(taskId)+"/cancel", new(TaskStatusResponse), nil)
+	return responseObject.(*TaskStatusResponse), err
+}
+
+// Claim pending task(s) for the given `provisionerId`/`workerType` queue.
+//
+// If any work is available (even if fewer than the requested number of
+// tasks), this will return immediately. Otherwise, it will block for tens of
+// seconds waiting for work. If no work appears, it will return an empty
+// list of tasks. Callers should sleep a short while (to avoid denial of
+// service in an error condition) and call the endpoint again. This is a
+// simple implementation of "long polling".
+//
+// Required scopes:
+//   All of:
+//   * queue:claim-work:<provisionerId>/<workerType>
+//   * queue:worker-id:<workerGroup>/<workerId>
+//
+// See #claimWork
+func (queue *Queue) ClaimWork(provisionerId, workerType string, payload *ClaimWorkRequest) (*ClaimWorkResponse, error) {
+	cd := tcclient.Client(*queue)
+	responseObject, _, err := (&cd).APICall(payload, "POST", "/claim-work/"+url.QueryEscape(provisionerId)+"/"+url.QueryEscape(workerType), new(ClaimWorkResponse), nil)
+	return responseObject.(*ClaimWorkResponse), err
+}
+
+// Stability: *** DEPRECATED ***
+//
+// claim a task - never documented
+//
+// Required scopes:
+//   Any of:
+//   - All of:
+//     * queue:claim-task:<provisionerId>/<workerType>
+//     * queue:worker-id:<workerGroup>/<workerId>
+//   - All of:
+//     * queue:claim-task
+//     * assume:worker-type:<provisionerId>/<workerType>
+//     * assume:worker-id:<workerGroup>/<workerId>
+//
+// See #claimTask
+func (queue *Queue) ClaimTask(taskId, runId string, payload *TaskClaimRequest) (*TaskClaimResponse, error) {
+	cd := tcclient.Client(*queue)
+	responseObject, _, err := (&cd).APICall(payload, "POST", "/task/"+url.QueryEscape(taskId)+"/runs/"+url.QueryEscape(runId)+"/claim", new(TaskClaimResponse), nil)
+	return responseObject.(*TaskClaimResponse), err
+}
+
+// Refresh the claim for a specific `runId` for given `taskId`. This updates
+// the `takenUntil` property and returns a new set of temporary credentials
+// for performing requests on behalf of the task. These credentials should
+// be used in-place of the credentials returned by `claimWork`.
+//
+// The `reclaimTask` request serves to:
+//  * Postpone `takenUntil` preventing the queue from resolving
+//    `claim-expired`,
+//  * Refresh temporary credentials used for processing the task, and
+//  * Abort execution if the task/run have been resolved.
+//
+// If the `takenUntil` timestamp is exceeded the queue will resolve the run
+// as _exception_ with reason `claim-expired`, and proceed to retry the
+// task. This ensures that tasks are retried, even if workers disappear
+// without warning.
+// +// If the task is resolved, this end-point will return `409` reporting +// `RequestConflict`. This typically happens if the task have been canceled +// or the `task.deadline` have been exceeded. If reclaiming fails, workers +// should abort the task and forget about the given `runId`. There is no +// need to resolve the run or upload artifacts. +// +// Required scopes: +// Any of: +// - queue:reclaim-task:/ +// - All of: +// * queue:claim-task +// * assume:worker-id:/ +// +// See #reclaimTask +func (queue *Queue) ReclaimTask(taskId, runId string) (*TaskReclaimResponse, error) { + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(nil, "POST", "/task/"+url.QueryEscape(taskId)+"/runs/"+url.QueryEscape(runId)+"/reclaim", new(TaskReclaimResponse), nil) + return responseObject.(*TaskReclaimResponse), err +} + +// Report a task completed, resolving the run as `completed`. +// +// Required scopes: +// Any of: +// - queue:resolve-task:/ +// - All of: +// * queue:resolve-task +// * assume:worker-id:/ +// +// See #reportCompleted +func (queue *Queue) ReportCompleted(taskId, runId string) (*TaskStatusResponse, error) { + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(nil, "POST", "/task/"+url.QueryEscape(taskId)+"/runs/"+url.QueryEscape(runId)+"/completed", new(TaskStatusResponse), nil) + return responseObject.(*TaskStatusResponse), err +} + +// Report a run failed, resolving the run as `failed`. Use this to resolve +// a run that failed because the task specific code behaved unexpectedly. +// For example the task exited non-zero, or didn't produce expected output. +// +// Do not use this if the task couldn't be run because if malformed +// payload, or other unexpected condition. In these cases we have a task +// exception, which should be reported with `reportException`. 
+// +// Required scopes: +// Any of: +// - queue:resolve-task:/ +// - All of: +// * queue:resolve-task +// * assume:worker-id:/ +// +// See #reportFailed +func (queue *Queue) ReportFailed(taskId, runId string) (*TaskStatusResponse, error) { + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(nil, "POST", "/task/"+url.QueryEscape(taskId)+"/runs/"+url.QueryEscape(runId)+"/failed", new(TaskStatusResponse), nil) + return responseObject.(*TaskStatusResponse), err +} + +// Resolve a run as _exception_. Generally, you will want to report tasks as +// failed instead of exception. You should `reportException` if, +// +// * The `task.payload` is invalid, +// * Non-existent resources are referenced, +// * Declared actions cannot be executed due to unavailable resources, +// * The worker had to shutdown prematurely, +// * The worker experienced an unknown error, or, +// * The task explicitly requested a retry. +// +// Do not use this to signal that some user-specified code crashed for any +// reason specific to this code. If user-specific code hits a resource that +// is temporarily unavailable worker should report task _failed_. +// +// Required scopes: +// Any of: +// - queue:resolve-task:/ +// - All of: +// * queue:resolve-task +// * assume:worker-id:/ +// +// See #reportException +func (queue *Queue) ReportException(taskId, runId string, payload *TaskExceptionRequest) (*TaskStatusResponse, error) { + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(payload, "POST", "/task/"+url.QueryEscape(taskId)+"/runs/"+url.QueryEscape(runId)+"/exception", new(TaskStatusResponse), nil) + return responseObject.(*TaskStatusResponse), err +} + +// This API end-point creates an artifact for a specific run of a task. This +// should **only** be used by a worker currently operating on this task, or +// from a process running within the task (ie. on the worker). 
+// +// All artifacts must specify when they `expires`, the queue will +// automatically take care of deleting artifacts past their +// expiration point. This features makes it feasible to upload large +// intermediate artifacts from data processing applications, as the +// artifacts can be set to expire a few days later. +// +// We currently support "S3 Artifacts" officially, with remaining support +// for two deprecated types. Do not use these deprecated types. +// +// **S3 artifacts**, is useful for static files which will be +// stored on S3. When creating an S3 artifact the queue will return a +// pre-signed URL to which you can do a `PUT` request to upload your +// artifact. Note that `PUT` request **must** specify the `content-length` +// header and **must** give the `content-type` header the same value as in +// the request to `createArtifact`. +// +// **Reference artifacts**, only consists of meta-data which the queue will +// store for you. These artifacts really only have a `url` property and +// when the artifact is requested the client will be redirect the URL +// provided with a `303` (See Other) redirect. Please note that we cannot +// delete artifacts you upload to other service, we can only delete the +// reference to the artifact, when it expires. +// +// **Error artifacts**, only consists of meta-data which the queue will +// store for you. These artifacts are only meant to indicate that you the +// worker or the task failed to generate a specific artifact, that you +// would otherwise have uploaded. For example docker-worker will upload an +// error artifact, if the file it was supposed to upload doesn't exists or +// turns out to be a directory. Clients requesting an error artifact will +// get a `424` (Failed Dependency) response. This is mainly designed to +// ensure that dependent tasks can distinguish between artifacts that were +// suppose to be generated and artifacts for which the name is misspelled. 
+// +// **Artifact immutability**, generally speaking you cannot overwrite an +// artifact when created. But if you repeat the request with the same +// properties the request will succeed as the operation is idempotent. +// This is useful if you need to refresh a signed URL while uploading. +// Do not abuse this to overwrite artifacts created by another entity! +// Such as worker-host overwriting artifact created by worker-code. +// +// As a special case the `url` property on _reference artifacts_ can be +// updated. You should only use this to update the `url` property for +// reference artifacts your process has created. +// +// Required scopes: +// Any of: +// - queue:create-artifact:/ +// - All of: +// * queue:create-artifact: +// * assume:worker-id:/ +// +// See #createArtifact +func (queue *Queue) CreateArtifact(taskId, runId, name string, payload *PostArtifactRequest) (*PostArtifactResponse, error) { + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(payload, "POST", "/task/"+url.QueryEscape(taskId)+"/runs/"+url.QueryEscape(runId)+"/artifacts/"+url.QueryEscape(name), new(PostArtifactResponse), nil) + return responseObject.(*PostArtifactResponse), err +} + +// Get artifact by `` from a specific run. +// +// **Public Artifacts**, in-order to get an artifact you need the scope +// `queue:get-artifact:`, where `` is the name of the artifact. +// But if the artifact `name` starts with `public/`, authentication and +// authorization is not necessary to fetch the artifact. +// +// **API Clients**, this method will redirect you to the artifact, if it is +// stored externally. Either way, the response may not be JSON. So API +// client users might want to generate a signed URL for this end-point and +// use that URL with an HTTP client that can handle responses correctly. +// +// **Downloading artifacts** +// There are some special considerations for those http clients which download +// artifacts. 
This api endpoint is designed to be compatible with an HTTP 1.1 +// compliant client, but has extra features to ensure the download is valid. +// It is strongly recommend that consumers use either taskcluster-lib-artifact (JS), +// taskcluster-lib-artifact-go (Go) or the CLI written in Go to interact with +// artifacts. +// +// In order to download an artifact the following must be done: +// +// 1. Obtain queue url. Building a signed url with a taskcluster client is +// recommended +// 1. Make a GET request which does not follow redirects +// 1. In all cases, if specified, the +// x-taskcluster-location-{content,transfer}-{sha256,length} values must be +// validated to be equal to the Content-Length and Sha256 checksum of the +// final artifact downloaded. as well as any intermediate redirects +// 1. If this response is a 500-series error, retry using an exponential +// backoff. No more than 5 retries should be attempted +// 1. If this response is a 400-series error, treat it appropriately for +// your context. This might be an error in responding to this request or +// an Error storage type body. This request should not be retried. +// 1. If this response is a 200-series response, the response body is the artifact. +// If the x-taskcluster-location-{content,transfer}-{sha256,length} and +// x-taskcluster-location-content-encoding are specified, they should match +// this response body +// 1. If the response type is a 300-series redirect, the artifact will be at the +// location specified by the `Location` header. There are multiple artifact storage +// types which use a 300-series redirect. +// 1. For all redirects followed, the user must verify that the content-sha256, content-length, +// transfer-sha256, transfer-length and content-encoding match every further request. The final +// artifact must also be validated against the values specified in the original queue response +// 1. 
Caching of requests with an x-taskcluster-artifact-storage-type value of `reference` +// must not occur +// +// **Headers** +// The following important headers are set on the response to this method: +// +// * location: the url of the artifact if a redirect is to be performed +// * x-taskcluster-artifact-storage-type: the storage type. Example: s3 +// +// The following important headers are set on responses to this method for Blob artifacts +// +// * x-taskcluster-location-content-sha256: the SHA256 of the artifact +// *after* any content-encoding is undone. Sha256 is hex encoded (e.g. [0-9A-Fa-f]{64}) +// * x-taskcluster-location-content-length: the number of bytes *after* any content-encoding +// is undone +// * x-taskcluster-location-transfer-sha256: the SHA256 of the artifact +// *before* any content-encoding is undone. This is the SHA256 of what is sent over +// the wire. Sha256 is hex encoded (e.g. [0-9A-Fa-f]{64}) +// * x-taskcluster-location-transfer-length: the number of bytes *after* any content-encoding +// is undone +// * x-taskcluster-location-content-encoding: the content-encoding used. It will either +// be `gzip` or `identity` right now. This is hardcoded to a value set when the artifact +// was created and no content-negotiation occurs +// * x-taskcluster-location-content-type: the content-type of the artifact +// +// **Caching**, artifacts may be cached in data centers closer to the +// workers in-order to reduce bandwidth costs. This can lead to longer +// response times. Caching can be skipped by setting the header +// `x-taskcluster-skip-cache: true`, this should only be used for resources +// where request volume is known to be low, and caching not useful. +// (This feature may be disabled in the future, use is sparingly!) 
+//
+// Required scopes:
+//   If private:
+//     queue:get-artifact:<name>
+//
+// See #getArtifact
+func (queue *Queue) GetArtifact(taskId, runId, name string) error {
+	cd := tcclient.Client(*queue)
+	_, _, err := (&cd).APICall(nil, "GET", "/task/"+url.QueryEscape(taskId)+"/runs/"+url.QueryEscape(runId)+"/artifacts/"+url.QueryEscape(name), nil, nil)
+	return err
+}
+
+// Returns a signed URL for GetArtifact, valid for the specified duration.
+//
+// Required scopes:
+//   If private:
+//     queue:get-artifact:<name>
+//
+// See GetArtifact for more details.
+func (queue *Queue) GetArtifact_SignedURL(taskId, runId, name string, duration time.Duration) (*url.URL, error) {
+	cd := tcclient.Client(*queue)
+	return (&cd).SignedURL("/task/"+url.QueryEscape(taskId)+"/runs/"+url.QueryEscape(runId)+"/artifacts/"+url.QueryEscape(name), nil, duration)
+}
+
+// Get artifact by `<name>` from the last run of a task.
+//
+// **Public Artifacts**, in-order to get an artifact you need the scope
+// `queue:get-artifact:<name>`, where `<name>` is the name of the artifact.
+// But if the artifact `name` starts with `public/`, authentication and
+// authorization is not necessary to fetch the artifact.
+//
+// **API Clients**, this method will redirect you to the artifact, if it is
+// stored externally. Either way, the response may not be JSON. So API
+// client users might want to generate a signed URL for this end-point and
+// use that URL with a normal HTTP client.
+//
+// **Remark**, this end-point is slightly slower than
+// `queue.getArtifact`, so consider that if you already know the `runId` of
+// the latest run. Otherwise, just use the most convenient API end-point.
+// +// Required scopes: +// If private: +// queue:get-artifact: +// +// See #getLatestArtifact +func (queue *Queue) GetLatestArtifact(taskId, name string) error { + cd := tcclient.Client(*queue) + _, _, err := (&cd).APICall(nil, "GET", "/task/"+url.QueryEscape(taskId)+"/artifacts/"+url.QueryEscape(name), nil, nil) + return err +} + +// Returns a signed URL for GetLatestArtifact, valid for the specified duration. +// +// Required scopes: +// If private: +// queue:get-artifact: +// +// See GetLatestArtifact for more details. +func (queue *Queue) GetLatestArtifact_SignedURL(taskId, name string, duration time.Duration) (*url.URL, error) { + cd := tcclient.Client(*queue) + return (&cd).SignedURL("/task/"+url.QueryEscape(taskId)+"/artifacts/"+url.QueryEscape(name), nil, duration) +} + +// Stability: *** EXPERIMENTAL *** +// +// Returns a list of artifacts and associated meta-data for a given run. +// +// As a task may have many artifacts paging may be necessary. If this +// end-point returns a `continuationToken`, you should call the end-point +// again with the `continuationToken` as the query-string option: +// `continuationToken`. +// +// By default this end-point will list up-to 1000 artifacts in a single page +// you may limit this with the query-string parameter `limit`. +// +// See #listArtifacts +func (queue *Queue) ListArtifacts(taskId, runId, continuationToken, limit string) (*ListArtifactsResponse, error) { + v := url.Values{} + if continuationToken != "" { + v.Add("continuationToken", continuationToken) + } + if limit != "" { + v.Add("limit", limit) + } + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(nil, "GET", "/task/"+url.QueryEscape(taskId)+"/runs/"+url.QueryEscape(runId)+"/artifacts", new(ListArtifactsResponse), v) + return responseObject.(*ListArtifactsResponse), err +} + +// Stability: *** EXPERIMENTAL *** +// +// Returns a list of artifacts and associated meta-data for the latest run +// from the given task. 
+// +// As a task may have many artifacts paging may be necessary. If this +// end-point returns a `continuationToken`, you should call the end-point +// again with the `continuationToken` as the query-string option: +// `continuationToken`. +// +// By default this end-point will list up-to 1000 artifacts in a single page +// you may limit this with the query-string parameter `limit`. +// +// See #listLatestArtifacts +func (queue *Queue) ListLatestArtifacts(taskId, continuationToken, limit string) (*ListArtifactsResponse, error) { + v := url.Values{} + if continuationToken != "" { + v.Add("continuationToken", continuationToken) + } + if limit != "" { + v.Add("limit", limit) + } + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(nil, "GET", "/task/"+url.QueryEscape(taskId)+"/artifacts", new(ListArtifactsResponse), v) + return responseObject.(*ListArtifactsResponse), err +} + +// Stability: *** EXPERIMENTAL *** +// +// Get all active provisioners. +// +// The term "provisioner" is taken broadly to mean anything with a provisionerId. +// This does not necessarily mean there is an associated service performing any +// provisioning activity. +// +// The response is paged. If this end-point returns a `continuationToken`, you +// should call the end-point again with the `continuationToken` as a query-string +// option. By default this end-point will list up to 1000 provisioners in a single +// page. You may limit this with the query-string parameter `limit`. 
+// +// See #listProvisioners +func (queue *Queue) ListProvisioners(continuationToken, limit string) (*ListProvisionersResponse, error) { + v := url.Values{} + if continuationToken != "" { + v.Add("continuationToken", continuationToken) + } + if limit != "" { + v.Add("limit", limit) + } + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(nil, "GET", "/provisioners", new(ListProvisionersResponse), v) + return responseObject.(*ListProvisionersResponse), err +} + +// Stability: *** EXPERIMENTAL *** +// +// Get an active provisioner. +// +// The term "provisioner" is taken broadly to mean anything with a provisionerId. +// This does not necessarily mean there is an associated service performing any +// provisioning activity. +// +// See #getProvisioner +func (queue *Queue) GetProvisioner(provisionerId string) (*ProvisionerResponse, error) { + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(nil, "GET", "/provisioners/"+url.QueryEscape(provisionerId), new(ProvisionerResponse), nil) + return responseObject.(*ProvisionerResponse), err +} + +// Stability: *** EXPERIMENTAL *** +// +// Declare a provisioner, supplying some details about it. +// +// `declareProvisioner` allows updating one or more properties of a provisioner as long as the required scopes are +// possessed. For example, a request to update the `my-provisioner` +// provisioner with a body `{description: 'This provisioner is great'}` would require you to have the scope +// `queue:declare-provisioner:my-provisioner#description`. +// +// The term "provisioner" is taken broadly to mean anything with a provisionerId. +// This does not necessarily mean there is an associated service performing any +// provisioning activity. 
+// +// Required scopes: +// For property in properties each queue:declare-provisioner:# +// +// See #declareProvisioner +func (queue *Queue) DeclareProvisioner(provisionerId string, payload *ProvisionerRequest) (*ProvisionerResponse, error) { + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(payload, "PUT", "/provisioners/"+url.QueryEscape(provisionerId), new(ProvisionerResponse), nil) + return responseObject.(*ProvisionerResponse), err +} + +// Get an approximate number of pending tasks for the given `provisionerId` +// and `workerType`. +// +// The underlying Azure Storage Queues only promises to give us an estimate. +// Furthermore, we cache the result in memory for 20 seconds. So consumers +// should be no means expect this to be an accurate number. +// It is, however, a solid estimate of the number of pending tasks. +// +// See #pendingTasks +func (queue *Queue) PendingTasks(provisionerId, workerType string) (*CountPendingTasksResponse, error) { + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(nil, "GET", "/pending/"+url.QueryEscape(provisionerId)+"/"+url.QueryEscape(workerType), new(CountPendingTasksResponse), nil) + return responseObject.(*CountPendingTasksResponse), err +} + +// Stability: *** EXPERIMENTAL *** +// +// Get all active worker-types for the given provisioner. +// +// The response is paged. If this end-point returns a `continuationToken`, you +// should call the end-point again with the `continuationToken` as a query-string +// option. By default this end-point will list up to 1000 worker-types in a single +// page. You may limit this with the query-string parameter `limit`. 
+// +// See #listWorkerTypes +func (queue *Queue) ListWorkerTypes(provisionerId, continuationToken, limit string) (*ListWorkerTypesResponse, error) { + v := url.Values{} + if continuationToken != "" { + v.Add("continuationToken", continuationToken) + } + if limit != "" { + v.Add("limit", limit) + } + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(nil, "GET", "/provisioners/"+url.QueryEscape(provisionerId)+"/worker-types", new(ListWorkerTypesResponse), v) + return responseObject.(*ListWorkerTypesResponse), err +} + +// Stability: *** EXPERIMENTAL *** +// +// Get a worker-type from a provisioner. +// +// See #getWorkerType +func (queue *Queue) GetWorkerType(provisionerId, workerType string) (*WorkerTypeResponse, error) { + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(nil, "GET", "/provisioners/"+url.QueryEscape(provisionerId)+"/worker-types/"+url.QueryEscape(workerType), new(WorkerTypeResponse), nil) + return responseObject.(*WorkerTypeResponse), err +} + +// Stability: *** EXPERIMENTAL *** +// +// Declare a workerType, supplying some details about it. +// +// `declareWorkerType` allows updating one or more properties of a worker-type as long as the required scopes are +// possessed. For example, a request to update the `highmem` worker-type within the `my-provisioner` +// provisioner with a body `{description: 'This worker type is great'}` would require you to have the scope +// `queue:declare-worker-type:my-provisioner/highmem#description`. 
+// +// Required scopes: +// For property in properties each queue:declare-worker-type:/# +// +// See #declareWorkerType +func (queue *Queue) DeclareWorkerType(provisionerId, workerType string, payload *WorkerTypeRequest) (*WorkerTypeResponse, error) { + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(payload, "PUT", "/provisioners/"+url.QueryEscape(provisionerId)+"/worker-types/"+url.QueryEscape(workerType), new(WorkerTypeResponse), nil) + return responseObject.(*WorkerTypeResponse), err +} + +// Stability: *** EXPERIMENTAL *** +// +// Get a list of all active workers of a workerType. +// +// `listWorkers` allows a response to be filtered by quarantined and non quarantined workers. +// To filter the query, you should call the end-point with `quarantined` as a query-string option with a +// true or false value. +// +// The response is paged. If this end-point returns a `continuationToken`, you +// should call the end-point again with the `continuationToken` as a query-string +// option. By default this end-point will list up to 1000 workers in a single +// page. You may limit this with the query-string parameter `limit`. +// +// See #listWorkers +func (queue *Queue) ListWorkers(provisionerId, workerType, continuationToken, limit, quarantined string) (*ListWorkersResponse, error) { + v := url.Values{} + if continuationToken != "" { + v.Add("continuationToken", continuationToken) + } + if limit != "" { + v.Add("limit", limit) + } + if quarantined != "" { + v.Add("quarantined", quarantined) + } + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(nil, "GET", "/provisioners/"+url.QueryEscape(provisionerId)+"/worker-types/"+url.QueryEscape(workerType)+"/workers", new(ListWorkersResponse), v) + return responseObject.(*ListWorkersResponse), err +} + +// Stability: *** EXPERIMENTAL *** +// +// Get a worker from a worker-type. 
+// +// See #getWorker +func (queue *Queue) GetWorker(provisionerId, workerType, workerGroup, workerId string) (*WorkerResponse, error) { + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(nil, "GET", "/provisioners/"+url.QueryEscape(provisionerId)+"/worker-types/"+url.QueryEscape(workerType)+"/workers/"+url.QueryEscape(workerGroup)+"/"+url.QueryEscape(workerId), new(WorkerResponse), nil) + return responseObject.(*WorkerResponse), err +} + +// Stability: *** EXPERIMENTAL *** +// +// Quarantine a worker +// +// Required scopes: +// queue:quarantine-worker:/// +// +// See #quarantineWorker +func (queue *Queue) QuarantineWorker(provisionerId, workerType, workerGroup, workerId string, payload *QuarantineWorkerRequest) (*WorkerResponse, error) { + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(payload, "PUT", "/provisioners/"+url.QueryEscape(provisionerId)+"/worker-types/"+url.QueryEscape(workerType)+"/workers/"+url.QueryEscape(workerGroup)+"/"+url.QueryEscape(workerId), new(WorkerResponse), nil) + return responseObject.(*WorkerResponse), err +} + +// Stability: *** EXPERIMENTAL *** +// +// Declare a worker, supplying some details about it. +// +// `declareWorker` allows updating one or more properties of a worker as long as the required scopes are +// possessed. 
+// +// Required scopes: +// For property in properties each queue:declare-worker:///# +// +// See #declareWorker +func (queue *Queue) DeclareWorker(provisionerId, workerType, workerGroup, workerId string, payload *WorkerRequest) (*WorkerResponse, error) { + cd := tcclient.Client(*queue) + responseObject, _, err := (&cd).APICall(payload, "PUT", "/provisioners/"+url.QueryEscape(provisionerId)+"/worker-types/"+url.QueryEscape(workerType)+"/"+url.QueryEscape(workerGroup)+"/"+url.QueryEscape(workerId), new(WorkerResponse), nil) + return responseObject.(*WorkerResponse), err +} diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueue/types.go b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueue/types.go new file mode 100644 index 0000000..abaa56a --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueue/types.go @@ -0,0 +1,1710 @@ +// This source code file is AUTO-GENERATED by github.com/taskcluster/jsonschema2go + +package tcqueue + +import ( + "encoding/json" + "errors" + + tcclient "github.com/taskcluster/taskcluster/clients/client-go/v23" +) + +type ( + // Actions provide a generic mechanism to expose additional features of a + // provisioner, worker type, or worker to Taskcluster clients. + // + // An action is comprised of metadata describing the feature it exposes, + // together with a webhook for triggering it. + // + // The Taskcluster tools site, for example, retrieves actions when displaying + // provisioners, worker types and workers. It presents the provisioner/worker + // type/worker specific actions to the user. When the user triggers an action, + // the web client takes the registered webhook, substitutes parameters into the + // URL (see `url`), signs the requests with the Taskcluster credentials of the + // user operating the web interface, and issues the HTTP request. 
+ // + // The level to which the action relates (provisioner, worker type, worker) is + // called the action context. All actions, regardless of the action contexts, + // are registered against the provisioner when calling + // `queue.declareProvisioner`. + // + // The action context is used by the web client to determine where in the web + // interface to present the action to the user as follows: + // + // | `context` | Tool where action is displayed | + // |-------------|--------------------------------| + // | provisioner | Provisioner Explorer | + // | worker-type | Workers Explorer | + // | worker | Worker Explorer | + // + // See [actions docs](/docs/reference/platform/taskcluster-queue/docs/actions) + // for more information. + Action struct { + + // Actions have a "context" that is one of provisioner, worker-type, or worker, indicating + // which it applies to. `context` is used by the front-end to know where to display the action. + // + // | `context` | Page displayed | + // |-------------|-----------------------| + // | provisioner | Provisioner Explorer | + // | worker-type | Workers Explorer | + // | worker | Worker Explorer | + // + // Possible values: + // * "provisioner" + // * "worker-type" + // * "worker" + Context string `json:"context"` + + // Description of the provisioner. + Description string `json:"description"` + + // Method to indicate the desired action to be performed for a given resource. + // + // Possible values: + // * "POST" + // * "PUT" + // * "DELETE" + // * "PATCH" + Method string `json:"method"` + + // Short names for things like logging/error messages. + Name string `json:"name"` + + // Appropriate title for any sort of Modal prompt. + Title json.RawMessage `json:"title"` + + // When an action is triggered, a request is made using the `url` and `method`. 
+ // Depending on the `context`, the following parameters will be substituted in the url: + // + // | `context` | Path parameters | + // |-------------|----------------------------------------------------------| + // | provisioner | | + // | worker-type | , | + // | worker | , , , | + // + // _Note: The request needs to be signed with the user's Taskcluster credentials._ + URL string `json:"url"` + } + + // Information about an artifact for the given `taskId` and `runId`. + Artifact struct { + + // Mimetype for the artifact that was created. + // + // Max length: 255 + ContentType string `json:"contentType"` + + // Date and time after which the artifact created will be automatically + // deleted by the queue. + Expires tcclient.Time `json:"expires"` + + // Name of the artifact that was created, this is useful if you want to + // attempt to fetch the artifact. + // + // Max length: 1024 + Name string `json:"name"` + + // This is the `storageType` for the request that was used to create + // the artifact. + // + // Possible values: + // * "s3" + // * "reference" + // * "error" + StorageType string `json:"storageType"` + } + + // Request to claim a task for a worker to process. + ClaimWorkRequest struct { + + // Number of tasks to attempt to claim. + // + // Default: 1 + // Mininum: 1 + // Maximum: 32 + Tasks int64 `json:"tasks"` + + // Identifier for group that worker claiming the task is a part of. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerGroup string `json:"workerGroup"` + + // Identifier for worker within the given workerGroup + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerID string `json:"workerId"` + } + + // Response to an attempt to claim tasks for a worker to process. + ClaimWorkResponse struct { + + // List of task claims, may be empty if no tasks was claimed, in which case + // the worker should sleep a tiny bit before polling again. 
+ Tasks []TaskClaim `json:"tasks"` + } + + // Response to a request for the number of pending tasks for a given + // `provisionerId` and `workerType`. + CountPendingTasksResponse struct { + + // An approximate number of pending tasks for the given `provisionerId` and + // `workerType`. This is based on Azure Queue Storage metadata API, thus, + // number of reported here may be higher than actual number of pending tasks. + // But there cannot be more pending tasks reported here. Ie. this is an + // **upper-bound** on the number of pending tasks. + // + // Mininum: 0 + PendingTasks int64 `json:"pendingTasks"` + + // Unique identifier for a provisioner, that can supply specified + // `workerType` + // + // Syntax: ^[a-zA-Z0-9-_]{1,38}$ + ProvisionerID string `json:"provisionerId"` + + // Unique identifier for a worker-type within a specific provisioner + // + // Syntax: ^[a-z]([-a-z0-9]{0,36}[a-z0-9])?$ + WorkerType string `json:"workerType"` + } + + // Request the queue to reply `424` (Failed Dependency) with `reason` and + // `message` to any `GET` request for this artifact. This is mainly useful + // as a way for a task to declare that it failed to provide an artifact it + // wanted to upload. + ErrorArtifactRequest struct { + + // Date-time after which the queue should stop replying with the error + // and forget about the artifact. + Expires tcclient.Time `json:"expires"` + + // Human readable explanation of why the artifact is missing + // + // Max length: 4096 + Message string `json:"message"` + + // Reason why the artifact doesn't exist. 
+ // + // Possible values: + // * "file-missing-on-worker" + // * "invalid-resource-on-worker" + // * "too-large-file-on-worker" + Reason string `json:"reason"` + + // Artifact storage type, in this case `error` + // + // Possible values: + // * "error" + StorageType string `json:"storageType"` + } + + // Response to a request for the queue to reply `424` (Failed Dependency) + // with `reason` and `message` to any `GET` request for this artifact. + ErrorArtifactResponse struct { + + // Artifact storage type, in this case `error` + // + // Possible values: + // * "error" + StorageType string `json:"storageType"` + } + + // List of artifacts for a given `taskId` and `runId`. + ListArtifactsResponse struct { + + // List of artifacts for given `taskId` and `runId`. + Artifacts []Artifact `json:"artifacts"` + + // Opaque `continuationToken` to be given as query-string option to get the + // next set of artifacts. + // This property is only present if another request is necessary to fetch all + // results. In practice the next request with a `continuationToken` may not + // return additional results, but it can. Thus, you can only be sure to have + // all the results if you've called with `continuationToken` until you get a + // result without a `continuationToken`. + ContinuationToken string `json:"continuationToken,omitempty"` + } + + // Response from a `listDependentTasks` request. + ListDependentTasksResponse struct { + + // Opaque `continuationToken` to be given as query-string option to get the + // next set of dependent tasks. + // This property is only present if another request is necessary to fetch all + // results. In practice the next request with a `continuationToken` may not + // return additional results, but it can. Thus, you can only be sure to have + // all the results if you've called `listDependentTasks` with + // `continuationToken` until you get a result without a `continuationToken`. 
+ ContinuationToken string `json:"continuationToken,omitempty"` + + // Identifier for the task whose dependents are being listed. + // + // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ + TaskID string `json:"taskId"` + + // List of tasks that have `taskId` in the `task.dependencies` property. + Tasks []TaskDefinitionAndStatus `json:"tasks"` + } + + ListProvisionersResponse struct { + + // Opaque `continuationToken` to be given as query-string option to get the + // next set of provisioners. + // This property is only present if another request is necessary to fetch all + // results. In practice the next request with a `continuationToken` may not + // return additional results, but it can. Thus, you can only be sure to have + // all the results if you've called with `continuationToken` until you get a + // result without a `continuationToken`. + ContinuationToken string `json:"continuationToken,omitempty"` + + Provisioners []ProvisionerInformation `json:"provisioners"` + } + + // Response from a `listTaskGroup` request. + ListTaskGroupResponse struct { + + // Opaque `continuationToken` to be given as query-string option to get the + // next set of tasks in the task-group. + // This property is only present if another request is necessary to fetch all + // results. In practice the next request with a `continuationToken` may not + // return additional results, but it can. Thus, you can only be sure to have + // all the results if you've called `listTaskGroup` with `continuationToken` + // until you get a result without a `continuationToken`. + ContinuationToken string `json:"continuationToken,omitempty"` + + // Identifier for the task-group being listed. + // + // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ + TaskGroupID string `json:"taskGroupId"` + + // List of tasks in this task-group. 
+ Tasks []TaskDefinitionAndStatus `json:"tasks"` + } + + // Response from a `listWorkerTypes` request. + ListWorkerTypesResponse struct { + + // Opaque `continuationToken` to be given as query-string option to get the + // next set of worker-types in the provisioner. + // This property is only present if another request is necessary to fetch all + // results. In practice the next request with a `continuationToken` may not + // return additional results, but it can. Thus, you can only be sure to have + // all the results if you've called `listWorkerTypes` with `continuationToken` + // until you get a result without a `continuationToken`. + ContinuationToken string `json:"continuationToken,omitempty"` + + // List of worker-types in this provisioner. + WorkerTypes []WorkerType `json:"workerTypes"` + } + + // Response from a `listWorkers` request. + ListWorkersResponse struct { + + // Opaque `continuationToken` to be given as query-string option to get the + // next set of workers in the worker-type. + // This property is only present if another request is necessary to fetch all + // results. In practice the next request with a `continuationToken` may not + // return additional results, but it can. Thus, you can only be sure to have + // all the results if you've called `listWorkerTypes` with `continuationToken` + // until you get a result without a `continuationToken`. + ContinuationToken string `json:"continuationToken,omitempty"` + + // List of workers in this worker-type. + Workers []Worker `json:"workers"` + } + + // Request a authorization to put and artifact or posting of a URL as an artifact. Note that the `storageType` property is referenced in the response as well. + // + // One of: + // * S3ArtifactRequest + // * RedirectArtifactRequest + // * ErrorArtifactRequest + PostArtifactRequest json.RawMessage + + // Response to a request for posting an artifact. + // Note that the `storageType` property is referenced in the request as well. 
+ // + // One of: + // * S3ArtifactResponse + // * RedirectArtifactResponse + // * ErrorArtifactResponse + PostArtifactResponse json.RawMessage + + ProvisionerInformation struct { + + // See taskcluster [actions](/docs/reference/platform/taskcluster-queue/docs/actions) documentation. + Actions []Action `json:"actions"` + + // Description of the provisioner. + Description string `json:"description"` + + // Date and time after which the provisioner created will be automatically + // deleted by the queue. + Expires tcclient.Time `json:"expires"` + + // Date and time where the provisioner was last seen active + LastDateActive tcclient.Time `json:"lastDateActive"` + + // Unique identifier for a provisioner, that can supply specified + // `workerType` + // + // Syntax: ^[a-zA-Z0-9-_]{1,38}$ + ProvisionerID string `json:"provisionerId"` + + // This is the stability of the provisioner. Accepted values: + // * `experimental` + // * `stable` + // * `deprecated` + // + // Possible values: + // * "experimental" + // * "stable" + // * "deprecated" + Stability string `json:"stability"` + } + + // Request to update a provisioner. + ProvisionerRequest struct { + + // See taskcluster [actions](/docs/reference/platform/taskcluster-queue/docs/actions) documentation. + Actions []Action `json:"actions,omitempty"` + + // Description of the provisioner. + Description string `json:"description,omitempty"` + + // Date and time after which the provisioner will be automatically + // deleted by the queue. + Expires tcclient.Time `json:"expires,omitempty"` + + // This is the stability of the provisioner. Accepted values: + // * `experimental` + // * `stable` + // * `deprecated` + // + // Possible values: + // * "experimental" + // * "stable" + // * "deprecated" + Stability string `json:"stability,omitempty"` + } + + // Response containing information about a provisioner. 
+ ProvisionerResponse struct { + + // See taskcluster [actions](/docs/reference/platform/taskcluster-queue/docs/actions) documentation. + Actions []Action `json:"actions"` + + // Description of the provisioner. + Description string `json:"description"` + + // Date and time after which the provisioner will be automatically + // deleted by the queue. + Expires tcclient.Time `json:"expires"` + + // Date of the last time this provisioner was seen active. `lastDateActive` is updated every 6 hours + // but may be off by up-to 6 hours. Nonetheless, `lastDateActive` is a good indicator + // of when the provisioner was last seen active. + LastDateActive tcclient.Time `json:"lastDateActive"` + + // Unique identifier for a provisioner, that can supply specified + // `workerType` + // + // Syntax: ^[a-zA-Z0-9-_]{1,38}$ + ProvisionerID string `json:"provisionerId"` + + // This is the stability of the provisioner. Accepted values: + // * `experimental` + // * `stable` + // * `deprecated` + // + // Possible values: + // * "experimental" + // * "stable" + // * "deprecated" + Stability string `json:"stability"` + } + + // Request to update a worker's quarantineUntil property. + QuarantineWorkerRequest struct { + + // Quarantining a worker allows the machine to remain alive but not accept jobs. + // Once the quarantineUntil time has elapsed, the worker resumes accepting jobs. + // Note that a quarantine can be lifted by setting `quarantineUntil` to the present time (or + // somewhere in the past). + QuarantineUntil tcclient.Time `json:"quarantineUntil"` + } + + // Request the queue to redirect to a URL for a given artifact. + // This allows you to reference artifacts that aren't managed by the queue. + // The queue will still authenticate the request, so depending on the level + // of secrecy required, secret URLs **might** work. 
Note, this is mainly + // useful for public artifacts, for example temporary files directly + // stored on the worker host and only available there for a specific + // amount of time. + RedirectArtifactRequest struct { + + // Artifact mime-type for the resource to which the queue should + // redirect. Please use the same `Content-Type`, consistently using + // the correct mime-type make tooling a lot easier, specifically, + // always using `application/json` for JSON artifacts. + // + // Max length: 255 + ContentType string `json:"contentType"` + + // Date-time after which the queue should no longer redirect to this URL. + // Note, that the queue will and cannot delete the resource your URL + // references, you are responsible for doing that yourself. + Expires tcclient.Time `json:"expires"` + + // Artifact storage type, in this case `reference` + // + // Possible values: + // * "reference" + StorageType string `json:"storageType"` + + // URL to which the queue should redirect using a `303` (See other) + // redirect. + URL string `json:"url"` + } + + // Response to a request for the queue to redirect to a URL for a given + // artifact. + RedirectArtifactResponse struct { + + // Artifact storage type, in this case `reference` + // + // Possible values: + // * "reference" + StorageType string `json:"storageType"` + } + + // JSON object with information about a run + RunInformation struct { + + // Reason for the creation of this run, + // **more reasons may be added in the future**. + // + // Possible values: + // * "scheduled" + // * "retry" + // * "task-retry" + // * "rerun" + // * "exception" + ReasonCreated string `json:"reasonCreated"` + + // Reason that run was resolved, this is mainly + // useful for runs resolved as `exception`. + // Note, **more reasons may be added in the future**, also this + // property is only available after the run is resolved. 
Some of these + // reasons, notably `intermittent-task`, `worker-shutdown`, and + // `claim-expired`, will trigger an automatic retry of the task. + // + // Possible values: + // * "completed" + // * "failed" + // * "deadline-exceeded" + // * "canceled" + // * "superseded" + // * "claim-expired" + // * "worker-shutdown" + // * "malformed-payload" + // * "resource-unavailable" + // * "internal-error" + // * "intermittent-task" + ReasonResolved string `json:"reasonResolved,omitempty"` + + // Date-time at which this run was resolved, ie. when the run changed + // state from `running` to either `completed`, `failed` or `exception`. + // This property is only present after the run as been resolved. + Resolved tcclient.Time `json:"resolved,omitempty"` + + // Id of this task run, `run-id`s always starts from `0` + // + // Mininum: 0 + // Maximum: 1000 + RunID int64 `json:"runId"` + + // Date-time at which this run was scheduled, ie. when the run was + // created in state `pending`. + Scheduled tcclient.Time `json:"scheduled"` + + // Date-time at which this run was claimed, ie. when the run changed + // state from `pending` to `running`. This property is only present + // after the run has been claimed. + Started tcclient.Time `json:"started,omitempty"` + + // State of this run + // + // Possible values: + // * "pending" + // * "running" + // * "completed" + // * "failed" + // * "exception" + State string `json:"state"` + + // Time at which the run expires and is resolved as `failed`, if the + // run isn't reclaimed. Note, only present after the run has been + // claimed. + TakenUntil tcclient.Time `json:"takenUntil,omitempty"` + + // Identifier for group that worker who executes this run is a part of, + // this identifier is mainly used for efficient routing. + // Note, this property is only present after the run is claimed. 
+ // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerGroup string `json:"workerGroup,omitempty"` + + // Identifier for worker evaluating this run within given + // `workerGroup`. Note, this property is only available after the run + // has been claimed. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerID string `json:"workerId,omitempty"` + } + + // Request for a signed PUT URL that will allow you to upload an artifact + // to an S3 bucket managed by the queue. + S3ArtifactRequest struct { + + // Artifact mime-type, when uploading artifact to the signed + // `PUT` URL returned from this request this must given with the + // `ContentType` header. Please, provide correct mime-type, + // this make tooling a lot easier, specifically, + // always using `application/json` for JSON artifacts. + // + // Max length: 255 + ContentType string `json:"contentType"` + + // Date-time after which the artifact should be deleted. Note, that + // these will be collected over time, and artifacts may remain + // available after expiration. S3 based artifacts are identified in + // azure table storage and explicitly deleted on S3 after expiration. + Expires tcclient.Time `json:"expires"` + + // Artifact storage type, in this case `'s3'` + // + // Possible values: + // * "s3" + StorageType string `json:"storageType"` + } + + // Response to a request for a signed PUT URL that will allow you to + // upload an artifact to an S3 bucket managed by the queue. + S3ArtifactResponse struct { + + // Artifact mime-type, must be specified as header when uploading with + // the signed `putUrl`. + // + // Max length: 255 + ContentType string `json:"contentType"` + + // Date-time after which the signed `putUrl` no longer works + Expires tcclient.Time `json:"expires"` + + // URL to which a `PUT` request can be made to upload the artifact + // requested. 
Note, the `Content-Length` must be specified correctly, + // and the `ContentType` header must be set the value specified below. + PutURL string `json:"putUrl"` + + // Artifact storage type, in this case `'s3'` + // + // Possible values: + // * "s3" + StorageType string `json:"storageType"` + } + + TaskClaim struct { + + // Temporary credentials granting `task.scopes` and the scope: + // `queue:claim-task:/` which allows the worker to reclaim + // the task, upload artifacts and report task resolution. + // + // The temporary credentials are set to expire after `takenUntil`. They + // won't expire exactly at `takenUntil` but shortly after, hence, requests + // coming close `takenUntil` won't have problems even if there is a little + // clock drift. + // + // Workers should use these credentials when making requests on behalf of + // a task. This includes requests to create artifacts, reclaiming the task + // reporting the task `completed`, `failed` or `exception`. + // + // Note, a new set of temporary credentials is issued when the worker + // reclaims the task. + Credentials TaskCredentials `json:"credentials"` + + // `run-id` assigned to this run of the task + // + // Mininum: 0 + // Maximum: 1000 + RunID int64 `json:"runId"` + + // A representation of **task status** as known by the queue + Status TaskStatusStructure `json:"status"` + + // Time at which the run expires and is resolved as `exception`, + // with reason `claim-expired` if the run haven't been reclaimed. + TakenUntil tcclient.Time `json:"takenUntil"` + + // Definition of a task that can be scheduled + Task TaskDefinitionResponse `json:"task"` + + // Identifier for the worker-group within which this run started. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerGroup string `json:"workerGroup"` + + // Identifier for the worker executing this run. 
+ // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerID string `json:"workerId"` + } + + // Request to claim (or reclaim) a task + TaskClaimRequest struct { + + // Identifier for group that worker claiming the task is a part of. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerGroup string `json:"workerGroup"` + + // Identifier for worker within the given workerGroup + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerID string `json:"workerId"` + } + + // Response to a successful task claim + TaskClaimResponse struct { + + // Temporary credentials granting `task.scopes` and the scope: + // `queue:claim-task:/` which allows the worker to reclaim + // the task, upload artifacts and report task resolution. + // + // The temporary credentials are set to expire after `takenUntil`. They + // won't expire exactly at `takenUntil` but shortly after, hence, requests + // coming close `takenUntil` won't have problems even if there is a little + // clock drift. + // + // Workers should use these credentials when making requests on behalf of + // a task. This includes requests to create artifacts, reclaiming the task + // reporting the task `completed`, `failed` or `exception`. + // + // Note, a new set of temporary credentials is issued when the worker + // reclaims the task. + Credentials TaskCredentials `json:"credentials"` + + // `run-id` assigned to this run of the task + // + // Mininum: 0 + // Maximum: 1000 + RunID int64 `json:"runId"` + + // A representation of **task status** as known by the queue + Status TaskStatusStructure `json:"status"` + + // Time at which the run expires and is resolved as `exception`, + // with reason `claim-expired` if the run haven't been reclaimed. 
+ TakenUntil tcclient.Time `json:"takenUntil"` + + // Definition of a task that can be scheduled + Task TaskDefinitionResponse `json:"task"` + + // Identifier for the worker-group within which this run started. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerGroup string `json:"workerGroup"` + + // Identifier for the worker executing this run. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerID string `json:"workerId"` + } + + // Temporary credentials granting `task.scopes` and the scope: + // `queue:claim-task:/` which allows the worker to reclaim + // the task, upload artifacts and report task resolution. + // + // The temporary credentials are set to expire after `takenUntil`. They + // won't expire exactly at `takenUntil` but shortly after, hence, requests + // coming close `takenUntil` won't have problems even if there is a little + // clock drift. + // + // Workers should use these credentials when making requests on behalf of + // a task. This includes requests to create artifacts, reclaiming the task + // reporting the task `completed`, `failed` or `exception`. + // + // Note, a new set of temporary credentials is issued when the worker + // reclaims the task. + TaskCredentials struct { + + // The `accessToken` for the temporary credentials. + // + // Min length: 1 + AccessToken string `json:"accessToken"` + + // The `certificate` for the temporary credentials, these are required + // for the temporary credentials to work. + // + // Min length: 1 + Certificate string `json:"certificate"` + + // The `clientId` for the temporary credentials. + // + // Min length: 1 + ClientID string `json:"clientId"` + } + + // Task Definition and task status structure. 
+ TaskDefinitionAndStatus struct { + + // A representation of **task status** as known by the queue + Status TaskStatusStructure `json:"status"` + + // Definition of a task that can be scheduled + Task TaskDefinitionResponse `json:"task"` + } + + // Definition of a task that can be scheduled + TaskDefinitionRequest struct { + + // Creation time of task + Created tcclient.Time `json:"created"` + + // Deadline of the task, `pending` and `running` runs are + // resolved as **exception** if not resolved by other means + // before the deadline. Note, deadline cannot be more than + // 5 days into the future + Deadline tcclient.Time `json:"deadline"` + + // List of dependent tasks. These must either be _completed_ or _resolved_ + // before this task is scheduled. See `requires` for semantics. + // + // Default: [] + // + // Array items: + // The `taskId` of a task that must be resolved before this task is + // scheduled. + // + // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ + Dependencies []string `json:"dependencies,omitempty"` + + // Task expiration, time at which task definition and status is deleted. + // Notice that all artifacts for the task must have an expiration that is no + // later than this. If this property isn't it will be set to `deadline` + // plus one year (this default may change). + Expires tcclient.Time `json:"expires,omitempty"` + + // Object with properties that can hold any kind of extra data that should be + // associated with the task. This can be data for the task which doesn't + // fit into `payload`, or it can supplementary data for use in services + // listening for events from this task. For example this could be details to + // display on dashboard, or information for indexing the task. Please, try + // to put all related information under one property, so `extra` data keys + // don't conflict. 
**Warning**, do not stuff large data-sets in here -- + // task definitions should not take-up multiple MiBs. + // + // Default: {} + // + // Additional properties allowed + Extra json.RawMessage `json:"extra,omitempty"` + + // Required task metadata + Metadata TaskMetadata `json:"metadata"` + + // Task-specific payload following worker-specific format. + // Refer to the documentation for the worker implementing + // `/` for details. + // + // Additional properties allowed + Payload json.RawMessage `json:"payload"` + + // Priority of task. This defaults to `lowest` and the scope + // `queue:create-task://` is required + // to define a task with ``. The `normal` priority is treated as + // `lowest`. + // + // Possible values: + // * "highest" + // * "very-high" + // * "high" + // * "medium" + // * "low" + // * "very-low" + // * "lowest" + // * "normal" + // + // Default: "lowest" + Priority string `json:"priority,omitempty"` + + // Unique identifier for a provisioner, that can supply specified + // `workerType` + // + // Syntax: ^[a-zA-Z0-9-_]{1,38}$ + ProvisionerID string `json:"provisionerId"` + + // The tasks relation to its dependencies. This property specifies the + // semantics of the `task.dependencies` property. + // If `all-completed` is given the task will be scheduled when all + // dependencies are resolved _completed_ (successful resolution). + // If `all-resolved` is given the task will be scheduled when all dependencies + // have been resolved, regardless of what their resolution is. + // + // Possible values: + // * "all-completed" + // * "all-resolved" + // + // Default: "all-completed" + Requires string `json:"requires,omitempty"` + + // Number of times to retry the task in case of infrastructure issues. + // An _infrastructure issue_ is a worker node that crashes or is shutdown, + // these events are to be expected. + // + // Default: 5 + // Mininum: 0 + // Maximum: 49 + Retries int64 `json:"retries,omitempty"` + + // List of task-specific routes. 
Pulse messages about the task will be CC'ed to + // `route.` for each `` in this array. + // + // This array has a maximum size due to a limitation of the AMQP protocol, + // over which Pulse runs. All routes must fit in the same "frame" of this + // protocol, and the frames have a fixed maximum size (typically 128k). + // + // Default: [] + // + // Array items: + // A task specific route. + // + // Min length: 1 + // Max length: 249 + Routes []string `json:"routes,omitempty"` + + // All tasks in a task group must have the same `schedulerId`. This is used for several purposes: + // + // * it can represent the entity that created the task; + // * it can limit addition of new tasks to a task group: the caller of + // `createTask` must have a scope related to the `schedulerId` of the task + // group; + // * it controls who can manipulate tasks, again by requiring + // `schedulerId`-related scopes; and + // * it appears in the routing key for Pulse messages about the task. + // + // Default: "-" + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + SchedulerID string `json:"schedulerId,omitempty"` + + // List of scopes that the task is authorized to use during its execution. + // + // Array items: + // A single scope. A scope must be composed of + // printable ASCII characters and spaces. Scopes ending in more than + // one `*` character are forbidden. + // + // Syntax: ^[ -~]*$ + Scopes []string `json:"scopes,omitempty"` + + // Arbitrary key-value tags (only strings limited to 4k). These can be used + // to attach informal metadata to a task. Use this for informal tags that + // tasks can be classified by. You can also think of strings here as + // candidates for formal metadata. Something like + // `purpose: 'build' || 'test'` is a good example. + // + // Default: {} + // + // Map entries: + // Max length: 4096 + Tags map[string]string `json:"tags,omitempty"` + + // Identifier for a group of tasks scheduled together with this task. 
+ // Generally, all tasks related to a single event such as a version-control + // push or a nightly build have the same `taskGroupId`. This property + // defaults to `taskId` if it isn't specified. Tasks with `taskId` equal to + // the `taskGroupId` are, [by convention](/docs/manual/using/task-graph), + // decision tasks. + // + // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ + TaskGroupID string `json:"taskGroupId,omitempty"` + + // Unique identifier for a worker-type within a specific provisioner + // + // Syntax: ^[a-z]([-a-z0-9]{0,36}[a-z0-9])?$ + WorkerType string `json:"workerType"` + } + + // Definition of a task that can be scheduled + TaskDefinitionResponse struct { + + // Creation time of task + Created tcclient.Time `json:"created"` + + // Deadline of the task, `pending` and `running` runs are + // resolved as **exception** if not resolved by other means + // before the deadline. Note, deadline cannot be more than + // 5 days into the future + Deadline tcclient.Time `json:"deadline"` + + // List of dependent tasks. These must either be _completed_ or _resolved_ + // before this task is scheduled. See `requires` for semantics. + // + // Default: [] + // + // Array items: + // The `taskId` of a task that must be resolved before this task is + // scheduled. + // + // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ + Dependencies []string `json:"dependencies"` + + // Task expiration, time at which task definition and status is deleted. + // Notice that all artifacts for the task must have an expiration that is no + // later than this. If this property isn't it will be set to `deadline` + // plus one year (this default may change). + Expires tcclient.Time `json:"expires,omitempty"` + + // Object with properties that can hold any kind of extra data that should be + // associated with the task. 
This can be data for the task which doesn't + // fit into `payload`, or it can supplementary data for use in services + // listening for events from this task. For example this could be details to + // display on dashboard, or information for indexing the task. Please, try + // to put all related information under one property, so `extra` data keys + // don't conflict. **Warning**, do not stuff large data-sets in here -- + // task definitions should not take-up multiple MiBs. + // + // Default: {} + // + // Additional properties allowed + Extra json.RawMessage `json:"extra"` + + // Required task metadata + Metadata TaskMetadata `json:"metadata"` + + // Task-specific payload following worker-specific format. + // Refer to the documentation for the worker implementing + // `/` for details. + // + // Additional properties allowed + Payload json.RawMessage `json:"payload"` + + // Priority of task. This defaults to `lowest` and the scope + // `queue:create-task://` is required + // to define a task with ``. The `normal` priority is treated as + // `lowest`. + // + // Possible values: + // * "highest" + // * "very-high" + // * "high" + // * "medium" + // * "low" + // * "very-low" + // * "lowest" + // * "normal" + // + // Default: "lowest" + Priority string `json:"priority"` + + // Unique identifier for a provisioner, that can supply specified + // `workerType` + // + // Syntax: ^[a-zA-Z0-9-_]{1,38}$ + ProvisionerID string `json:"provisionerId"` + + // The tasks relation to its dependencies. This property specifies the + // semantics of the `task.dependencies` property. + // If `all-completed` is given the task will be scheduled when all + // dependencies are resolved _completed_ (successful resolution). + // If `all-resolved` is given the task will be scheduled when all dependencies + // have been resolved, regardless of what their resolution is. 
+ // + // Possible values: + // * "all-completed" + // * "all-resolved" + // + // Default: "all-completed" + Requires string `json:"requires"` + + // Number of times to retry the task in case of infrastructure issues. + // An _infrastructure issue_ is a worker node that crashes or is shutdown, + // these events are to be expected. + // + // Default: 5 + // Mininum: 0 + // Maximum: 49 + Retries int64 `json:"retries"` + + // List of task-specific routes. Pulse messages about the task will be CC'ed to + // `route.` for each `` in this array. + // + // This array has a maximum size due to a limitation of the AMQP protocol, + // over which Pulse runs. All routes must fit in the same "frame" of this + // protocol, and the frames have a fixed maximum size (typically 128k). + // + // Default: [] + // + // Array items: + // A task specific route. + // + // Min length: 1 + // Max length: 249 + Routes []string `json:"routes"` + + // All tasks in a task group must have the same `schedulerId`. This is used for several purposes: + // + // * it can represent the entity that created the task; + // * it can limit addition of new tasks to a task group: the caller of + // `createTask` must have a scope related to the `schedulerId` of the task + // group; + // * it controls who can manipulate tasks, again by requiring + // `schedulerId`-related scopes; and + // * it appears in the routing key for Pulse messages about the task. + // + // Default: "-" + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + SchedulerID string `json:"schedulerId"` + + // List of scopes that the task is authorized to use during its execution. + // + // Array items: + // A single scope. A scope must be composed of + // printable ASCII characters and spaces. Scopes ending in more than + // one `*` character are forbidden. + // + // Syntax: ^[ -~]*$ + Scopes []string `json:"scopes"` + + // Arbitrary key-value tags (only strings limited to 4k). 
These can be used + // to attach informal metadata to a task. Use this for informal tags that + // tasks can be classified by. You can also think of strings here as + // candidates for formal metadata. Something like + // `purpose: 'build' || 'test'` is a good example. + // + // Default: {} + // + // Map entries: + // Max length: 4096 + Tags map[string]string `json:"tags"` + + // Identifier for a group of tasks scheduled together with this task. + // Generally, all tasks related to a single event such as a version-control + // push or a nightly build have the same `taskGroupId`. This property + // defaults to `taskId` if it isn't specified. Tasks with `taskId` equal to + // the `taskGroupId` are, [by convention](/docs/manual/using/task-graph), + // decision tasks. + // + // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ + TaskGroupID string `json:"taskGroupId"` + + // Unique identifier for a worker-type within a specific provisioner + // + // Syntax: ^[a-z]([-a-z0-9]{0,36}[a-z0-9])?$ + WorkerType string `json:"workerType"` + } + + // Request for a run of a task to be resolved with an exception + TaskExceptionRequest struct { + + // Reason that the task is resolved with an exception. This is a subset + // of the values for `resolvedReason` given in the task status structure. + // **Report `worker-shutdown`** if the run failed because the worker + // had to shutdown (spot node disappearing). In case of `worker-shutdown` + // the queue will immediately **retry** the task, by making a new run. + // This is much faster than ignoreing the issue and letting the task _retry_ + // by claim expiration. For any other _reason_ reported the queue will not + // retry the task. + // **Report `malformed-payload`** if the `task.payload` doesn't match the + // schema for the worker payload, or referenced resource doesn't exists. + // In either case, you should still log the error to a log file for the + // specific run. 
+ // **Report `resource-unavailable`** if a resource/service needed or + // referenced in `task.payload` is _temporarily_ unavailable. Do not use this + // unless you know the resource exists, if the resource doesn't exist you + // should report `malformed-payload`. Example use-case if you contact the + // index (a service) on behalf of the task, because of a declaration in + // `task.payload`, and the service (index) is temporarily down. Don't use + // this if a URL returns 404, but if it returns 503 or hits a timeout when + // you retry the request, then this _may_ be a valid exception. The queue + // assumes that workers have applied retries as needed, and will not retry + // the task. + // **Report `internal-error`** if the worker experienced an unhandled internal + // error from which it couldn't recover. The queue will not retry runs + // resolved with this reason, but you are clearly signaling that this is a + // bug in the worker code. + // **Report `superseded`** if the task was determined to have been + // superseded by another task, and its results are no longer needed. It is + // convention in this case to create an artifact entitled + // `public/superseded-by` containing the taskId of the task that superseded + // this one. + // **Report `intermittent-task`** if the task explicitly requested a retry + // because task is intermittent. Workers can choose whether or not to + // support this, but workers shouldn't blindly report this for every task + // that fails. + // + // Possible values: + // * "worker-shutdown" + // * "malformed-payload" + // * "resource-unavailable" + // * "internal-error" + // * "superseded" + // * "intermittent-task" + Reason string `json:"reason"` + } + + // Required task metadata + TaskMetadata struct { + + // Human readable description of the task, please **explain** what the + // task does. A few lines of documentation is not going to hurt you. 
+ // + // Max length: 32768 + Description string `json:"description"` + + // Human readable name of task, used to very briefly given an idea about + // what the task does. + // + // Max length: 255 + Name string `json:"name"` + + // E-mail of person who caused this task, e.g. the person who did + // `hg push`. The person we should contact to ask why this task is here. + // + // Max length: 255 + Owner string `json:"owner"` + + // Link to source of this task, should specify a file, revision and + // repository. This should be place someone can go an do a git/hg blame + // to who came up with recipe for this task. + // + // Syntax: ^https?:// + // Max length: 4096 + Source string `json:"source"` + } + + // Response to a successful task claim + TaskReclaimResponse struct { + + // Temporary credentials granting `task.scopes` and the scope: + // `queue:claim-task:/` which allows the worker to reclaim + // the task, upload artifacts and report task resolution. + // + // The temporary credentials are set to expire after `takenUntil`. They + // won't expire exactly at `takenUntil` but shortly after, hence, requests + // coming close `takenUntil` won't have problems even if there is a little + // clock drift. + // + // Workers should use these credentials when making requests on behalf of + // a task. This includes requests to create artifacts, reclaiming the task + // reporting the task `completed`, `failed` or `exception`. + // + // Note, a new set of temporary credentials is issued when the worker + // reclaims the task. + Credentials TaskCredentials `json:"credentials"` + + // `run-id` assigned to this run of the task + // + // Mininum: 0 + // Maximum: 1000 + RunID int64 `json:"runId"` + + // A representation of **task status** as known by the queue + Status TaskStatusStructure `json:"status"` + + // Time at which the run expires and is resolved as `exception`, + // with reason `claim-expired` if the run haven't been reclaimed. 
+ TakenUntil tcclient.Time `json:"takenUntil"` + + // Identifier for the worker-group within which this run started. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerGroup string `json:"workerGroup"` + + // Identifier for the worker executing this run. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerID string `json:"workerId"` + } + + // A run of a task. + TaskRun struct { + + // Id of this task run, `run-id`s always starts from `0` + // + // Mininum: 0 + // Maximum: 1000 + RunID int64 `json:"runId"` + + // Unique task identifier, this is UUID encoded as + // [URL-safe base64](http://tools.ietf.org/html/rfc4648#section-5) and + // stripped of `=` padding. + // + // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ + TaskID string `json:"taskId"` + } + + // Response to a task status request + TaskStatusResponse struct { + + // A representation of **task status** as known by the queue + Status TaskStatusStructure `json:"status"` + } + + // A representation of **task status** as known by the queue + TaskStatusStructure struct { + + // Deadline of the task, `pending` and `running` runs are + // resolved as **exception** if not resolved by other means + // before the deadline. Note, deadline cannot be more than + // 5 days into the future + Deadline tcclient.Time `json:"deadline"` + + // Task expiration, time at which task definition and + // status is deleted. Notice that all artifacts for the task + // must have an expiration that is no later than this. 
+ Expires tcclient.Time `json:"expires"` + + // Unique identifier for a provisioner, that can supply specified + // `workerType` + // + // Syntax: ^[a-zA-Z0-9-_]{1,38}$ + ProvisionerID string `json:"provisionerId"` + + // Number of retries left for the task in case of infrastructure issues + // + // Mininum: 0 + // Maximum: 999 + RetriesLeft int64 `json:"retriesLeft"` + + // List of runs, ordered so that index `i` has `runId == i` + Runs []RunInformation `json:"runs"` + + // All tasks in a task group must have the same `schedulerId`. This is used for several purposes: + // + // * it can represent the entity that created the task; + // * it can limit addition of new tasks to a task group: the caller of + // `createTask` must have a scope related to the `schedulerId` of the task + // group; + // * it controls who can manipulate tasks, again by requiring + // `schedulerId`-related scopes; and + // * it appears in the routing key for Pulse messages about the task. + // + // Default: "-" + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + SchedulerID string `json:"schedulerId"` + + // State of this task. This is just an auxiliary property derived from state + // of latests run, or `unscheduled` if none. + // + // Possible values: + // * "unscheduled" + // * "pending" + // * "running" + // * "completed" + // * "failed" + // * "exception" + State string `json:"state"` + + // Identifier for a group of tasks scheduled together with this task. + // Generally, all tasks related to a single event such as a version-control + // push or a nightly build have the same `taskGroupId`. This property + // defaults to `taskId` if it isn't specified. Tasks with `taskId` equal to + // the `taskGroupId` are, [by convention](/docs/manual/using/task-graph), + // decision tasks. 
+ // + // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ + TaskGroupID string `json:"taskGroupId"` + + // Unique task identifier, this is UUID encoded as + // [URL-safe base64](http://tools.ietf.org/html/rfc4648#section-5) and + // stripped of `=` padding. + // + // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ + TaskID string `json:"taskId"` + + // Unique identifier for a worker-type within a specific provisioner + // + // Syntax: ^[a-z]([-a-z0-9]{0,36}[a-z0-9])?$ + WorkerType string `json:"workerType"` + } + + Worker struct { + + // Date of the first time this worker claimed a task. + FirstClaim tcclient.Time `json:"firstClaim"` + + // A run of a task. + LatestTask TaskRun `json:"latestTask,omitempty"` + + // Quarantining a worker allows the machine to remain alive but not accept jobs. + // Once the quarantineUntil time has elapsed, the worker resumes accepting jobs. + // Note that a quarantine can be lifted by setting `quarantineUntil` to the present time (or + // somewhere in the past). + QuarantineUntil tcclient.Time `json:"quarantineUntil,omitempty"` + + // Identifier for the worker group containing this worker. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerGroup string `json:"workerGroup"` + + // Identifier for this worker (unique within this worker group). + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerID string `json:"workerId"` + } + + // Actions provide a generic mechanism to expose additional features of a + // provisioner, worker type, or worker to Taskcluster clients. + // + // An action is comprised of metadata describing the feature it exposes, + // together with a webhook for triggering it. + // + // The Taskcluster tools site, for example, retrieves actions when displaying + // provisioners, worker types and workers. 
It presents the provisioner/worker + // type/worker specific actions to the user. When the user triggers an action, + // the web client takes the registered webhook, substitutes parameters into the + // URL (see `url`), signs the requests with the Taskcluster credentials of the + // user operating the web interface, and issues the HTTP request. + // + // The level to which the action relates (provisioner, worker type, worker) is + // called the action context. All actions, regardless of the action contexts, + // are registered against the provisioner when calling + // `queue.declareProvisioner`. + // + // The action context is used by the web client to determine where in the web + // interface to present the action to the user as follows: + // + // | `context` | Tool where action is displayed | + // |-------------|--------------------------------| + // | provisioner | Provisioner Explorer | + // | worker-type | Workers Explorer | + // | worker | Worker Explorer | + // + // See [actions docs](/docs/reference/platform/taskcluster-queue/docs/actions) + // for more information. + WorkerAction struct { + + // Only actions with the context `worker` are included. + // + // Possible values: + // * "worker" + Context string `json:"context"` + + // Description of the provisioner. + Description string `json:"description"` + + // Method to indicate the desired action to be performed for a given resource. + // + // Possible values: + // * "POST" + // * "PUT" + // * "DELETE" + // * "PATCH" + Method string `json:"method"` + + // Short names for things like logging/error messages. + Name string `json:"name"` + + // Appropriate title for any sort of Modal prompt. + Title json.RawMessage `json:"title"` + + // When an action is triggered, a request is made using the `url` and `method`. 
+ // Depending on the `context`, the following parameters will be substituted in the url: + // + // | `context` | Path parameters | + // |-------------|----------------------------------------------------------| + // | provisioner | | + // | worker-type | , | + // | worker | , , , | + // + // _Note: The request needs to be signed with the user's Taskcluster credentials._ + URL string `json:"url"` + } + + // Request to update a worker. + WorkerRequest struct { + + // Date and time after which the worker will be automatically + // deleted by the queue. + Expires tcclient.Time `json:"expires,omitempty"` + } + + // Response containing information about a worker. + WorkerResponse struct { + Actions []WorkerAction `json:"actions"` + + // Date and time after which the worker will be automatically + // deleted by the queue. + Expires tcclient.Time `json:"expires"` + + // Date of the first time this worker claimed a task. + FirstClaim tcclient.Time `json:"firstClaim"` + + // Unique identifier for a provisioner, that can supply specified + // `workerType` + // + // Syntax: ^[a-zA-Z0-9-_]{1,38}$ + ProvisionerID string `json:"provisionerId"` + + // Quarantining a worker allows the machine to remain alive but not accept jobs. + // Once the quarantineUntil time has elapsed, the worker resumes accepting jobs. + // Note that a quarantine can be lifted by setting `quarantineUntil` to the present time (or + // somewhere in the past). + QuarantineUntil tcclient.Time `json:"quarantineUntil,omitempty"` + + // List of 20 most recent tasks claimed by the worker. + RecentTasks []TaskRun `json:"recentTasks"` + + // Identifier for group that worker who executes this run is a part of, + // this identifier is mainly used for efficient routing. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerGroup string `json:"workerGroup"` + + // Identifier for worker evaluating this run within given + // `workerGroup`. 
+ // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerID string `json:"workerId"` + + // Unique identifier for a worker-type within a specific provisioner + // + // Syntax: ^[a-z]([-a-z0-9]{0,36}[a-z0-9])?$ + WorkerType string `json:"workerType"` + } + + WorkerType struct { + + // Description of the worker-type. + Description string `json:"description"` + + // Date and time after which the worker-type will be automatically + // deleted by the queue. + Expires tcclient.Time `json:"expires"` + + // Date and time where the worker-type was last seen active + LastDateActive tcclient.Time `json:"lastDateActive"` + + // Unique identifier for a provisioner, that can supply specified + // `workerType` + // + // Syntax: ^[a-zA-Z0-9-_]{1,38}$ + ProvisionerID string `json:"provisionerId"` + + // This is the stability of the worker-type. Accepted values: + // * `experimental` + // * `stable` + // * `deprecated` + // + // Possible values: + // * "experimental" + // * "stable" + // * "deprecated" + Stability string `json:"stability"` + + // Unique identifier for a worker-type within a specific provisioner + // + // Syntax: ^[a-z]([-a-z0-9]{0,36}[a-z0-9])?$ + WorkerType string `json:"workerType"` + } + + // Actions provide a generic mechanism to expose additional features of a + // provisioner, worker type, or worker to Taskcluster clients. + // + // An action is comprised of metadata describing the feature it exposes, + // together with a webhook for triggering it. + // + // The Taskcluster tools site, for example, retrieves actions when displaying + // provisioners, worker types and workers. It presents the provisioner/worker + // type/worker specific actions to the user. When the user triggers an action, + // the web client takes the registered webhook, substitutes parameters into the + // URL (see `url`), signs the requests with the Taskcluster credentials of the + // user operating the web interface, and issues the HTTP request. 
+ // + // The level to which the action relates (provisioner, worker type, worker) is + // called the action context. All actions, regardless of the action contexts, + // are registered against the provisioner when calling + // `queue.declareProvisioner`. + // + // The action context is used by the web client to determine where in the web + // interface to present the action to the user as follows: + // + // | `context` | Tool where action is displayed | + // |-------------|--------------------------------| + // | provisioner | Provisioner Explorer | + // | worker-type | Workers Explorer | + // | worker | Worker Explorer | + // + // See [actions docs](/docs/reference/platform/taskcluster-queue/docs/actions) + // for more information. + WorkerTypeAction struct { + + // Only actions with the context `worker-type` are included. + // + // Possible values: + // * "worker-type" + Context string `json:"context"` + + // Description of the provisioner. + Description string `json:"description"` + + // Method to indicate the desired action to be performed for a given resource. + // + // Possible values: + // * "POST" + // * "PUT" + // * "DELETE" + // * "PATCH" + Method string `json:"method"` + + // Short names for things like logging/error messages. + Name string `json:"name"` + + // Appropriate title for any sort of Modal prompt. + Title json.RawMessage `json:"title"` + + // When an action is triggered, a request is made using the `url` and `method`. + // Depending on the `context`, the following parameters will be substituted in the url: + // + // | `context` | Path parameters | + // |-------------|----------------------------------------------------------| + // | provisioner | | + // | worker-type | , | + // | worker | , , , | + // + // _Note: The request needs to be signed with the user's Taskcluster credentials._ + URL string `json:"url"` + } + + // Request to update a worker-type. + WorkerTypeRequest struct { + + // Description of the provisioner. 
+ Description string `json:"description,omitempty"` + + // Date and time after which the worker-type will be automatically + // deleted by the queue. + Expires tcclient.Time `json:"expires,omitempty"` + + // This is the stability of the provisioner. Accepted values: + // * `experimental` + // * `stable` + // * `deprecated` + // + // Possible values: + // * "experimental" + // * "stable" + // * "deprecated" + Stability string `json:"stability,omitempty"` + } + + // Response to a worker-type request from a provisioner. + WorkerTypeResponse struct { + Actions []WorkerTypeAction `json:"actions"` + + // Description of the worker-type. + Description string `json:"description"` + + // Date and time after which the worker-type will be automatically + // deleted by the queue. + Expires tcclient.Time `json:"expires"` + + // Date of the last time this worker-type was seen active. `lastDateActive` is updated every 6 hours + // but may be off by up-to 6 hours. Nonetheless, `lastDateActive` is a good indicator + // of when the worker-type was last seen active. + LastDateActive tcclient.Time `json:"lastDateActive"` + + // Unique identifier for a provisioner, that can supply specified + // `workerType` + // + // Syntax: ^[a-zA-Z0-9-_]{1,38}$ + ProvisionerID string `json:"provisionerId"` + + // This is the stability of the worker-type. Accepted values: + // * `experimental` + // * `stable` + // * `deprecated` + // + // Possible values: + // * "experimental" + // * "stable" + // * "deprecated" + Stability string `json:"stability"` + + // Unique identifier for a worker-type within a specific provisioner + // + // Syntax: ^[a-z]([-a-z0-9]{0,36}[a-z0-9])?$ + WorkerType string `json:"workerType"` + } +) + +// MarshalJSON calls json.RawMessage method of the same name. Required since +// PostArtifactRequest is of type json.RawMessage... 
+func (this *PostArtifactRequest) MarshalJSON() ([]byte, error) { + x := json.RawMessage(*this) + return (&x).MarshalJSON() +} + +// UnmarshalJSON is a copy of the json.RawMessage implementation. +func (this *PostArtifactRequest) UnmarshalJSON(data []byte) error { + if this == nil { + return errors.New("PostArtifactRequest: UnmarshalJSON on nil pointer") + } + *this = append((*this)[0:0], data...) + return nil +} + +// MarshalJSON calls json.RawMessage method of the same name. Required since +// PostArtifactResponse is of type json.RawMessage... +func (this *PostArtifactResponse) MarshalJSON() ([]byte, error) { + x := json.RawMessage(*this) + return (&x).MarshalJSON() +} + +// UnmarshalJSON is a copy of the json.RawMessage implementation. +func (this *PostArtifactResponse) UnmarshalJSON(data []byte) error { + if this == nil { + return errors.New("PostArtifactResponse: UnmarshalJSON on nil pointer") + } + *this = append((*this)[0:0], data...) + return nil +} diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueueevents/tcqueueevents.go b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueueevents/tcqueueevents.go new file mode 100644 index 0000000..58e4d17 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueueevents/tcqueueevents.go @@ -0,0 +1,374 @@ +// The following code is AUTO-GENERATED. Please DO NOT edit. +// To update this generated code, run the following command: +// in the /codegenerator/model subdirectory of this project, +// making sure that `${GOPATH}/bin` is in your `PATH`: +// +// go install && go generate +// +// This package was generated from the schema defined at +// /references/queue/v1/exchanges.json + +// The queue service is responsible for accepting tasks and track their state +// as they are executed by workers. In order ensure they are eventually +// resolved. 
+// +// This document describes AMQP exchanges offered by the queue, which allows +// third-party listeners to monitor tasks as they progress to resolution. +// These exchanges targets the following audience: +// * Schedulers, who takes action after tasks are completed, +// * Workers, who wants to listen for new or canceled tasks (optional), +// * Tools, that wants to update their view as task progress. +// +// You'll notice that all the exchanges in the document shares the same +// routing key pattern. This makes it very easy to bind to all messages +// about a certain kind tasks. +// +// **Task specific routes**, a task can define a task specific route using +// the `task.routes` property. See task creation documentation for details +// on permissions required to provide task specific routes. If a task has +// the entry `'notify.by-email'` in as task specific route defined in +// `task.routes` all messages about this task will be CC'ed with the +// routing-key `'route.notify.by-email'`. +// +// These routes will always be prefixed `route.`, so that cannot interfere +// with the _primary_ routing key as documented here. Notice that the +// _primary_ routing key is always prefixed `primary.`. This is ensured +// in the routing key reference, so API clients will do this automatically. +// +// Please, note that the way RabbitMQ works, the message will only arrive +// in your queue once, even though you may have bound to the exchange with +// multiple routing key patterns that matches more of the CC'ed routing +// routing keys. +// +// **Delivery guarantees**, most operations on the queue are idempotent, +// which means that if repeated with the same arguments then the requests +// will ensure completion of the operation and return the same response. +// This is useful if the server crashes or the TCP connection breaks, but +// when re-executing an idempotent operation, the queue will also resend +// any related AMQP messages. Hence, messages may be repeated. 
+// +// This shouldn't be much of a problem, as the best you can achieve using +// confirm messages with AMQP is at-least-once delivery semantics. Hence, +// this only prevents you from obtaining at-most-once delivery semantics. +// +// **Remark**, some message generated by timeouts maybe dropped if the +// server crashes at wrong time. Ideally, we'll address this in the +// future. For now we suggest you ignore this corner case, and notify us +// if this corner case is of concern to you. +// +// See: +// +// How to use this package +// +// This package is designed to sit on top of http://godoc.org/github.com/taskcluster/pulse-go/pulse. Please read +// the pulse package overview to get an understanding of how the pulse client is implemented in go. +// +// This package provides two things in addition to the basic pulse package: structured types for unmarshaling +// pulse message bodies into, and custom Binding interfaces, for defining the fixed strings for task cluster +// exchange names, and routing keys as structured types. +// +// For example, when specifying a binding, rather than using: +// +// pulse.Bind( +// "*.*.*.*.*.*.gaia.#", +// "exchange/taskcluster-queue/v1/task-defined", +// ) +// +// You can rather use: +// +// queueevents.TaskDefined{WorkerType: "gaia"} +// +// In addition, this means that you will also get objects in your callback method like *queueevents.TaskDefinedMessage +// rather than just interface{}. +package tcqueueevents + +import ( + "reflect" + "strings" +) + +// When a task is created or just defined a message is posted to this +// exchange. +// +// This message exchange is mainly useful when tasks are scheduled by a +// scheduler that uses `defineTask` as this does not make the task +// `pending`. Thus, no `taskPending` message is published. +// Please, note that messages are also published on this exchange if defined +// using `createTask`. 
+// +// See #taskDefined +type TaskDefined struct { + RoutingKeyKind string `mwords:"*"` + TaskID string `mwords:"*"` + RunID string `mwords:"*"` + WorkerGroup string `mwords:"*"` + WorkerID string `mwords:"*"` + ProvisionerID string `mwords:"*"` + WorkerType string `mwords:"*"` + SchedulerID string `mwords:"*"` + TaskGroupID string `mwords:"*"` + Reserved string `mwords:"#"` +} + +func (binding TaskDefined) RoutingKey() string { + return generateRoutingKey(&binding) +} + +func (binding TaskDefined) ExchangeName() string { + return "exchange/taskcluster-queue/v1/task-defined" +} + +func (binding TaskDefined) NewPayloadObject() interface{} { + return new(TaskDefinedMessage) +} + +// When a task becomes `pending` a message is posted to this exchange. +// +// This is useful for workers who doesn't want to constantly poll the queue +// for new tasks. The queue will also be authority for task states and +// claims. But using this exchange workers should be able to distribute work +// efficiently and they would be able to reduce their polling interval +// significantly without affecting general responsiveness. +// +// See #taskPending +type TaskPending struct { + RoutingKeyKind string `mwords:"*"` + TaskID string `mwords:"*"` + RunID string `mwords:"*"` + WorkerGroup string `mwords:"*"` + WorkerID string `mwords:"*"` + ProvisionerID string `mwords:"*"` + WorkerType string `mwords:"*"` + SchedulerID string `mwords:"*"` + TaskGroupID string `mwords:"*"` + Reserved string `mwords:"#"` +} + +func (binding TaskPending) RoutingKey() string { + return generateRoutingKey(&binding) +} + +func (binding TaskPending) ExchangeName() string { + return "exchange/taskcluster-queue/v1/task-pending" +} + +func (binding TaskPending) NewPayloadObject() interface{} { + return new(TaskPendingMessage) +} + +// Whenever a task is claimed by a worker, a run is started on the worker, +// and a message is posted on this exchange. 
+// +// See #taskRunning +type TaskRunning struct { + RoutingKeyKind string `mwords:"*"` + TaskID string `mwords:"*"` + RunID string `mwords:"*"` + WorkerGroup string `mwords:"*"` + WorkerID string `mwords:"*"` + ProvisionerID string `mwords:"*"` + WorkerType string `mwords:"*"` + SchedulerID string `mwords:"*"` + TaskGroupID string `mwords:"*"` + Reserved string `mwords:"#"` +} + +func (binding TaskRunning) RoutingKey() string { + return generateRoutingKey(&binding) +} + +func (binding TaskRunning) ExchangeName() string { + return "exchange/taskcluster-queue/v1/task-running" +} + +func (binding TaskRunning) NewPayloadObject() interface{} { + return new(TaskRunningMessage) +} + +// Whenever the `createArtifact` end-point is called, the queue will create +// a record of the artifact and post a message on this exchange. All of this +// happens before the queue returns a signed URL for the caller to upload +// the actual artifact with (pending on `storageType`). +// +// This means that the actual artifact is rarely available when this message +// is posted. But it is not unreasonable to assume that the artifact will +// will become available at some point later. Most signatures will expire in +// 30 minutes or so, forcing the uploader to call `createArtifact` with +// the same payload again in-order to continue uploading the artifact. +// +// However, in most cases (especially for small artifacts) it's very +// reasonable assume the artifact will be available within a few minutes. +// This property means that this exchange is mostly useful for tools +// monitoring task evaluation. One could also use it count number of +// artifacts per task, or _index_ artifacts though in most cases it'll be +// smarter to index artifacts after the task in question have completed +// successfully. +// +// *NOTE*: this message is currently only sent for reference and error +// artifacts. This will be remedied in a future version of Taskcluster. 
+// +// See #artifactCreated +type ArtifactCreated struct { + RoutingKeyKind string `mwords:"*"` + TaskID string `mwords:"*"` + RunID string `mwords:"*"` + WorkerGroup string `mwords:"*"` + WorkerID string `mwords:"*"` + ProvisionerID string `mwords:"*"` + WorkerType string `mwords:"*"` + SchedulerID string `mwords:"*"` + TaskGroupID string `mwords:"*"` + Reserved string `mwords:"#"` +} + +func (binding ArtifactCreated) RoutingKey() string { + return generateRoutingKey(&binding) +} + +func (binding ArtifactCreated) ExchangeName() string { + return "exchange/taskcluster-queue/v1/artifact-created" +} + +func (binding ArtifactCreated) NewPayloadObject() interface{} { + return new(ArtifactCreatedMessage) +} + +// When a task is successfully completed by a worker a message is posted +// this exchange. +// This message is routed using the `runId`, `workerGroup` and `workerId` +// that completed the task. But information about additional runs is also +// available from the task status structure. +// +// See #taskCompleted +type TaskCompleted struct { + RoutingKeyKind string `mwords:"*"` + TaskID string `mwords:"*"` + RunID string `mwords:"*"` + WorkerGroup string `mwords:"*"` + WorkerID string `mwords:"*"` + ProvisionerID string `mwords:"*"` + WorkerType string `mwords:"*"` + SchedulerID string `mwords:"*"` + TaskGroupID string `mwords:"*"` + Reserved string `mwords:"#"` +} + +func (binding TaskCompleted) RoutingKey() string { + return generateRoutingKey(&binding) +} + +func (binding TaskCompleted) ExchangeName() string { + return "exchange/taskcluster-queue/v1/task-completed" +} + +func (binding TaskCompleted) NewPayloadObject() interface{} { + return new(TaskCompletedMessage) +} + +// When a task ran, but failed to complete successfully a message is posted +// to this exchange. This is same as worker ran task-specific code, but the +// task specific code exited non-zero. 
+// +// See #taskFailed +type TaskFailed struct { + RoutingKeyKind string `mwords:"*"` + TaskID string `mwords:"*"` + RunID string `mwords:"*"` + WorkerGroup string `mwords:"*"` + WorkerID string `mwords:"*"` + ProvisionerID string `mwords:"*"` + WorkerType string `mwords:"*"` + SchedulerID string `mwords:"*"` + TaskGroupID string `mwords:"*"` + Reserved string `mwords:"#"` +} + +func (binding TaskFailed) RoutingKey() string { + return generateRoutingKey(&binding) +} + +func (binding TaskFailed) ExchangeName() string { + return "exchange/taskcluster-queue/v1/task-failed" +} + +func (binding TaskFailed) NewPayloadObject() interface{} { + return new(TaskFailedMessage) +} + +// Whenever Taskcluster fails to run a message is posted to this exchange. +// This happens if the task isn't completed before its `deadline`, +// all retries failed (i.e. workers stopped responding), the task was +// canceled by another entity, or the task carried a malformed payload. +// +// The specific _reason_ is evident from that task status structure, refer +// to the `reasonResolved` property for the last run. 
+// +// See #taskException +type TaskException struct { + RoutingKeyKind string `mwords:"*"` + TaskID string `mwords:"*"` + RunID string `mwords:"*"` + WorkerGroup string `mwords:"*"` + WorkerID string `mwords:"*"` + ProvisionerID string `mwords:"*"` + WorkerType string `mwords:"*"` + SchedulerID string `mwords:"*"` + TaskGroupID string `mwords:"*"` + Reserved string `mwords:"#"` +} + +func (binding TaskException) RoutingKey() string { + return generateRoutingKey(&binding) +} + +func (binding TaskException) ExchangeName() string { + return "exchange/taskcluster-queue/v1/task-exception" +} + +func (binding TaskException) NewPayloadObject() interface{} { + return new(TaskExceptionMessage) +} + +// A message is published on task-group-resolved whenever all submitted +// tasks (whether scheduled or unscheduled) for a given task group have +// been resolved, regardless of whether they resolved as successful or +// not. A task group may be resolved multiple times, since new tasks may +// be submitted against an already resolved task group. 
+// +// See #taskGroupResolved +type TaskGroupResolved struct { + RoutingKeyKind string `mwords:"*"` + TaskGroupID string `mwords:"*"` + SchedulerID string `mwords:"*"` + Reserved string `mwords:"#"` +} + +func (binding TaskGroupResolved) RoutingKey() string { + return generateRoutingKey(&binding) +} + +func (binding TaskGroupResolved) ExchangeName() string { + return "exchange/taskcluster-queue/v1/task-group-resolved" +} + +func (binding TaskGroupResolved) NewPayloadObject() interface{} { + return new(TaskGroupResolvedMessage) +} + +func generateRoutingKey(x interface{}) string { + val := reflect.ValueOf(x).Elem() + p := make([]string, 0, val.NumField()) + for i := 0; i < val.NumField(); i++ { + valueField := val.Field(i) + typeField := val.Type().Field(i) + tag := typeField.Tag + if t := tag.Get("mwords"); t != "" { + if v := valueField.Interface(); v == "" { + p = append(p, t) + } else { + p = append(p, v.(string)) + } + } + } + return strings.Join(p, ".") +} diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueueevents/types.go b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueueevents/types.go new file mode 100644 index 0000000..d496257 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueueevents/types.go @@ -0,0 +1,533 @@ +// This source code file is AUTO-GENERATED by github.com/taskcluster/jsonschema2go + +package tcqueueevents + +import ( + tcclient "github.com/taskcluster/taskcluster/clients/client-go/v23" +) + +type ( + // Information about the artifact that was created + Artifact struct { + + // Mimetype for the artifact that was created. + // + // Max length: 255 + ContentType string `json:"contentType"` + + // Date and time after which the artifact created will be automatically + // deleted by the queue. + Expires tcclient.Time `json:"expires"` + + // Name of the artifact that was created, this is useful if you want to + // attempt to fetch the artifact. 
But keep in mind that just because an + // artifact is created doesn't mean that it's immediately available. + // + // Max length: 1024 + Name string `json:"name"` + + // This is the `storageType` for the request that was used to create the + // artifact. + // + // Possible values: + // * "reference" + // * "error" + StorageType string `json:"storageType"` + } + + // Message reporting a new artifact has been created for a given task. + ArtifactCreatedMessage struct { + + // Information about the artifact that was created + Artifact Artifact `json:"artifact"` + + // Id of the run on which artifact was created. + // + // Mininum: 0 + // Maximum: 1000 + RunID int64 `json:"runId"` + + // A representation of **task status** as known by the queue + Status TaskStatusStructure `json:"status"` + + // Message version + // + // Possible values: + // * 1 + Version int64 `json:"version"` + + // Identifier for the worker-group within which the run with the created + // artifacted is running. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerGroup string `json:"workerGroup"` + + // Identifier for the worker within which the run with the created artifact + // is running. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerID string `json:"workerId"` + } + + // JSON object with information about a run + RunInformation struct { + + // Reason for the creation of this run, + // **more reasons may be added in the future**. + // + // Possible values: + // * "scheduled" + // * "retry" + // * "task-retry" + // * "rerun" + // * "exception" + ReasonCreated string `json:"reasonCreated"` + + // Reason that run was resolved, this is mainly + // useful for runs resolved as `exception`. + // Note, **more reasons may be added in the future**, also this + // property is only available after the run is resolved. 
Some of these + // reasons, notably `intermittent-task`, `worker-shutdown`, and + // `claim-expired`, will trigger an automatic retry of the task. + // + // Possible values: + // * "completed" + // * "failed" + // * "deadline-exceeded" + // * "canceled" + // * "superseded" + // * "claim-expired" + // * "worker-shutdown" + // * "malformed-payload" + // * "resource-unavailable" + // * "internal-error" + // * "intermittent-task" + ReasonResolved string `json:"reasonResolved,omitempty"` + + // Date-time at which this run was resolved, ie. when the run changed + // state from `running` to either `completed`, `failed` or `exception`. + // This property is only present after the run as been resolved. + Resolved tcclient.Time `json:"resolved,omitempty"` + + // Id of this task run, `run-id`s always starts from `0` + // + // Mininum: 0 + // Maximum: 1000 + RunID int64 `json:"runId"` + + // Date-time at which this run was scheduled, ie. when the run was + // created in state `pending`. + Scheduled tcclient.Time `json:"scheduled"` + + // Date-time at which this run was claimed, ie. when the run changed + // state from `pending` to `running`. This property is only present + // after the run has been claimed. + Started tcclient.Time `json:"started,omitempty"` + + // State of this run + // + // Possible values: + // * "pending" + // * "running" + // * "completed" + // * "failed" + // * "exception" + State string `json:"state"` + + // Time at which the run expires and is resolved as `failed`, if the + // run isn't reclaimed. Note, only present after the run has been + // claimed. + TakenUntil tcclient.Time `json:"takenUntil,omitempty"` + + // Identifier for group that worker who executes this run is a part of, + // this identifier is mainly used for efficient routing. + // Note, this property is only present after the run is claimed. 
+ // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerGroup string `json:"workerGroup,omitempty"` + + // Identifier for worker evaluating this run within given + // `workerGroup`. Note, this property is only available after the run + // has been claimed. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerID string `json:"workerId,omitempty"` + } + + // Subset of a task definition + Task struct { + + // Arbitrary key-value tags (only strings limited to 4k). These can be used + // to attach informal metadata to a task. Use this for informal tags that + // tasks can be classified by. You can also think of strings here as + // candidates for formal metadata. Something like + // `purpose: 'build' || 'test'` is a good example. + // + // Default: {} + // + // Map entries: + // Max length: 4096 + Tags map[string]string `json:"tags"` + } + + // Message reporting that a task has complete successfully. + TaskCompletedMessage struct { + + // Id of the run that completed the task + // + // Mininum: 0 + // Maximum: 1000 + RunID int64 `json:"runId"` + + // A representation of **task status** as known by the queue + Status TaskStatusStructure `json:"status"` + + // Subset of a task definition containing values that are useful for determining + // whether a message is interesting to the receiver. Where the full task + // definition is required, the receiver should call queue.task to download that + // definition. + Task Var `json:"task,omitempty"` + + // Message version + // + // Possible values: + // * 1 + Version int64 `json:"version"` + + // Identifier for the worker-group within which this run ran. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerGroup string `json:"workerGroup"` + + // Identifier for the worker that executed this run. 
+ // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerID string `json:"workerId"` + } + + // Message reporting that a task has been defined. The task may or may not be + // _scheduled_ too. + TaskDefinedMessage struct { + + // A representation of **task status** as known by the queue + Status TaskStatusStructure `json:"status"` + + // Subset of a task definition containing values that are useful for determining + // whether a message is interesting to the receiver. Where the full task + // definition is required, the receiver should call queue.task to download that + // definition. + Task Var `json:"task,omitempty"` + + // Message version + // + // Possible values: + // * 1 + Version int64 `json:"version"` + } + + // Message reporting that Taskcluster have failed to run a task. + TaskExceptionMessage struct { + + // Id of the last run for the task, not provided if `deadline` + // was exceeded before a run was started. + // + // Mininum: 0 + // Maximum: 1000 + RunID int64 `json:"runId,omitempty"` + + // A representation of **task status** as known by the queue + Status TaskStatusStructure `json:"status"` + + // Subset of a task definition containing values that are useful for determining + // whether a message is interesting to the receiver. Where the full task + // definition is required, the receiver should call queue.task to download that + // definition. + Task Var `json:"task,omitempty"` + + // Message version + // + // Possible values: + // * 1 + Version int64 `json:"version"` + + // Identifier for the worker-group within which the last attempt of the task + // ran. Not provided, if `deadline` was exceeded before a run was started. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerGroup string `json:"workerGroup,omitempty"` + + // Identifier for the last worker that failed to report, causing the task + // to fail. Not provided, if `deadline` was exceeded before a run + // was started. 
+ // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerID string `json:"workerId,omitempty"` + } + + // Message reporting that a task failed to complete successfully. + TaskFailedMessage struct { + + // Id of the run that failed. + // + // Mininum: 0 + // Maximum: 1000 + RunID int64 `json:"runId"` + + // A representation of **task status** as known by the queue + Status TaskStatusStructure `json:"status"` + + // Subset of a task definition containing values that are useful for determining + // whether a message is interesting to the receiver. Where the full task + // definition is required, the receiver should call queue.task to download that + // definition. + Task Var `json:"task,omitempty"` + + // Message version + // + // Possible values: + // * 1 + Version int64 `json:"version"` + + // Identifier for the worker-group within which this run ran. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerGroup string `json:"workerGroup"` + + // Identifier for the worker that executed this run. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerID string `json:"workerId"` + } + + // Message written once a task group has no tasks to be run. It is + // possible for a task group to later have another task added, in which + // case this message will be sent again once it finishes. + TaskGroupResolvedMessage struct { + + // Identifier for the scheduler that created this task-group. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + SchedulerID string `json:"schedulerId"` + + // Identifier for the task-group being listed. 
+ // + // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ + TaskGroupID string `json:"taskGroupId"` + + // Message version + // + // Possible values: + // * 1 + Version int64 `json:"version,omitempty"` + } + + // Required task metadata + TaskMetadata struct { + + // Human readable description of the task, please **explain** what the + // task does. A few lines of documentation is not going to hurt you. + // + // Max length: 32768 + Description string `json:"description"` + + // Human readable name of task, used to very briefly given an idea about + // what the task does. + // + // Max length: 255 + Name string `json:"name"` + + // E-mail of person who caused this task, e.g. the person who did + // `hg push`. The person we should contact to ask why this task is here. + // + // Max length: 255 + Owner string `json:"owner"` + + // Link to source of this task, should specify a file, revision and + // repository. This should be place someone can go an do a git/hg blame + // to who came up with recipe for this task. 
+ // + // Syntax: ^https?:// + // Max length: 4096 + Source string `json:"source"` + } + + // Message reporting that a task is now pending + TaskPendingMessage struct { + + // Id of run that became pending, `run-id`s always starts from 0 + // + // Mininum: 0 + // Maximum: 1000 + RunID int64 `json:"runId"` + + // A representation of **task status** as known by the queue + Status TaskStatusStructure `json:"status"` + + // Subset of a task definition + Task Task `json:"task,omitempty"` + + // Message version + // + // Possible values: + // * 1 + Version int64 `json:"version"` + } + + // Message reporting that a given run of a task have started + TaskRunningMessage struct { + + // Id of the run that just started, always starts from 0 + // + // Mininum: 0 + // Maximum: 1000 + RunID int64 `json:"runId"` + + // A representation of **task status** as known by the queue + Status TaskStatusStructure `json:"status"` + + // Time at which the run expires and is resolved as `failed`, if the run + // isn't reclaimed. + TakenUntil tcclient.Time `json:"takenUntil"` + + // Message version + // + // Possible values: + // * 1 + Version int64 `json:"version"` + + // Identifier for the worker-group within which this run started. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerGroup string `json:"workerGroup"` + + // Identifier for the worker executing this run. + // + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + WorkerID string `json:"workerId"` + } + + // A representation of **task status** as known by the queue + TaskStatusStructure struct { + + // Deadline of the task, `pending` and `running` runs are + // resolved as **exception** if not resolved by other means + // before the deadline. Note, deadline cannot be more than + // 5 days into the future + Deadline tcclient.Time `json:"deadline"` + + // Task expiration, time at which task definition and + // status is deleted. 
Notice that all artifacts for the task + // must have an expiration that is no later than this. + Expires tcclient.Time `json:"expires"` + + // Unique identifier for a provisioner, that can supply specified + // `workerType` + // + // Syntax: ^[a-zA-Z0-9-_]{1,38}$ + ProvisionerID string `json:"provisionerId"` + + // Number of retries left for the task in case of infrastructure issues + // + // Mininum: 0 + // Maximum: 999 + RetriesLeft int64 `json:"retriesLeft"` + + // List of runs, ordered so that index `i` has `runId == i` + Runs []RunInformation `json:"runs"` + + // All tasks in a task group must have the same `schedulerId`. This is used for several purposes: + // + // * it can represent the entity that created the task; + // * it can limit addition of new tasks to a task group: the caller of + // `createTask` must have a scope related to the `schedulerId` of the task + // group; + // * it controls who can manipulate tasks, again by requiring + // `schedulerId`-related scopes; and + // * it appears in the routing key for Pulse messages about the task. + // + // Default: "-" + // Syntax: ^([a-zA-Z0-9-_]*)$ + // Min length: 1 + // Max length: 38 + SchedulerID string `json:"schedulerId"` + + // State of this task. This is just an auxiliary property derived from state + // of latests run, or `unscheduled` if none. + // + // Possible values: + // * "unscheduled" + // * "pending" + // * "running" + // * "completed" + // * "failed" + // * "exception" + State string `json:"state"` + + // Identifier for a group of tasks scheduled together with this task. + // Generally, all tasks related to a single event such as a version-control + // push or a nightly build have the same `taskGroupId`. This property + // defaults to `taskId` if it isn't specified. Tasks with `taskId` equal to + // the `taskGroupId` are, [by convention](/docs/manual/using/task-graph), + // decision tasks. 
+ // + // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ + TaskGroupID string `json:"taskGroupId"` + + // Unique task identifier, this is UUID encoded as + // [URL-safe base64](http://tools.ietf.org/html/rfc4648#section-5) and + // stripped of `=` padding. + // + // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ + TaskID string `json:"taskId"` + + // Unique identifier for a worker-type within a specific provisioner + // + // Syntax: ^[a-z]([-a-z0-9]{0,36}[a-z0-9])?$ + WorkerType string `json:"workerType"` + } + + // Subset of a task definition containing values that are useful for determining + // whether a message is interesting to the receiver. Where the full task + // definition is required, the receiver should call queue.task to download that + // definition. + Var struct { + + // Arbitrary key-value tags (only strings limited to 4k). These can be used + // to attach informal metadata to a task. Use this for informal tags that + // tasks can be classified by. You can also think of strings here as + // candidates for formal metadata. Something like + // `purpose: 'build' || 'test'` is a good example. 
+ // + // Default: {} + // + // Map entries: + // Max length: 4096 + Tags map[string]string `json:"tags"` + } +) diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/testcases.json b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/testcases.json new file mode 100644 index 0000000..dd8b631 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/testcases.json @@ -0,0 +1,102 @@ +[ + { + "description": "Unnamed Temp Creds with no Authorized Scopes", + "expectedTempCreds": { + "accessToken": "R4OVHWpIvy6KsqS4AWE51QwbvgLvsstS6e6UW8IfHUY", + "authorizedScopes": null, + "certificate": "{\"version\":1,\"scopes\":[\"scope/asd:fhjdf/X\",\"scope/asd:fhjdf/XYZ*\"],\"start\":1438964811744,\"expiry\":1439051211744,\"seed\":\"JYR4wzMCTG6XeDS2cDUCMwH0RFUXGfQjK7LgqD-e6lSQ\",\"signature\":\"45AlB/hKZZZz4Tf3NaidutasfgBnr4t2AxwBiGDQF9Q=\"}", + "clientId": "def/ghi@XXX" + }, + "expiry": "2015-08-08T16:26:51.744Z", + "permCreds": { + "accessToken": "tokenABCDEFGH", + "authorizedScopes": null, + "certificate": "", + "clientId": "def/ghi@XXX" + }, + "seed": "JYR4wzMCTG6XeDS2cDUCMwH0RFUXGfQjK7LgqD-e6lSQ", + "start": "2015-08-07T16:26:51.744Z", + "tempCredsName": "", + "tempCredsScopes": [ + "scope/asd:fhjdf/X", + "scope/asd:fhjdf/XYZ*" + ] + }, + { + "description": "Named Temp Creds with no Authorized Scopes", + "expectedTempCreds": { + "accessToken": "R4OVHWpIvy6KsqS4AWE51QwbvgLvsstS6e6UW8IfHUY", + "authorizedScopes": null, + "certificate": "{\"version\":1,\"scopes\":[\"scope/asd:fhjdf/X\",\"scope/asd:fhjdf/XYZ*\"],\"start\":1438964811744,\"expiry\":1439051211744,\"seed\":\"JYR4wzMCTG6XeDS2cDUCMwH0RFUXGfQjK7LgqD-e6lSQ\",\"signature\":\"nNEaLtZMiw627NuDbF5Z8HDFc57MGWCptXBQSYNFgBk=\",\"issuer\":\"def/ghi@XXX\"}", + "clientId": "abc/def/ghi" + }, + "expiry": "2015-08-08T16:26:51.744Z", + "permCreds": { + "accessToken": "tokenABCDEFGH", + "authorizedScopes": null, + "certificate": "", + "clientId": "def/ghi@XXX" + }, + 
"seed": "JYR4wzMCTG6XeDS2cDUCMwH0RFUXGfQjK7LgqD-e6lSQ", + "start": "2015-08-07T16:26:51.744Z", + "tempCredsName": "abc/def/ghi", + "tempCredsScopes": [ + "scope/asd:fhjdf/X", + "scope/asd:fhjdf/XYZ*" + ] + }, + { + "description": "Unnamed Temp Creds with Authorized Scopes", + "expectedTempCreds": { + "accessToken": "R4OVHWpIvy6KsqS4AWE51QwbvgLvsstS6e6UW8IfHUY", + "authorizedScopes": [ + "scope/asd:fhjdf/X" + ], + "certificate": "{\"version\":1,\"scopes\":[\"scope/asd:fhjdf/X\",\"scope/asd:fhjdf/XYZ*\"],\"start\":1438964811744,\"expiry\":1439051211744,\"seed\":\"JYR4wzMCTG6XeDS2cDUCMwH0RFUXGfQjK7LgqD-e6lSQ\",\"signature\":\"45AlB/hKZZZz4Tf3NaidutasfgBnr4t2AxwBiGDQF9Q=\"}", + "clientId": "def/ghi@XXX" + }, + "expiry": "2015-08-08T16:26:51.744Z", + "permCreds": { + "accessToken": "tokenABCDEFGH", + "authorizedScopes": [ + "scope/asd:fhjdf/X" + ], + "certificate": "", + "clientId": "def/ghi@XXX" + }, + "seed": "JYR4wzMCTG6XeDS2cDUCMwH0RFUXGfQjK7LgqD-e6lSQ", + "start": "2015-08-07T16:26:51.744Z", + "tempCredsName": "", + "tempCredsScopes": [ + "scope/asd:fhjdf/X", + "scope/asd:fhjdf/XYZ*" + ] + }, + { + "description": "Named Temp Creds with Authorized Scopes", + "expectedTempCreds": { + "accessToken": "R4OVHWpIvy6KsqS4AWE51QwbvgLvsstS6e6UW8IfHUY", + "authorizedScopes": [ + "scope/asd:fhjdf/X" + ], + "certificate": "{\"version\":1,\"scopes\":[\"scope/asd:fhjdf/X\",\"scope/asd:fhjdf/XYZ*\"],\"start\":1438964811744,\"expiry\":1439051211744,\"seed\":\"JYR4wzMCTG6XeDS2cDUCMwH0RFUXGfQjK7LgqD-e6lSQ\",\"signature\":\"nNEaLtZMiw627NuDbF5Z8HDFc57MGWCptXBQSYNFgBk=\",\"issuer\":\"def/ghi@XXX\"}", + "clientId": "abc/def/ghi" + }, + "expiry": "2015-08-08T16:26:51.744Z", + "permCreds": { + "accessToken": "tokenABCDEFGH", + "authorizedScopes": [ + "scope/asd:fhjdf/X" + ], + "certificate": "", + "clientId": "def/ghi@XXX" + }, + "seed": "JYR4wzMCTG6XeDS2cDUCMwH0RFUXGfQjK7LgqD-e6lSQ", + "start": "2015-08-07T16:26:51.744Z", + "tempCredsName": "abc/def/ghi", + "tempCredsScopes": [ + 
"scope/asd:fhjdf/X", + "scope/asd:fhjdf/XYZ*" + ] + } +] diff --git a/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/time.go b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/time.go new file mode 100644 index 0000000..fb59f21 --- /dev/null +++ b/vendor/github.com/taskcluster/taskcluster/clients/client-go/v23/time.go @@ -0,0 +1,42 @@ +package tcclient + +import ( + "errors" + "time" +) + +// Time wraps time.Time in order that json serialisation/deserialisation can be +// adapted. Marshaling time.Time types results in RFC3339 dates with nanosecond +// precision in the user's timezone. In order that the json date representation +// is consistent between what we send in json payloads, and what taskcluster +// services return, we wrap time.Time into type tcclient.Time which marshals +// instead to the same format used by the Taskcluster services; UTC based, with +// millisecond precision, using 'Z' timezone, e.g. 2015-10-27T20:36:19.255Z. +type Time time.Time + +// MarshalJSON implements the json.Marshaler interface. +// The time is a quoted string in RFC 3339 format, with sub-second precision added if present. +func (t Time) MarshalJSON() ([]byte, error) { + if y := time.Time(t).Year(); y < 0 || y >= 10000 { + // RFC 3339 is clear that years are 4 digits exactly. + // See golang.org/issue/4556#c15 for more discussion. + return nil, errors.New("queue.Time.MarshalJSON: year outside of range [0,9999]") + } + return []byte(`"` + t.String() + `"`), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// The time is expected to be a quoted string in RFC 3339 format. +func (t *Time) UnmarshalJSON(data []byte) (err error) { + // Fractional seconds are handled implicitly by Parse. + x := new(time.Time) + *x, err = time.Parse(`"`+time.RFC3339+`"`, string(data)) + *t = Time(*x) + return +} + +// Returns the Time in canonical RFC3339 representation, e.g. 
+// 2015-10-27T20:36:19.255Z +func (t Time) String() string { + return time.Time(t).UTC().Format("2006-01-02T15:04:05.000Z") +} diff --git a/vendor/github.com/tent/hawk-go/.gitignore b/vendor/github.com/tent/hawk-go/.gitignore new file mode 100644 index 0000000..9ed3b07 --- /dev/null +++ b/vendor/github.com/tent/hawk-go/.gitignore @@ -0,0 +1 @@ +*.test diff --git a/vendor/github.com/tent/hawk-go/.travis.yml b/vendor/github.com/tent/hawk-go/.travis.yml new file mode 100644 index 0000000..3438247 --- /dev/null +++ b/vendor/github.com/tent/hawk-go/.travis.yml @@ -0,0 +1,6 @@ +language: go +go: + - 1.3 + - tip +before_install: + - go get launchpad.net/gocheck diff --git a/vendor/github.com/tent/hawk-go/LICENSE b/vendor/github.com/tent/hawk-go/LICENSE new file mode 100644 index 0000000..88dcd4a --- /dev/null +++ b/vendor/github.com/tent/hawk-go/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 Tent.is, LLC. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Tent.is, LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/tent/hawk-go/README.md b/vendor/github.com/tent/hawk-go/README.md new file mode 100644 index 0000000..e3a333f --- /dev/null +++ b/vendor/github.com/tent/hawk-go/README.md @@ -0,0 +1,12 @@ +# hawk-go [![Build Status](https://travis-ci.org/tent/hawk-go.png?branch=master)](https://travis-ci.org/tent/hawk-go) + +hawk-go implements the [Hawk](https://github.com/hueniverse/hawk) HTTP +authentication scheme in Go. + +[**Documentation**](http://godoc.org/github.com/tent/hawk-go) + +## Installation + +```text +go get github.com/tent/hawk-go +``` diff --git a/vendor/github.com/tent/hawk-go/hawk.go b/vendor/github.com/tent/hawk-go/hawk.go new file mode 100644 index 0000000..a17d9c0 --- /dev/null +++ b/vendor/github.com/tent/hawk-go/hawk.go @@ -0,0 +1,691 @@ +// Package hawk implements the Hawk HTTP authentication scheme. +package hawk + +import ( + "bytes" + "crypto/hmac" + "crypto/rand" + "encoding/base64" + "fmt" + "hash" + "io" + "net" + "net/http" + "net/url" + "regexp" + "strconv" + "strings" + "time" +) + +// Now is a func() time.Time that is used by the package to get the current time. +var Now = time.Now + +// MaxTimestampSkew is the maximum ±skew that a request timestamp can have without returning ErrTimestampSkew. 
+var MaxTimestampSkew = time.Minute + +var ( + ErrNoAuth = AuthError("no Authorization header or bewit parameter found") + ErrReplay = AuthError("request nonce is being replayed") + ErrInvalidMAC = AuthError("invalid MAC") + ErrBewitExpired = AuthError("bewit expired") + ErrTimestampSkew = AuthError("timestamp skew too high") + ErrMissingServerAuth = AuthError("missing Server-Authentication header") + ErrInvalidBewitMethod = AuthError("bewit only allows HEAD and GET requests") +) + +type AuthError string + +func (e AuthError) Error() string { return "hawk: " + string(e) } + +type CredentialErrorType int + +const ( + UnknownID CredentialErrorType = iota + UnknownApp + IDAppMismatch +) + +func (t CredentialErrorType) String() string { + switch t { + case UnknownApp: + return "unknown app" + case IDAppMismatch: + return "id/app mismatch" + } + return "unknown id" +} + +// CredentialError is returned by a CredentialsLookupFunc when the provided credentials +// ID is invalid. +type CredentialError struct { + Type CredentialErrorType + Credentials *Credentials +} + +func (e *CredentialError) Error() string { + return fmt.Sprintf("hawk: credential error with id %s and app %s: %s", e.Credentials.ID, e.Credentials.App, e.Type) +} + +type Credentials struct { + ID string + Key string + Hash func() hash.Hash + + // Data may be set in a CredentialsLookupFunc to correlate the credentials + // with an internal data record. + Data interface{} + + App string + Delegate string +} + +func (creds *Credentials) MAC() hash.Hash { return hmac.New(creds.Hash, []byte(creds.Key)) } + +type AuthType int + +const ( + AuthHeader AuthType = iota + AuthResponse + AuthBewit +) + +func (a AuthType) String() string { + switch a { + case AuthResponse: + return "response" + case AuthBewit: + return "bewit" + default: + return "header" + } +} + +// A CredentialsLookupFunc is called by NewAuthFromRequest after parsing the +// request auth. The Credentials will never be nil and ID will always be set. 
+// App and Delegate will be set if provided in the request. This function must +// look up the corresponding Key and Hash and set them on the provided +// Credentials. If the Key/Hash are found and the App/Delegate are valid (if +// provided) the error should be nil. If the Key or App could not be found or +// the App does not match the ID, then a CredentialError must be returned. +// Errors will propagate to the caller of NewAuthFromRequest, so internal errors +// may be returned. +type CredentialsLookupFunc func(*Credentials) error + +// A NonceCheckFunc is called by NewAuthFromRequest and should make sure that +// the provided nonce is unique within the context of the provided time.Time and +// Credentials. It should return false if the nonce is being replayed. +type NonceCheckFunc func(string, time.Time, *Credentials) bool + +type AuthFormatError struct { + Field string + Err string +} + +func (e AuthFormatError) Error() string { return "hawk: invalid " + e.Field + ", " + e.Err } + +// ParseRequestHeader parses a Hawk header (provided in the Authorization +// HTTP header) and populates an Auth. If an error is returned it will always be +// of type AuthFormatError. +func ParseRequestHeader(header string) (*Auth, error) { + auth := &Auth{ActualTimestamp: Now()} + err := auth.ParseHeader(header, AuthHeader) + if err != nil { + return nil, err + } + + if auth.Credentials.ID == "" { + return nil, AuthFormatError{"id", "missing or empty"} + } + if auth.Timestamp.IsZero() { + return nil, AuthFormatError{"ts", "missing, empty, or zero"} + } + if auth.Nonce == "" { + return nil, AuthFormatError{"nonce", "missing or empty"} + } + auth.ReqHash = true + + return auth, nil +} + +// ParseBewit parses a bewit token provided in a URL parameter and populates an +// Auth. If an error is returned it will always be of type AuthFormatError. 
+func ParseBewit(bewit string) (*Auth, error) { + if len(bewit)%4 != 0 { + bewit += strings.Repeat("=", 4-len(bewit)%4) + } + decoded, err := base64.URLEncoding.DecodeString(bewit) + if err != nil { + return nil, AuthFormatError{"bewit", "malformed base64 encoding"} + } + components := bytes.SplitN(decoded, []byte(`\`), 4) + if len(components) != 4 { + return nil, AuthFormatError{"bewit", "missing components"} + } + + auth := &Auth{ + Credentials: Credentials{ID: string(components[0])}, + Ext: string(components[3]), + Method: "GET", + ActualTimestamp: Now(), + IsBewit: true, + } + + ts, err := strconv.ParseInt(string(components[1]), 10, 64) + if err != nil { + return nil, AuthFormatError{"ts", "not an integer"} + } + auth.Timestamp = time.Unix(ts, 0) + + auth.MAC = make([]byte, base64.StdEncoding.DecodedLen(len(components[2]))) + n, err := base64.StdEncoding.Decode(auth.MAC, components[2]) + if err != nil { + return nil, AuthFormatError{"mac", "malformed base64 encoding"} + } + auth.MAC = auth.MAC[:n] + + return auth, nil +} + +// NewAuthFromRequest parses a request containing an Authorization header or +// bewit parameter and populates an Auth. If creds is not nil it will be called +// to look up the associated credentials. If nonce is not nil it will be called +// to make sure the nonce is not replayed. +// +// If the request does not contain a bewit or Authorization header, ErrNoAuth is +// returned. If the request contains a bewit and it is not a GET or HEAD +// request, ErrInvalidBewitMethod is returned. If there is an error parsing the +// provided auth details, an AuthFormatError will be returned. If creds returns +// an error, it will be returned. If nonce returns false, ErrReplay will be +// returned. 
+func NewAuthFromRequest(req *http.Request, creds CredentialsLookupFunc, nonce NonceCheckFunc) (*Auth, error) { + header := req.Header.Get("Authorization") + bewit := req.URL.Query().Get("bewit") + + var auth *Auth + var err error + if header != "" { + auth, err = ParseRequestHeader(header) + if err != nil { + return nil, err + } + } + if auth == nil && bewit != "" { + if req.Method != "GET" && req.Method != "HEAD" { + return nil, ErrInvalidBewitMethod + } + auth, err = ParseBewit(bewit) + if err != nil { + return nil, err + } + } + if auth == nil { + return nil, ErrNoAuth + } + + auth.Method = req.Method + auth.RequestURI = req.URL.Path + if req.URL.RawQuery != "" { + auth.RequestURI += "?" + req.URL.RawQuery + } + if bewit != "" { + auth.Method = "GET" + bewitPattern, _ := regexp.Compile(`\?bewit=` + bewit + `\z|bewit=` + bewit + `&|&bewit=` + bewit + `\z`) + auth.RequestURI = bewitPattern.ReplaceAllString(auth.RequestURI, "") + } + auth.Host, auth.Port = extractReqHostPort(req) + if creds != nil { + err = creds(&auth.Credentials) + if err != nil { + return nil, err + } + } + if nonce != nil && !auth.IsBewit && !nonce(auth.Nonce, auth.Timestamp, &auth.Credentials) { + return nil, ErrReplay + } + return auth, nil +} + +func extractReqHostPort(req *http.Request) (host string, port string) { + if idx := strings.Index(req.Host, ":"); idx != -1 { + host, port, _ = net.SplitHostPort(req.Host) + } else { + host = req.Host + } + if req.URL.Host != "" { + if idx := strings.Index(req.Host, ":"); idx != -1 { + host, port, _ = net.SplitHostPort(req.Host) + } else { + host = req.URL.Host + } + } + if port == "" { + if req.URL.Scheme == "http" { + port = "80" + } else { + port = "443" + } + } + return +} + +// NewRequestAuth builds a client Auth based on req and creds. tsOffset will be +// applied to Now when setting the timestamp. 
+func NewRequestAuth(req *http.Request, creds *Credentials, tsOffset time.Duration) *Auth { + auth := &Auth{ + Method: req.Method, + Credentials: *creds, + Timestamp: Now().Add(tsOffset), + Nonce: nonce(), + RequestURI: req.URL.RequestURI(), + } + auth.Host, auth.Port = extractReqHostPort(req) + return auth +} + +// NewRequestAuth builds a client Auth based on uri and creds. tsOffset will be +// applied to Now when setting the timestamp. +func NewURLAuth(uri string, creds *Credentials, tsOffset time.Duration) (*Auth, error) { + u, err := url.Parse(uri) + if err != nil { + return nil, err + } + auth := &Auth{ + Method: "GET", + Credentials: *creds, + Timestamp: Now().Add(tsOffset), + } + if u.Path != "" { + // url.Parse unescapes the path, which is unexpected + auth.RequestURI = "/" + strings.SplitN(uri[8:], "/", 2)[1] + } else { + auth.RequestURI = "/" + } + auth.Host, auth.Port = extractURLHostPort(u) + return auth, nil +} + +func extractURLHostPort(u *url.URL) (host string, port string) { + if idx := strings.Index(u.Host, ":"); idx != -1 { + host, port, _ = net.SplitHostPort(u.Host) + } else { + host = u.Host + } + if port == "" { + if u.Scheme == "http" { + port = "80" + } else { + port = "443" + } + } + return +} + +func nonce() string { + b := make([]byte, 8) + _, err := io.ReadFull(rand.Reader, b) + if err != nil { + panic(err) + } + return base64.StdEncoding.EncodeToString(b)[:8] +} + +const headerVersion = "1" + +type Auth struct { + Credentials Credentials + + Method string + RequestURI string + Host string + Port string + + MAC []byte + Nonce string + Ext string + Hash []byte + + // ReqHash is true if the request contained a hash + ReqHash bool + IsBewit bool + Timestamp time.Time + + // ActualTimestamp is when the request was received + ActualTimestamp time.Time +} + +// field is of form: key="value" +func lexField(r *strings.Reader) (string, string, error) { + key := make([]byte, 0, 5) + val := make([]byte, 0, 32) + + // read the key + for { + ch, _ := 
r.ReadByte() + if ch == '=' { + break + } + if ch < 'a' || ch > 'z' { // fail if not ASCII lowercase letter + return "", "", AuthFormatError{"header", "cannot parse header field"} + } + key = append(key, ch) + } + if ch, _ := r.ReadByte(); ch != '"' { + return "", "", AuthFormatError{string(key), "cannot parse value"} + } + // read the value + for { + ch, _ := r.ReadByte() + if ch == '"' { + break + } + // character class is ASCII printable [\x20-\x7E] without \ and " + if ch < 0x20 || ch > 0x7E || ch == '\\' { + return "", "", AuthFormatError{string(key), "cannot parse value"} + } + val = append(val, ch) + } + + return string(key), string(val), nil +} + +func lexHeader(header string) (map[string]string, error) { + params := make(map[string]string, 8) + + r := strings.NewReader(header) + + for { + ch, eof := r.ReadByte() + if eof != nil { + break + } + + switch { + case ch == ' ' || ch == '\t' || ch == ',': //ignore spaces and commas + case ch >= 'a' && ch <= 'z': //beginning of key/value pair like 'id="abcdefg"' + r.UnreadByte() + key, val, err := lexField(r) + if err != nil { + return params, err + } + params[key] = val + default: //invalid character encountered + return params, AuthFormatError{"header", "cannot parse header"} + } + } + return params, nil +} + +// ParseHeader parses a Hawk request or response header and populates auth. +// t must be AuthHeader if the header is an Authorization header from a request +// or AuthResponse if the header is a Server-Authorization header from +// a response. 
+func (auth *Auth) ParseHeader(header string, t AuthType) error { + if len(header) < 4 || strings.ToLower(header[:4]) != "hawk" { + return AuthFormatError{"scheme", "must be Hawk"} + } + + fields, err := lexHeader(header[4:]) + if err != nil { + return err + } + + if hash, ok := fields["hash"]; ok { + auth.Hash, err = base64.StdEncoding.DecodeString(hash) + if err != nil { + return AuthFormatError{"hash", "malformed base64 encoding"} + } + } + auth.Ext = fields["ext"] + + mac := fields["mac"] + if mac == "" { + return AuthFormatError{"mac", "missing or empty"} + } + auth.MAC, err = base64.StdEncoding.DecodeString(mac) + if err != nil { + return AuthFormatError{"mac", "malformed base64 encoding"} + } + if t == AuthHeader { + auth.Credentials.App = fields["app"] + auth.Credentials.Delegate = fields["dlg"] + auth.Credentials.ID = fields["id"] + + if ts, ok := fields["ts"]; ok { + tsint, err := strconv.ParseInt(ts, 10, 64) + if err != nil { + return AuthFormatError{"ts", "not an integer"} + } + auth.Timestamp = time.Unix(tsint, 0) + } + auth.Nonce = fields["nonce"] + } + return nil +} + +// Valid confirms that the timestamp is within skew and verifies the MAC. +// +// If the request is valid, nil will be returned. If auth is a bewit and the +// method is not GET or HEAD, ErrInvalidBewitMethod will be returned. If auth is +// a bewit and the timestamp is after the the specified expiry, ErrBewitExpired +// will be returned. If auth is from a request header and the timestamp is +// outside the maximum skew, ErrTimestampSkew will be returned. If the MAC is +// not the expected value, ErrInvalidMAC will be returned. 
+func (auth *Auth) Valid() error { + t := AuthHeader + if auth.IsBewit { + t = AuthBewit + if auth.Method != "GET" && auth.Method != "HEAD" { + return ErrInvalidBewitMethod + } + if auth.ActualTimestamp.After(auth.Timestamp) { + return ErrBewitExpired + } + } else { + skew := auth.ActualTimestamp.Sub(auth.Timestamp) + if abs(skew) > MaxTimestampSkew { + return ErrTimestampSkew + } + } + if !hmac.Equal(auth.mac(t), auth.MAC) { + if auth.IsBewit && strings.HasPrefix(auth.RequestURI, "http") && len(auth.RequestURI) > 9 { + // try just the path + uri := auth.RequestURI + auth.RequestURI = "/" + strings.SplitN(auth.RequestURI[8:], "/", 2)[1] + if auth.Valid() == nil { + return nil + } + auth.RequestURI = uri + } + return ErrInvalidMAC + } + return nil +} + +func abs(d time.Duration) time.Duration { + if d < 0 { + return -d + } + return d +} + +// ValidResponse checks that a response Server-Authorization header is correct. +// +// ErrMissingServerAuth is returned if header is an empty string. ErrInvalidMAC +// is returned if the MAC is not the expected value. +func (auth *Auth) ValidResponse(header string) error { + if header == "" { + return ErrMissingServerAuth + } + err := auth.ParseHeader(header, AuthResponse) + if err != nil { + return err + } + if !hmac.Equal(auth.mac(AuthResponse), auth.MAC) { + return ErrInvalidMAC + } + return nil +} + +// PayloadHash initializes a hash for body validation. To validate a request or +// response body, call PayloadHash with contentType set to the body Content-Type +// with all parameters and prefix/suffix whitespace stripped, write the entire +// body to the returned hash, and then validate the hash with ValidHash. +func (auth *Auth) PayloadHash(contentType string) hash.Hash { + h := auth.Credentials.Hash() + h.Write([]byte("hawk." + headerVersion + ".payload\n" + contentType + "\n")) + return h +} + +// ValidHash writes the final newline to h and checks if it matches auth.Hash. 
+func (auth *Auth) ValidHash(h hash.Hash) bool { + h.Write([]byte("\n")) + return bytes.Equal(h.Sum(nil), auth.Hash) +} + +// SetHash writes the final newline to h and sets auth.Hash to the sum. This is +// used to specify a response payload hash. +func (auth *Auth) SetHash(h hash.Hash) { + h.Write([]byte("\n")) + auth.Hash = h.Sum(nil) + auth.ReqHash = false +} + +// ResponseHeader builds a response header based on the auth and provided ext, +// which may be an empty string. Use PayloadHash and SetHash before +// ResponseHeader to include a hash of the response payload. +func (auth *Auth) ResponseHeader(ext string) string { + auth.Ext = ext + if auth.ReqHash { + auth.Hash = nil + } + + h := `Hawk mac="` + base64.StdEncoding.EncodeToString(auth.mac(AuthResponse)) + `"` + if auth.Ext != "" { + h += `, ext="` + auth.Ext + `"` + } + if auth.Hash != nil { + h += `, hash="` + base64.StdEncoding.EncodeToString(auth.Hash) + `"` + } + + return h +} + +// RequestHeader builds a request header based on the auth. +func (auth *Auth) RequestHeader() string { + auth.MAC = auth.mac(AuthHeader) + + h := `Hawk id="` + auth.Credentials.ID + + `", mac="` + base64.StdEncoding.EncodeToString(auth.MAC) + + `", ts="` + strconv.FormatInt(auth.Timestamp.Unix(), 10) + + `", nonce="` + auth.Nonce + `"` + + if len(auth.Hash) > 0 { + h += `, hash="` + base64.StdEncoding.EncodeToString(auth.Hash) + `"` + } + if auth.Ext != "" { + h += `, ext="` + auth.Ext + `"` + } + if auth.Credentials.App != "" { + h += `, app="` + auth.Credentials.App + `"` + } + if auth.Credentials.Delegate != "" { + h += `, dlg="` + auth.Credentials.Delegate + `"` + } + + return h +} + +// Bewit creates and encoded request bewit parameter based on the auth. 
+func (auth *Auth) Bewit() string { + auth.Method = "GET" + auth.Nonce = "" + return strings.TrimRight(base64.URLEncoding.EncodeToString([]byte(auth.Credentials.ID+`\`+ + strconv.FormatInt(auth.Timestamp.Unix(), 10)+`\`+ + base64.StdEncoding.EncodeToString(auth.mac(AuthBewit))+`\`+ + auth.Ext)), "=") +} + +// NormalizedString builds the string that will be HMACed to create a request +// MAC. +func (auth *Auth) NormalizedString(t AuthType) string { + str := "hawk." + headerVersion + "." + t.String() + "\n" + + strconv.FormatInt(auth.Timestamp.Unix(), 10) + "\n" + + auth.Nonce + "\n" + + auth.Method + "\n" + + auth.RequestURI + "\n" + + auth.Host + "\n" + + auth.Port + "\n" + + base64.StdEncoding.EncodeToString(auth.Hash) + "\n" + + auth.Ext + "\n" + + if auth.Credentials.App != "" { + str += auth.Credentials.App + "\n" + str += auth.Credentials.Delegate + "\n" + } + + return str +} + +func (auth *Auth) mac(t AuthType) []byte { + mac := auth.Credentials.MAC() + mac.Write([]byte(auth.NormalizedString(t))) + return mac.Sum(nil) +} + +func (auth *Auth) tsMac(ts string) []byte { + mac := auth.Credentials.MAC() + mac.Write([]byte("hawk." + headerVersion + ".ts\n" + ts + "\n")) + return mac.Sum(nil) +} + +// StaleTimestampHeader builds a signed WWW-Authenticate response header for use +// when Valid returns ErrTimestampSkew. +func (auth *Auth) StaleTimestampHeader() string { + ts := strconv.FormatInt(Now().Unix(), 10) + return `Hawk ts="` + ts + + `", tsm="` + base64.StdEncoding.EncodeToString(auth.tsMac(ts)) + + `", error="Stale timestamp"` +} + +var tsHeaderRegex = regexp.MustCompile(`(ts|tsm|error)="([ !#-\[\]-~]+)"`) // character class is ASCII printable [\x20-\x7E] without \ and " + +// UpdateOffset parses a signed WWW-Authenticate response header containing +// a stale timestamp error and updates auth.Timestamp with an adjusted +// timestamp. 
+func (auth *Auth) UpdateOffset(header string) (time.Duration, error) { + if len(header) < 4 || strings.ToLower(header[:4]) != "hawk" { + return 0, AuthFormatError{"scheme", "must be Hawk"} + } + + matches := tsHeaderRegex.FindAllStringSubmatch(header, 3) + + var err error + var ts time.Time + var tsm []byte + + for _, match := range matches { + switch match[1] { + case "ts": + t, err := strconv.ParseInt(match[2], 10, 64) + if err != nil { + return 0, AuthFormatError{"ts", "not an integer"} + } + ts = time.Unix(t, 0) + case "tsm": + tsm, err = base64.StdEncoding.DecodeString(match[2]) + if err != nil { + return 0, AuthFormatError{"tsm", "malformed base64 encoding"} + } + } + } + + if !hmac.Equal(tsm, auth.tsMac(strconv.FormatInt(ts.Unix(), 10))) { + return 0, ErrInvalidMAC + } + + offset := ts.Sub(Now()) + auth.Timestamp = ts + auth.Nonce = nonce() + return offset, nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 417ae4a..8e6cb53 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,10 +1,13 @@ +# github.com/cenkalti/backoff/v3 v3.0.0 +github.com/cenkalti/backoff/v3 # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew +# github.com/fatih/camelcase v1.0.0 +github.com/fatih/camelcase # github.com/google/uuid v1.0.0 github.com/google/uuid # github.com/pborman/uuid v1.2.0 -## explicit github.com/pborman/uuid # github.com/pmezard/go-difflib v1.0.0 ## explicit @@ -15,12 +18,29 @@ github.com/streadway/amqp # github.com/stretchr/testify v1.1.4-0.20160524234229-8d64eb7173c7 ## explicit github.com/stretchr/testify/assert +# github.com/taskcluster/httpbackoff/v3 v3.0.0 +github.com/taskcluster/httpbackoff/v3 +# github.com/taskcluster/jsonschema2go v1.0.0 +github.com/taskcluster/jsonschema2go/text # github.com/taskcluster/pulse-go v1.0.0 ## explicit github.com/taskcluster/pulse-go/pulse +# github.com/taskcluster/slugid-go v1.1.0 +github.com/taskcluster/slugid-go/slugid +# github.com/taskcluster/taskcluster-lib-urls 
v12.0.0+incompatible +github.com/taskcluster/taskcluster-lib-urls +# github.com/taskcluster/taskcluster/clients/client-go/v23 v23.0.0 +## explicit +github.com/taskcluster/taskcluster/clients/client-go/v23 +github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueue +github.com/taskcluster/taskcluster/clients/client-go/v23/tcqueueevents +# github.com/tent/hawk-go v0.0.0-20161026210932-d341ea318957 +github.com/tent/hawk-go # github.com/urfave/cli v1.17.1-0.20160608151511-fa949b48f384 ## explicit github.com/urfave/cli # go.mozilla.org/mozlog v0.0.0-20160610165107-cd74695caf44 ## explicit go.mozilla.org/mozlog +# launchpad.net/gocheck v0.0.0-20140225173054-000000000087 +## explicit