diff --git a/Dockerfile b/Dockerfile
index 210fc3ff..0235d2d0 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,8 @@
 FROM ubuntu
-RUN apt-get update
-RUN apt-get install tzdata -y
+RUN if (dpkg -l | grep -cq tzdata); then \
+        echo "tzdata package already installed! Skipping tzdata installation"; \
+    else \
+        echo "Installing tzdata to avoid go panic caused by missing timezone data"; \
+        apt-get update && apt-get install tzdata -y --no-install-recommends apt-utils; \
+    fi
 COPY kube-monkey /kube-monkey
diff --git a/Makefile b/Makefile
index c81e972a..7c2b4b02 100644
--- a/Makefile
+++ b/Makefile
@@ -3,13 +3,30 @@ all: build
 ENVVAR = GOOS=linux GOARCH=amd64 CGO_ENABLED=0
 TAG = v0.1.0
 
+.PHONY: all build container clean
+
 build: clean
 	$(ENVVAR) go build -o kube-monkey
 
+# Suppressing the echo of the docker build command avoids printing the proxy env variables
 container: build
+ifneq ($(and $(http_proxy), $(https_proxy)),)
+	@echo Starting Docker build, importing both http_proxy and https_proxy env variables
+	@docker build --build-arg http_proxy=$(http_proxy) --build-arg https_proxy=$(https_proxy) -t kube-monkey:$(TAG) .
+else
+ifdef http_proxy
+	@echo Starting Docker build, importing http_proxy
+	@docker build --build-arg http_proxy=$(http_proxy) -t kube-monkey:$(TAG) .
+else
+ifdef https_proxy
+	@echo Starting Docker build, importing https_proxy
+	@docker build --build-arg https_proxy=$(https_proxy) -t kube-monkey:$(TAG) .
+else
+	@echo no env proxies set, building normally
 	docker build -t kube-monkey:$(TAG) .
+endif
+endif
+endif
 
 clean:
 	rm -f kube-monkey
-
-.PHONY: all build container clean
diff --git a/README.md b/README.md
index dd1e8e0c..e8d3bef1 100644
--- a/README.md
+++ b/README.md
@@ -94,6 +94,10 @@ in other namespaces (eg. `kube-system`).
 
 See dir [`examples/`](https://github.com/asobti/kube-monkey/tree/master/examples) for example Kubernetes yaml files.
 
+## Logging
+
+kube-monkey uses glog and supports all of glog's command-line flags. To specify a custom v level or a custom log directory on the pod, see `args: ["-v=5", "-log_dir=/path/to/custom/log"]` in the [example deployment file](https://github.com/asobti/kube-monkey/tree/master/examples/deployment.yaml).
+
 ## Compatibility with Kubernetes
 
 kube-monkey is built using v1.5 of [kubernetes/client-go](https://github.com/kubernetes/client-go).
 Refer to the
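The new Logging section above relies on glog's verbosity gating: a message wrapped in `glog.V(n)` is only emitted when the process is started with `-v=n` or higher. This minimal, standalone sketch (not part of the patch) illustrates that behavior for the flags the README mentions; run it with something like `-v=5 -alsologtostderr` to see all lines.

```go
package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	// glog registers -v, -log_dir, -logtostderr, -alsologtostderr, etc. as standard flags.
	flag.Parse()

	glog.Info("always logged at INFO")
	glog.V(1).Info("logged only when -v >= 1")
	glog.V(5).Info("logged only when -v >= 5, matching the example args in deployment.yaml")

	// Flush buffered log lines before exit.
	glog.Flush()
}
```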
diff --git a/calendar/calendar.go b/calendar/calendar.go
index afe9c93d..6c46c865 100644
--- a/calendar/calendar.go
+++ b/calendar/calendar.go
@@ -1,9 +1,10 @@
 package calendar
 
 import (
-	"fmt"
-	"math/rand"
 	"time"
+	"math/rand"
+
+	"github.com/golang/glog"
 )
 
 // Checks if specified Time is a weekday
@@ -15,7 +16,9 @@ func isWeekday(t time.Time) bool {
 		return false
 	}
 
-	panic(fmt.Sprintf("Unrecognized day of the week: %s", t.Weekday().String()))
+	glog.Fatalf("Unrecognized day of the week: %s", t.Weekday().String())
+
+	panic("Explicit Panic to avoid compiler error: missing return at end of function")
 }
 
 // Returns the next weekday in Location
diff --git a/chaos/chaos.go b/chaos/chaos.go
index 46e5af65..a68162cd 100644
--- a/chaos/chaos.go
+++ b/chaos/chaos.go
@@ -2,13 +2,17 @@ package chaos
 
 import (
 	"fmt"
+	"time"
+	"math/rand"
+
+	"github.com/golang/glog"
+
 	"github.com/asobti/kube-monkey/config"
 	"github.com/asobti/kube-monkey/deployments"
 	"github.com/asobti/kube-monkey/kubernetes"
+
 	kube "k8s.io/client-go/1.5/kubernetes"
 	"k8s.io/client-go/1.5/pkg/api/v1"
-	"math/rand"
-	"time"
 )
 
 type Chaos struct {
@@ -75,8 +79,7 @@ func (c *Chaos) Execute(resultchan chan<- *ChaosResult) {
 	// Do the termination
 	killAll, err := c.deployment.HasKillAll(client)
 	if err != nil {
-		fmt.Printf("Failed to check KillAll label for deployment %s. Proceeding with termination of a single pod.\n", c.deployment.Name())
-		fmt.Printf(err.Error())
+		glog.Errorf("Failed to check KillAll label for deployment %s. Proceeding with termination of a single pod. Error: %v", c.deployment.Name(), err.Error())
 	}
 
 	if killAll {
@@ -107,14 +110,14 @@ func (c *Chaos) Terminate(client *kube.Clientset) error {
 
 	targetPod := RandomPodName(pods)
 
-	fmt.Printf("Terminating pod %s for deployment %s\n", targetPod, c.deployment.Name())
+	glog.V(1).Infof("Terminating pod %s for deployment %s\n", targetPod, c.deployment.Name())
 	return c.DeletePod(client, targetPod)
 }
 
 // Terminates ALL pods for the deployment
 // Not the default, or recommended, behavior
 func (c *Chaos) TerminateAll(client *kube.Clientset) error {
-	fmt.Printf("Terminating ALL pods for deployment %s\n", c.deployment.Name())
+	glog.V(1).Infof("Terminating ALL pods for deployment %s\n", c.deployment.Name())
 
 	pods, err := c.deployment.Pods(client)
 	if err != nil {
@@ -128,7 +131,7 @@ func (c *Chaos) TerminateAll(client *kube.Clientset) error {
 	for _, pod := range pods {
 		// In case of error, log it and move on to next pod
 		if err = c.DeletePod(client, pod.Name); err != nil {
-			fmt.Printf("Failed to delete pod %s for deployment %s", pod.Name, c.deployment.Name())
+			glog.Errorf("Failed to delete pod %s for deployment %s", pod.Name, c.deployment.Name())
 		}
 	}
 
@@ -138,7 +141,7 @@ func (c *Chaos) TerminateAll(client *kube.Clientset) error {
 // Deletes a pod for a deployment
 func (c *Chaos) DeletePod(client *kube.Clientset, podName string) error {
 	if config.DryRun() {
-		fmt.Printf("[DryRun Mode] Terminated pod %s for deployment %s\n", podName, c.deployment.Name())
+		glog.V(1).Infof("[DryRun Mode] Terminated pod %s for deployment %s\n", podName, c.deployment.Name())
 		return nil
 	} else {
 		return c.deployment.DeletePod(client, podName)
 	}
@@ -157,13 +160,13 @@ func (c *Chaos) NewResult(e error) *ChaosResult {
 func CreateClient() (*kube.Clientset, error) {
 	client, err := kubernetes.NewInClusterClient()
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("Failed to generate NewInClusterClient: %v", err)
 	}
 
 	if kubernetes.VerifyClient(client) {
 		return client, nil
 	} else {
-		return nil, fmt.Errorf("Unable to verify Kubernetes client")
+		return nil, fmt.Errorf("Unable to verify client connectivity to Kubernetes apiserver")
 	}
 }
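`Terminate` above picks its single victim via `RandomPodName(pods)`, which is not part of this diff. As a rough sketch only, and assuming the helper simply chooses uniformly from the candidate pods (the plain string-slice signature here is an assumption for illustration), the selection could look like this:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// randomPodName returns one name from the candidate list, chosen uniformly at random.
func randomPodName(names []string) string {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	return names[r.Intn(len(names))]
}

func main() {
	pods := []string{"nginx-1234", "nginx-5678", "nginx-9012"}
	fmt.Println("victim:", randomPodName(pods))
}
```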
fmt.Errorf("Unable to verify Kubernetes client") + return nil, fmt.Errorf("Unable to verify client connectivity to Kubernetes apiserver") } } diff --git a/config/config.go b/config/config.go index 5b906d0f..2e562382 100644 --- a/config/config.go +++ b/config/config.go @@ -1,12 +1,15 @@ package config import ( - "fmt" - "github.com/asobti/kube-monkey/config/param" - "github.com/fsnotify/fsnotify" + "time" + + "github.com/golang/glog" "github.com/spf13/viper" + "github.com/fsnotify/fsnotify" + + "github.com/asobti/kube-monkey/config/param" + "k8s.io/client-go/1.5/pkg/util/sets" - "time" ) const ( @@ -46,7 +49,7 @@ func setupWatch() { // TODO: This does not appear to be working viper.WatchConfig() viper.OnConfigChange(func(e fsnotify.Event) { - fmt.Println("Config change detected") + glog.V(2).Infoln("Config change detected") ValidateConfigs() }) } @@ -61,7 +64,12 @@ func Init() error { return err } - ValidateConfigs() + if err := ValidateConfigs(); err != nil { + glog.Errorf("Failed to validate %v", err) + return err + } else { + glog.V(3).Info("Successfully validated configs") + } setupWatch() return nil } @@ -74,7 +82,7 @@ func Timezone() *time.Location { tz := viper.GetString(param.Timezone) location, err := time.LoadLocation(tz) if err != nil { - panic(err.Error()) + glog.Fatal(err.Error()) } return location } diff --git a/config/validations.go b/config/validations.go index 8a75229a..a9256696 100644 --- a/config/validations.go +++ b/config/validations.go @@ -2,37 +2,37 @@ package config import ( "fmt" + "github.com/asobti/kube-monkey/config/param" ) func ValidateConfigs() error { // RunHour should be [0, 23] runHour := RunHour() - if !IsValidHour(runHour) { - return fmt.Errorf("%s is outside valid range of [0,23]", param.RunHour) + return fmt.Errorf("RunHour: %s is outside valid range of [0,23]", param.RunHour) } // StartHour should be [0, 23] startHour := StartHour() if !IsValidHour(startHour) { - return fmt.Errorf("%s is outside valid range of [0,23]", param.StartHour) + return fmt.Errorf("StartHour: %s is outside valid range of [0,23]", param.StartHour) } // EndHour should be [0, 23] endHour := EndHour() if !IsValidHour(endHour) { - return fmt.Errorf("%s is outside valid range of [0,23]", param.EndHour) + return fmt.Errorf("EndHour: %s is outside valid range of [0,23]", param.EndHour) } // StartHour should be < EndHour if !(startHour < endHour) { - return fmt.Errorf("%s must be less than %s", param.StartHour, param.EndHour) + return fmt.Errorf("StartHour: %s must be less than %s", param.StartHour, param.EndHour) } // RunHour should be < StartHour if !(runHour < startHour) { - return fmt.Errorf("%s should be less than %s", param.RunHour, param.StartHour) + return fmt.Errorf("RunHour: %s should be less than %s", param.RunHour, param.StartHour) } return nil diff --git a/deployments/deployments.go b/deployments/deployments.go index 7fc2146d..f94f4c6f 100644 --- a/deployments/deployments.go +++ b/deployments/deployments.go @@ -2,7 +2,10 @@ package deployments import ( "fmt" + "strconv" + "github.com/asobti/kube-monkey/config" + kube "k8s.io/client-go/1.5/kubernetes" "k8s.io/client-go/1.5/pkg/api" "k8s.io/client-go/1.5/pkg/api/v1" @@ -10,7 +13,6 @@ import ( "k8s.io/client-go/1.5/pkg/labels" "k8s.io/client-go/1.5/pkg/selection" "k8s.io/client-go/1.5/pkg/util/sets" - "strconv" ) type Deployment struct { diff --git a/deployments/eligible_deployments.go b/deployments/eligible_deployments.go index 3c609775..400d8aa7 100644 --- a/deployments/eligible_deployments.go +++ 
diff --git a/deployments/deployments.go b/deployments/deployments.go
index 7fc2146d..f94f4c6f 100644
--- a/deployments/deployments.go
+++ b/deployments/deployments.go
@@ -2,7 +2,10 @@ package deployments
 
 import (
 	"fmt"
+	"strconv"
+
 	"github.com/asobti/kube-monkey/config"
+
 	kube "k8s.io/client-go/1.5/kubernetes"
 	"k8s.io/client-go/1.5/pkg/api"
 	"k8s.io/client-go/1.5/pkg/api/v1"
@@ -10,7 +13,6 @@ import (
 	"k8s.io/client-go/1.5/pkg/labels"
 	"k8s.io/client-go/1.5/pkg/selection"
 	"k8s.io/client-go/1.5/pkg/util/sets"
-	"strconv"
 )
 
 type Deployment struct {
diff --git a/deployments/eligible_deployments.go b/deployments/eligible_deployments.go
index 3c609775..400d8aa7 100644
--- a/deployments/eligible_deployments.go
+++ b/deployments/eligible_deployments.go
@@ -1,9 +1,11 @@
 package deployments
 
 import (
-	"fmt"
+	"github.com/golang/glog"
+
 	"github.com/asobti/kube-monkey/config"
 	"github.com/asobti/kube-monkey/kubernetes"
+
 	"k8s.io/client-go/1.5/pkg/api"
 	"k8s.io/client-go/1.5/pkg/apis/extensions/v1beta1"
 	"k8s.io/client-go/1.5/pkg/labels"
@@ -23,7 +25,7 @@ func EligibleDeployments() ([]*Deployment, error) {
 	for _, dep := range enabledDeployments {
 		deployment, err := New(&dep)
 		if err != nil {
-			fmt.Printf("Skipping eligible deployment %s because of error:\n%s\n", dep.Name, err.Error())
+			glog.V(3).Infof("Skipping eligible deployment %s because of error:\n%s\n", dep.Name, err.Error())
 			continue
 		}
diff --git a/examples/deployment.yaml b/examples/deployment.yaml
index 3732f3a0..aadeb3c0 100644
--- a/examples/deployment.yaml
+++ b/examples/deployment.yaml
@@ -15,6 +15,7 @@
       - name: kube-monkey
         command:
           - "/kube-monkey"
+        args: ["-v=5", "-log_dir=/var/log/kube-monkey"]
         image: kube-monkey:v0.1.0
         volumeMounts:
           - name: config-volume
diff --git a/kubemonkey/kubemonkey.go b/kubemonkey/kubemonkey.go
index e713025e..0dd7b0bc 100644
--- a/kubemonkey/kubemonkey.go
+++ b/kubemonkey/kubemonkey.go
@@ -1,35 +1,29 @@
 package kubemonkey
 
 import (
-	"fmt"
-	"github.com/asobti/kube-monkey/calendar"
+	"time"
+
+	"github.com/golang/glog"
+
 	"github.com/asobti/kube-monkey/chaos"
 	"github.com/asobti/kube-monkey/config"
-	"github.com/asobti/kube-monkey/kubernetes"
+	"github.com/asobti/kube-monkey/calendar"
 	"github.com/asobti/kube-monkey/schedule"
-	"time"
 )
 
 func verifyKubeClient() error {
-	client, err := kubernetes.NewInClusterClient()
-	if err != nil {
-		return err
-	}
-	if !kubernetes.VerifyClient(client) {
-		fmt.Println(err)
-		return fmt.Errorf("Unable to verify client connectivity to Kubernetes server")
-	}
-	return nil
+	_, err := chaos.CreateClient()
+	return err
 }
 
 func durationToNextRun(runhour int, location *time.Location) time.Duration {
 	if config.DebugEnabled() {
 		debugDelayDuration := config.DebugScheduleDelay()
-		fmt.Printf("Debug mode detected. Next run scheduled in %.0f sec\n", debugDelayDuration.Seconds())
+		glog.V(2).Infof("Debug mode detected! Generating next schedule in %.0f sec\n", debugDelayDuration.Seconds())
 		return debugDelayDuration
 	} else {
 		nextRun := calendar.NextRuntime(location, runhour)
-		fmt.Printf("Next run scheduled at %s\n", nextRun)
+		glog.V(2).Infof("Generating next schedule at %s\n", nextRun)
 		return nextRun.Sub(time.Now())
 	}
 }
@@ -48,7 +42,7 @@ func Run() error {
 
 	schedule, err := schedule.New()
 	if err != nil {
-		panic(err.Error())
+		glog.Fatal(err.Error())
 	}
 	schedule.Print()
 	ScheduleTerminations(schedule.Entries())
@@ -67,19 +61,18 @@ func ScheduleTerminations(entries []*chaos.Chaos) {
 	completedCount := 0
 	var result *chaos.ChaosResult
 
-	fmt.Println("Waiting for terminations to run")
+	glog.V(3).Infof("Waiting for terminations to run")
 
 	// Gather results
 	for completedCount < len(entries) {
 		result = <-resultchan
 		if result.Error() != nil {
-			fmt.Printf("Failed to execute termination for deployment %s. Error:\n", result.Deployment().Name())
-			fmt.Println(result.Error().Error())
+			glog.Errorf("Failed to execute termination for deployment %s. Error: %v", result.Deployment().Name(), result.Error().Error())
 		} else {
-			fmt.Printf("Termination successfully executed for deployment %s\n", result.Deployment().Name())
+			glog.V(2).Infof("Termination successfully executed for deployment %s\n", result.Deployment().Name())
 		}
 		completedCount++
 	}
 
-	fmt.Println("All terminations done")
+	glog.V(3).Info("All terminations done")
 }
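`durationToNextRun` above defers to `calendar.NextRuntime`, whose body is not in this diff. Under the assumption that it simply finds the next occurrence of the configured run hour in the given timezone (weekday handling from calendar.go omitted here), the calculation can be sketched like this; `nextRuntime` is a hypothetical stand-in, not the project's actual implementation.

```go
package main

import (
	"fmt"
	"time"
)

// nextRuntime returns the next occurrence of runhour:00 in loc, rolling over to
// the next day if that hour has already passed today.
func nextRuntime(loc *time.Location, runhour int) time.Time {
	now := time.Now().In(loc)
	next := time.Date(now.Year(), now.Month(), now.Day(), runhour, 0, 0, 0, loc)
	if !next.After(now) {
		next = next.AddDate(0, 0, 1)
	}
	return next
}

func main() {
	loc, err := time.LoadLocation("America/New_York")
	if err != nil {
		panic(err)
	}
	next := nextRuntime(loc, 8)
	fmt.Printf("next run at %s (in %s)\n", next, next.Sub(time.Now()).Round(time.Second))
}
```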
Error: %v", result.Deployment().Name(), result.Error().Error()) } else { - fmt.Printf("Termination successfully executed for deployment %s\n", result.Deployment().Name()) + glog.V(2).Infof("Termination successfully executed for deployment %s\n", result.Deployment().Name()) } completedCount++ } - fmt.Println("All terminations done") + glog.V(3).Info("All terminations done") } diff --git a/kubernetes/kubernetes.go b/kubernetes/kubernetes.go index 7fdda8c9..b59a4f94 100644 --- a/kubernetes/kubernetes.go +++ b/kubernetes/kubernetes.go @@ -1,27 +1,29 @@ package kubernetes import ( + "github.com/golang/glog" + cfg "github.com/asobti/kube-monkey/config" + kube "k8s.io/client-go/1.5/kubernetes" "k8s.io/client-go/1.5/rest" - "fmt" ) func NewInClusterClient() (*kube.Clientset, error) { config, err := rest.InClusterConfig() if err != nil { - fmt.Println(err) + glog.Errorf("failed to obtain config from InClusterConfig: %v", err) return nil, err } if apiserverHost, override := cfg.ClusterAPIServerHost(); override { - fmt.Printf("API server host overriden to: %s\n", apiserverHost) + glog.V(1).Infof("API server host overriden to: %s\n", apiserverHost) config.Host = apiserverHost } clientset, err := kube.NewForConfig(config) if err != nil { - fmt.Println(err) + glog.Errorf("failed to create clientset in NewForConfig: %v", err) return nil, err } return clientset, nil diff --git a/main.go b/main.go index a20745fa..bbd958ee 100644 --- a/main.go +++ b/main.go @@ -2,25 +2,56 @@ package main import ( "fmt" + "os" + "flag" + + "github.com/golang/glog" + "github.com/asobti/kube-monkey/config" "github.com/asobti/kube-monkey/kubemonkey" ) +func glogUsage() { + fmt.Fprintf(os.Stderr, "usage: example -stderrthreshold=[INFO|WARN|FATAL] -log_dir=[string]\n", ) + flag.PrintDefaults() + os.Exit(2) +} + +func initLogging() { + // Check commandline options or "flags" for glog parameters + // to be picked up by the glog module + flag.Usage = glogUsage + flag.Parse() + + if _, err := os.Stat(flag.Lookup("log_dir").Value.String()); os.IsNotExist(err) { + err = os.MkdirAll(flag.Lookup("log_dir").Value.String(), os.ModePerm) + if (err != nil) { + glog.Errorf("Failed to open custom log directory; defaulting to /tmp! 
Error: %v", flag.Lookup("log_dir").Value, err) + } else { + glog.V(8).Infof("Created custom logging %s directory!", flag.Lookup("log_dir").Value) + } + } + // Since km runs as a k8 pod, log everything to stderr (stdout not supported) + // this takes advantage of k8's logging driver allowing kubectl logs kube-monkey + flag.Lookup("alsologtostderr").Value.Set("true") +} + func initConfig() { if err := config.Init(); err != nil { - panic(err.Error()) + glog.Fatal(err.Error()) } } func main() { - // TODO: Set up logging - + // Initialize logging + initLogging() + // Initialize configs initConfig() - - fmt.Println("Starting kube-monkey...") - + + glog.Infof("Starting kube-monkey with v logging level %v and local log directory %s", flag.Lookup("v").Value, flag.Lookup("log_dir").Value) + if err := kubemonkey.Run(); err != nil { - panic(err.Error()) + glog.Fatal(err.Error()) } } diff --git a/schedule/schedule.go b/schedule/schedule.go index d4795d5d..f8c011f3 100644 --- a/schedule/schedule.go +++ b/schedule/schedule.go @@ -1,13 +1,15 @@ package schedule import ( - "fmt" - "github.com/asobti/kube-monkey/calendar" + "time" + "math/rand" + + "github.com/golang/glog" + "github.com/asobti/kube-monkey/chaos" "github.com/asobti/kube-monkey/config" + "github.com/asobti/kube-monkey/calendar" "github.com/asobti/kube-monkey/deployments" - "math/rand" - "time" ) type Schedule struct { @@ -23,22 +25,22 @@ func (s *Schedule) Add(entry *chaos.Chaos) { } func (s *Schedule) Print() { - fmt.Println("********** Today's schedule **********") + glog.V(3).Info("\t********** Today's schedule **********") if len(s.entries) == 0 { - fmt.Println("No terminations scheduled") + glog.V(3).Info("No terminations scheduled") } else { - fmt.Printf("\tDeployment\t\tTermination time\n") - fmt.Printf("\t----------\t\t----------------\n") + glog.V(3).Info("\tDeployment\t\tTermination time\n") + glog.V(3).Info("\t----------\t\t----------------\n") for _, chaos := range s.entries { - fmt.Printf("\t%s\t\t%s\n", chaos.Deployment().Name(), chaos.KillAt()) + glog.V(3).Infof("\t%s\t\t%s\n", chaos.Deployment().Name(), chaos.KillAt()) } } - fmt.Println("********** End of schedule **********") + glog.V(3).Info("\t********** End of schedule **********") } func New() (*Schedule, error) { - fmt.Println("Generating schedule for terminations") + glog.V(2).Info("Generating schedule for terminations") deployments, err := deployments.EligibleDeployments() if err != nil { return nil, err diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/LICENSE deleted file mode 100644 index af39a91e..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md deleted file mode 100644 index 0ab09984..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Azure Storage SDK for Go - -The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform operations in Azure Storage Service. To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](../arm/storage) package. For your classic storage accounts (Azure Service Management / ASM), use [github.com/Azure/azure-sdk-for-go/management/storageservice](../management/storageservice) package. - -This package includes support for [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/) \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go deleted file mode 100644 index f3c9ba6c..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go +++ /dev/null @@ -1,1280 +0,0 @@ -package storage - -import ( - "bytes" - "encoding/xml" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -// BlobStorageClient contains operations for Microsoft Azure Blob Storage -// Service. -type BlobStorageClient struct { - client Client -} - -// A Container is an entry in ContainerListResponse. -type Container struct { - Name string `xml:"Name"` - Properties ContainerProperties `xml:"Properties"` - // TODO (ahmetalpbalkan) Metadata -} - -// ContainerProperties contains various properties of a container returned from -// various endpoints like ListContainers. -type ContainerProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` - LeaseStatus string `xml:"LeaseStatus"` - LeaseState string `xml:"LeaseState"` - LeaseDuration string `xml:"LeaseDuration"` - // TODO (ahmetalpbalkan) remaining fields -} - -// ContainerListResponse contains the response fields from -// ListContainers call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -type ContainerListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Containers []Container `xml:"Containers>Container"` -} - -// A Blob is an entry in BlobListResponse. -type Blob struct { - Name string `xml:"Name"` - Properties BlobProperties `xml:"Properties"` - Metadata BlobMetadata `xml:"Metadata"` -} - -// BlobMetadata is a set of custom name/value pairs. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx -type BlobMetadata map[string]string - -type blobMetadataEntries struct { - Entries []blobMetadataEntry `xml:",any"` -} -type blobMetadataEntry struct { - XMLName xml.Name - Value string `xml:",chardata"` -} - -// UnmarshalXML converts the xml:Metadata into Metadata map -func (bm *BlobMetadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - var entries blobMetadataEntries - if err := d.DecodeElement(&entries, &start); err != nil { - return err - } - for _, entry := range entries.Entries { - if *bm == nil { - *bm = make(BlobMetadata) - } - (*bm)[strings.ToLower(entry.XMLName.Local)] = entry.Value - } - return nil -} - -// MarshalXML implements the xml.Marshaler interface. It encodes -// metadata name/value pairs as they would appear in an Azure -// ListBlobs response. -func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { - entries := make([]blobMetadataEntry, 0, len(bm)) - for k, v := range bm { - entries = append(entries, blobMetadataEntry{ - XMLName: xml.Name{Local: http.CanonicalHeaderKey(k)}, - Value: v, - }) - } - return enc.EncodeElement(blobMetadataEntries{ - Entries: entries, - }, start) -} - -// BlobProperties contains various properties of a blob -// returned in various endpoints like ListBlobs or GetBlobProperties. -type BlobProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` - ContentMD5 string `xml:"Content-MD5"` - ContentLength int64 `xml:"Content-Length"` - ContentType string `xml:"Content-Type"` - ContentEncoding string `xml:"Content-Encoding"` - CacheControl string `xml:"Cache-Control"` - ContentLanguage string `xml:"Cache-Language"` - BlobType BlobType `xml:"x-ms-blob-blob-type"` - SequenceNumber int64 `xml:"x-ms-blob-sequence-number"` - CopyID string `xml:"CopyId"` - CopyStatus string `xml:"CopyStatus"` - CopySource string `xml:"CopySource"` - CopyProgress string `xml:"CopyProgress"` - CopyCompletionTime string `xml:"CopyCompletionTime"` - CopyStatusDescription string `xml:"CopyStatusDescription"` - LeaseStatus string `xml:"LeaseStatus"` -} - -// BlobHeaders contains various properties of a blob and is an entry -// in SetBlobProperties -type BlobHeaders struct { - ContentMD5 string `header:"x-ms-blob-content-md5"` - ContentLanguage string `header:"x-ms-blob-content-language"` - ContentEncoding string `header:"x-ms-blob-content-encoding"` - ContentType string `header:"x-ms-blob-content-type"` - CacheControl string `header:"x-ms-blob-cache-control"` -} - -// BlobListResponse contains the response fields from ListBlobs call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -type BlobListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Blobs []Blob `xml:"Blobs>Blob"` - - // BlobPrefix is used to traverse blobs as if it were a file system. - // It is returned if ListBlobsParameters.Delimiter is specified. - // The list here can be thought of as "folders" that may contain - // other folders or blobs. - BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"` - - // Delimiter is used to traverse blobs as if it were a file system. - // It is returned if ListBlobsParameters.Delimiter is specified. 
- Delimiter string `xml:"Delimiter"` -} - -// ListContainersParameters defines the set of customizable parameters to make a -// List Containers call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -type ListContainersParameters struct { - Prefix string - Marker string - Include string - MaxResults uint - Timeout uint -} - -func (p ListContainersParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) - } - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) - } - - return out -} - -// ListBlobsParameters defines the set of customizable -// parameters to make a List Blobs call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -type ListBlobsParameters struct { - Prefix string - Delimiter string - Marker string - Include string - MaxResults uint - Timeout uint -} - -func (p ListBlobsParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Delimiter != "" { - out.Set("delimiter", p.Delimiter) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) - } - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) - } - - return out -} - -// BlobType defines the type of the Azure Blob. -type BlobType string - -// Types of page blobs -const ( - BlobTypeBlock BlobType = "BlockBlob" - BlobTypePage BlobType = "PageBlob" - BlobTypeAppend BlobType = "AppendBlob" -) - -// PageWriteType defines the type updates that are going to be -// done on the page blob. -type PageWriteType string - -// Types of operations on page blobs -const ( - PageWriteTypeUpdate PageWriteType = "update" - PageWriteTypeClear PageWriteType = "clear" -) - -const ( - blobCopyStatusPending = "pending" - blobCopyStatusSuccess = "success" - blobCopyStatusAborted = "aborted" - blobCopyStatusFailed = "failed" -) - -// lease constants. -const ( - leaseHeaderPrefix = "x-ms-lease-" - leaseID = "x-ms-lease-id" - leaseAction = "x-ms-lease-action" - leaseBreakPeriod = "x-ms-lease-break-period" - leaseDuration = "x-ms-lease-duration" - leaseProposedID = "x-ms-proposed-lease-id" - leaseTime = "x-ms-lease-time" - - acquireLease = "acquire" - renewLease = "renew" - changeLease = "change" - releaseLease = "release" - breakLease = "break" -) - -// BlockListType is used to filter out types of blocks in a Get Blocks List call -// for a block blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all -// block types. -type BlockListType string - -// Filters for listing blocks in block blobs -const ( - BlockListTypeAll BlockListType = "all" - BlockListTypeCommitted BlockListType = "committed" - BlockListTypeUncommitted BlockListType = "uncommitted" -) - -// ContainerAccessType defines the access level to the container from a public -// request. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms- -// blob-public-access" header. 
-type ContainerAccessType string - -// Access options for containers -const ( - ContainerAccessTypePrivate ContainerAccessType = "" - ContainerAccessTypeBlob ContainerAccessType = "blob" - ContainerAccessTypeContainer ContainerAccessType = "container" -) - -// Maximum sizes (per REST API) for various concepts -const ( - MaxBlobBlockSize = 4 * 1024 * 1024 - MaxBlobPageSize = 4 * 1024 * 1024 -) - -// BlockStatus defines states a block for a block blob can -// be in. -type BlockStatus string - -// List of statuses that can be used to refer to a block in a block list -const ( - BlockStatusUncommitted BlockStatus = "Uncommitted" - BlockStatusCommitted BlockStatus = "Committed" - BlockStatusLatest BlockStatus = "Latest" -) - -// Block is used to create Block entities for Put Block List -// call. -type Block struct { - ID string - Status BlockStatus -} - -// BlockListResponse contains the response fields from Get Block List call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx -type BlockListResponse struct { - XMLName xml.Name `xml:"BlockList"` - CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"` - UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"` -} - -// BlockResponse contains the block information returned -// in the GetBlockListCall. -type BlockResponse struct { - Name string `xml:"Name"` - Size int64 `xml:"Size"` -} - -// GetPageRangesResponse contains the reponse fields from -// Get Page Ranges call. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx -type GetPageRangesResponse struct { - XMLName xml.Name `xml:"PageList"` - PageList []PageRange `xml:"PageRange"` -} - -// PageRange contains information about a page of a page blob from -// Get Pages Range call. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx -type PageRange struct { - Start int64 `xml:"Start"` - End int64 `xml:"End"` -} - -var ( - errBlobCopyAborted = errors.New("storage: blob copy is aborted") - errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch") -) - -// ListContainers returns the list of containers in a storage account along with -// pagination token and other response details. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -func (b BlobStorageClient) ListContainers(params ListContainersParameters) (ContainerListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) - uri := b.client.getEndpoint(blobServiceName, "", q) - headers := b.client.getStandardHeaders() - - var out ContainerListResponse - resp, err := b.client.exec("GET", uri, headers, nil) - if err != nil { - return out, err - } - defer resp.body.Close() - - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// CreateContainer creates a blob container within the storage account -// with given name and access level. Returns error if container already exists. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx -func (b BlobStorageClient) CreateContainer(name string, access ContainerAccessType) error { - resp, err := b.createContainer(name, access) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// CreateContainerIfNotExists creates a blob container if it does not exist. Returns -// true if container is newly created or false if container already exists. 
-func (b BlobStorageClient) CreateContainerIfNotExists(name string, access ContainerAccessType) (bool, error) { - resp, err := b.createContainer(name, access) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { - return resp.statusCode == http.StatusCreated, nil - } - } - return false, err -} - -func (b BlobStorageClient) createContainer(name string, access ContainerAccessType) (*storageResponse, error) { - verb := "PUT" - uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) - - headers := b.client.getStandardHeaders() - if access != "" { - headers["x-ms-blob-public-access"] = string(access) - } - return b.client.exec(verb, uri, headers, nil) -} - -// ContainerExists returns true if a container with given name exists -// on the storage account, otherwise returns false. -func (b BlobStorageClient) ContainerExists(name string) (bool, error) { - verb := "HEAD" - uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) - headers := b.client.getStandardHeaders() - - resp, err := b.client.exec(verb, uri, headers, nil) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusOK, nil - } - } - return false, err -} - -// DeleteContainer deletes the container with given name on the storage -// account. If the container does not exist returns error. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx -func (b BlobStorageClient) DeleteContainer(name string) error { - resp, err := b.deleteContainer(name) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) -} - -// DeleteContainerIfExists deletes the container with given name on the storage -// account if it exists. Returns true if container is deleted with this call, or -// false if the container did not exist at the time of the Delete Container -// operation. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx -func (b BlobStorageClient) DeleteContainerIfExists(name string) (bool, error) { - resp, err := b.deleteContainer(name) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil - } - } - return false, err -} - -func (b BlobStorageClient) deleteContainer(name string) (*storageResponse, error) { - verb := "DELETE" - uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) - - headers := b.client.getStandardHeaders() - return b.client.exec(verb, uri, headers, nil) -} - -// ListBlobs returns an object that contains list of blobs in the container, -// pagination token and other information in the response of List Blobs call. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -func (b BlobStorageClient) ListBlobs(container string, params ListBlobsParameters) (BlobListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{ - "restype": {"container"}, - "comp": {"list"}}) - uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), q) - headers := b.client.getStandardHeaders() - - var out BlobListResponse - resp, err := b.client.exec("GET", uri, headers, nil) - if err != nil { - return out, err - } - defer resp.body.Close() - - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// BlobExists returns true if a blob with given name exists on the specified -// container of the storage account. -func (b BlobStorageClient) BlobExists(container, name string) (bool, error) { - verb := "HEAD" - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - headers := b.client.getStandardHeaders() - resp, err := b.client.exec(verb, uri, headers, nil) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusOK, nil - } - } - return false, err -} - -// GetBlobURL gets the canonical URL to the blob with the specified name in the -// specified container. This method does not create a publicly accessible URL if -// the blob or container is private and this method does not check if the blob -// exists. -func (b BlobStorageClient) GetBlobURL(container, name string) string { - if container == "" { - container = "$root" - } - return b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) -} - -// GetBlob returns a stream to read the blob. Caller must call Close() the -// reader to close on the underlying connection. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx -func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error) { - resp, err := b.getBlobRange(container, name, "", nil) - if err != nil { - return nil, err - } - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } - return resp.body, nil -} - -// GetBlobRange reads the specified range of a blob to a stream. The bytesRange -// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx -func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (io.ReadCloser, error) { - resp, err := b.getBlobRange(container, name, bytesRange, extraHeaders) - if err != nil { - return nil, err - } - - if err := checkRespCode(resp.statusCode, []int{http.StatusPartialContent}); err != nil { - return nil, err - } - return resp.body, nil -} - -func (b BlobStorageClient) getBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (*storageResponse, error) { - verb := "GET" - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - - headers := b.client.getStandardHeaders() - if bytesRange != "" { - headers["Range"] = fmt.Sprintf("bytes=%s", bytesRange) - } - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec(verb, uri, headers, nil) - if err != nil { - return nil, err - } - return resp, err -} - -// leasePut is common PUT code for the various aquire/release/break etc functions. 
-func (b BlobStorageClient) leaseCommonPut(container string, name string, headers map[string]string, expectedStatus int) (http.Header, error) { - params := url.Values{"comp": {"lease"}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - - resp, err := b.client.exec("PUT", uri, headers, nil) - if err != nil { - return nil, err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{expectedStatus}); err != nil { - return nil, err - } - - return resp.headers, nil -} - -// AcquireLease creates a lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -// returns leaseID acquired -func (b BlobStorageClient) AcquireLease(container string, name string, leaseTimeInSeconds int, proposedLeaseID string) (returnedLeaseID string, err error) { - headers := b.client.getStandardHeaders() - headers[leaseAction] = acquireLease - headers[leaseProposedID] = proposedLeaseID - headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds) - - respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusCreated) - if err != nil { - return "", err - } - - returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(leaseID)) - - if returnedLeaseID != "" { - return returnedLeaseID, nil - } - - // what should we return in case of HTTP 201 but no lease ID? - // or it just cant happen? (brave words) - return "", errors.New("LeaseID not returned") -} - -// BreakLease breaks the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -// Returns the timeout remaining in the lease in seconds -func (b BlobStorageClient) BreakLease(container string, name string) (breakTimeout int, err error) { - headers := b.client.getStandardHeaders() - headers[leaseAction] = breakLease - return b.breakLeaseCommon(container, name, headers) -} - -// BreakLeaseWithBreakPeriod breaks the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -// breakPeriodInSeconds is used to determine how long until new lease can be created. 
-// Returns the timeout remaining in the lease in seconds -func (b BlobStorageClient) BreakLeaseWithBreakPeriod(container string, name string, breakPeriodInSeconds int) (breakTimeout int, err error) { - headers := b.client.getStandardHeaders() - headers[leaseAction] = breakLease - headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds) - return b.breakLeaseCommon(container, name, headers) -} - -// breakLeaseCommon is common code for both version of BreakLease (with and without break period) -func (b BlobStorageClient) breakLeaseCommon(container string, name string, headers map[string]string) (breakTimeout int, err error) { - - respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusAccepted) - if err != nil { - return 0, err - } - - breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime)) - if breakTimeoutStr != "" { - breakTimeout, err = strconv.Atoi(breakTimeoutStr) - if err != nil { - return 0, err - } - } - - return breakTimeout, nil -} - -// ChangeLease changes a lease ID for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -// Returns the new LeaseID acquired -func (b BlobStorageClient) ChangeLease(container string, name string, currentLeaseID string, proposedLeaseID string) (newLeaseID string, err error) { - headers := b.client.getStandardHeaders() - headers[leaseAction] = changeLease - headers[leaseID] = currentLeaseID - headers[leaseProposedID] = proposedLeaseID - - respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusOK) - if err != nil { - return "", err - } - - newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(leaseID)) - if newLeaseID != "" { - return newLeaseID, nil - } - - return "", errors.New("LeaseID not returned") -} - -// ReleaseLease releases the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -func (b BlobStorageClient) ReleaseLease(container string, name string, currentLeaseID string) error { - headers := b.client.getStandardHeaders() - headers[leaseAction] = releaseLease - headers[leaseID] = currentLeaseID - - _, err := b.leaseCommonPut(container, name, headers, http.StatusOK) - if err != nil { - return err - } - - return nil -} - -// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -func (b BlobStorageClient) RenewLease(container string, name string, currentLeaseID string) error { - headers := b.client.getStandardHeaders() - headers[leaseAction] = renewLease - headers[leaseID] = currentLeaseID - - _, err := b.leaseCommonPut(container, name, headers, http.StatusOK) - if err != nil { - return err - } - - return nil -} - -// GetBlobProperties provides various information about the specified -// blob. 
See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx -func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobProperties, error) { - verb := "HEAD" - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - - headers := b.client.getStandardHeaders() - resp, err := b.client.exec(verb, uri, headers, nil) - if err != nil { - return nil, err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } - - var contentLength int64 - contentLengthStr := resp.headers.Get("Content-Length") - if contentLengthStr != "" { - contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) - if err != nil { - return nil, err - } - } - - var sequenceNum int64 - sequenceNumStr := resp.headers.Get("x-ms-blob-sequence-number") - if sequenceNumStr != "" { - sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64) - if err != nil { - return nil, err - } - } - - return &BlobProperties{ - LastModified: resp.headers.Get("Last-Modified"), - Etag: resp.headers.Get("Etag"), - ContentMD5: resp.headers.Get("Content-MD5"), - ContentLength: contentLength, - ContentEncoding: resp.headers.Get("Content-Encoding"), - ContentType: resp.headers.Get("Content-Type"), - CacheControl: resp.headers.Get("Cache-Control"), - ContentLanguage: resp.headers.Get("Content-Language"), - SequenceNumber: sequenceNum, - CopyCompletionTime: resp.headers.Get("x-ms-copy-completion-time"), - CopyStatusDescription: resp.headers.Get("x-ms-copy-status-description"), - CopyID: resp.headers.Get("x-ms-copy-id"), - CopyProgress: resp.headers.Get("x-ms-copy-progress"), - CopySource: resp.headers.Get("x-ms-copy-source"), - CopyStatus: resp.headers.Get("x-ms-copy-status"), - BlobType: BlobType(resp.headers.Get("x-ms-blob-type")), - LeaseStatus: resp.headers.Get("x-ms-lease-status"), - }, nil -} - -// SetBlobProperties replaces the BlobHeaders for the specified blob. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetBlobProperties. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee691966.aspx -func (b BlobStorageClient) SetBlobProperties(container, name string, blobHeaders BlobHeaders) error { - params := url.Values{"comp": {"properties"}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - headers := b.client.getStandardHeaders() - - extraHeaders := headersFromStruct(blobHeaders) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec("PUT", uri, headers, nil) - if err != nil { - return err - } - defer resp.body.Close() - - return checkRespCode(resp.statusCode, []int{http.StatusOK}) -} - -// SetBlobMetadata replaces the metadata for the specified blob. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetBlobMetadata. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (b BlobStorageClient) SetBlobMetadata(container, name string, metadata map[string]string, extraHeaders map[string]string) error { - params := url.Values{"comp": {"metadata"}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - headers := b.client.getStandardHeaders() - for k, v := range metadata { - headers[userDefinedMetadataHeaderPrefix+k] = v - } - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec("PUT", uri, headers, nil) - if err != nil { - return err - } - defer resp.body.Close() - - return checkRespCode(resp.statusCode, []int{http.StatusOK}) -} - -// GetBlobMetadata returns all user-defined metadata for the specified blob. -// -// All metadata keys will be returned in lower case. (HTTP header -// names are case-insensitive.) -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (b BlobStorageClient) GetBlobMetadata(container, name string) (map[string]string, error) { - params := url.Values{"comp": {"metadata"}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - headers := b.client.getStandardHeaders() - - resp, err := b.client.exec("GET", uri, headers, nil) - if err != nil { - return nil, err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } - - metadata := make(map[string]string) - for k, v := range resp.headers { - // Can't trust CanonicalHeaderKey() to munge case - // reliably. "_" is allowed in identifiers: - // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx - // https://msdn.microsoft.com/library/aa664670(VS.71).aspx - // http://tools.ietf.org/html/rfc7230#section-3.2 - // ...but "_" is considered invalid by - // CanonicalMIMEHeaderKey in - // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 - // so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar". - k = strings.ToLower(k) - if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { - continue - } - // metadata["foo"] = content of the last X-Ms-Meta-Foo header - k = k[len(userDefinedMetadataHeaderPrefix):] - metadata[k] = v[len(v)-1] - } - return metadata, nil -} - -// CreateBlockBlob initializes an empty block blob with no blocks. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) CreateBlockBlob(container, name string) error { - return b.CreateBlockBlobFromReader(container, name, 0, nil, nil) -} - -// CreateBlockBlobFromReader initializes a block blob using data from -// reader. Size must be the number of bytes read from reader. To -// create an empty blob, use size==0 and reader==nil. -// -// The API rejects requests with size > 64 MiB (but this limit is not -// checked by the SDK). To write a larger blob, use CreateBlockBlob, -// PutBlock, and PutBlockList. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) CreateBlockBlobFromReader(container, name string, size uint64, blob io.Reader, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeBlock) - headers["Content-Length"] = fmt.Sprintf("%d", size) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec("PUT", uri, headers, blob) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// PutBlock saves the given data chunk to the specified block blob with -// given ID. -// -// The API rejects chunks larger than 4 MiB (but this limit is not -// checked by the SDK). -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx -func (b BlobStorageClient) PutBlock(container, name, blockID string, chunk []byte) error { - return b.PutBlockWithLength(container, name, blockID, uint64(len(chunk)), bytes.NewReader(chunk), nil) -} - -// PutBlockWithLength saves the given data stream of exactly specified size to -// the block blob with given ID. It is an alternative to PutBlocks where data -// comes as stream but the length is known in advance. -// -// The API rejects requests with size > 4 MiB (but this limit is not -// checked by the SDK). -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx -func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, size uint64, blob io.Reader, extraHeaders map[string]string) error { - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"block"}, "blockid": {blockID}}) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeBlock) - headers["Content-Length"] = fmt.Sprintf("%v", size) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec("PUT", uri, headers, blob) - if err != nil { - return err - } - - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// PutBlockList saves list of blocks to the specified block blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179467.aspx -func (b BlobStorageClient) PutBlockList(container, name string, blocks []Block) error { - blockListXML := prepareBlockListRequest(blocks) - - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"blocklist"}}) - headers := b.client.getStandardHeaders() - headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML)) - - resp, err := b.client.exec("PUT", uri, headers, strings.NewReader(blockListXML)) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// GetBlockList retrieves list of blocks in the specified block blob. 
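
A minimal sketch of a single-shot upload through CreateBlockBlobFromReader as documented above; the names and payload are invented for illustration.

package main

import (
    "bytes"
    "log"

    "github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
    cli, err := storage.NewBasicClient("myaccount", "bWFkZS11cC1rZXk=") // placeholder credentials
    if err != nil {
        log.Fatal(err)
    }
    blobCli := cli.GetBlobService()

    // Single-shot upload: size must match the number of bytes the reader yields,
    // and the payload has to stay under the 64 MiB service limit mentioned above.
    payload := []byte("hello from a block blob")
    err = blobCli.CreateBlockBlobFromReader("mycontainer", "hello.txt",
        uint64(len(payload)), bytes.NewReader(payload), nil)
    if err != nil {
        log.Fatal(err)
    }
}

For payloads beyond that cap, the PutBlock / PutBlockList pair above is the intended route.
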
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx -func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockListType) (BlockListResponse, error) { - params := url.Values{"comp": {"blocklist"}, "blocklisttype": {string(blockType)}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - headers := b.client.getStandardHeaders() - - var out BlockListResponse - resp, err := b.client.exec("GET", uri, headers, nil) - if err != nil { - return out, err - } - defer resp.body.Close() - - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// PutPageBlob initializes an empty page blob with specified name and maximum -// size in bytes (size must be aligned to a 512-byte boundary). A page blob must -// be created using this method before writing pages. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) PutPageBlob(container, name string, size int64, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypePage) - headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", size) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec("PUT", uri, headers, nil) - if err != nil { - return err - } - defer resp.body.Close() - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// PutPage writes a range of pages to a page blob or clears the given range. -// In case of 'clear' writes, given chunk is discarded. Ranges must be aligned -// with 512-byte boundaries and chunk must be of size multiplies by 512. -// -// See https://msdn.microsoft.com/en-us/library/ee691975.aspx -func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"page"}}) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypePage) - headers["x-ms-page-write"] = string(writeType) - headers["x-ms-range"] = fmt.Sprintf("bytes=%v-%v", startByte, endByte) - for k, v := range extraHeaders { - headers[k] = v - } - var contentLength int64 - var data io.Reader - if writeType == PageWriteTypeClear { - contentLength = 0 - data = bytes.NewReader([]byte{}) - } else { - contentLength = int64(len(chunk)) - data = bytes.NewReader(chunk) - } - headers["Content-Length"] = fmt.Sprintf("%v", contentLength) - - resp, err := b.client.exec("PUT", uri, headers, data) - if err != nil { - return err - } - defer resp.body.Close() - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// GetPageRanges returns the list of valid page ranges for a page blob. 
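
A sketch of the page-blob flow described above (PutPageBlob, PutPage, GetPageRanges). PageWriteTypeUpdate is assumed to be the "update" counterpart of the PageWriteTypeClear constant referenced in the deleted code; everything else is a placeholder.

package main

import (
    "fmt"
    "log"

    "github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
    cli, err := storage.NewBasicClient("myaccount", "bWFkZS11cC1rZXk=") // placeholder credentials
    if err != nil {
        log.Fatal(err)
    }
    blobCli := cli.GetBlobService()

    // A 1 MiB page blob; the size must be 512-byte aligned.
    if err := blobCli.PutPageBlob("mycontainer", "disk.vhd", 1024*1024, nil); err != nil {
        log.Fatal(err)
    }

    // Write the first 512-byte page. PageWriteTypeUpdate is assumed here (only
    // PageWriteTypeClear is visible in this hunk).
    page := make([]byte, 512)
    if err := blobCli.PutPage("mycontainer", "disk.vhd", 0, 511, storage.PageWriteTypeUpdate, page, nil); err != nil {
        log.Fatal(err)
    }

    // List the ranges that now hold data.
    ranges, err := blobCli.GetPageRanges("mycontainer", "disk.vhd")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%+v\n", ranges)
}
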
-// -// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx -func (b BlobStorageClient) GetPageRanges(container, name string) (GetPageRangesResponse, error) { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"pagelist"}}) - headers := b.client.getStandardHeaders() - - var out GetPageRangesResponse - resp, err := b.client.exec("GET", uri, headers, nil) - if err != nil { - return out, err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return out, err - } - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// PutAppendBlob initializes an empty append blob with specified name. An -// append blob must be created using this method before appending blocks. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) PutAppendBlob(container, name string, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeAppend) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec("PUT", uri, headers, nil) - if err != nil { - return err - } - defer resp.body.Close() - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// AppendBlock appends a block to an append blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/mt427365.aspx -func (b BlobStorageClient) AppendBlock(container, name string, chunk []byte, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"appendblock"}}) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeAppend) - headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec("PUT", uri, headers, bytes.NewReader(chunk)) - if err != nil { - return err - } - defer resp.body.Close() - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// CopyBlob starts a blob copy operation and waits for the operation to -// complete. sourceBlob parameter must be a canonical URL to the blob (can be -// obtained using GetBlobURL method.) There is no SLA on blob copy and therefore -// this helper method works faster on smaller files. 
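
A short sketch of the append-blob flow above: create once with PutAppendBlob, then add chunks with AppendBlock. Names are placeholders.

package main

import (
    "fmt"
    "log"

    "github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
    cli, err := storage.NewBasicClient("myaccount", "bWFkZS11cC1rZXk=") // placeholder credentials
    if err != nil {
        log.Fatal(err)
    }
    blobCli := cli.GetBlobService()

    // The append blob must exist before any blocks can be appended to it.
    if err := blobCli.PutAppendBlob("mycontainer", "audit.log", nil); err != nil {
        log.Fatal(err)
    }

    // Each AppendBlock call adds one chunk to the end of the blob.
    for i := 0; i < 3; i++ {
        line := []byte(fmt.Sprintf("entry %d\n", i))
        if err := blobCli.AppendBlock("mycontainer", "audit.log", line, nil); err != nil {
            log.Fatal(err)
        }
    }
}
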
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx -func (b BlobStorageClient) CopyBlob(container, name, sourceBlob string) error { - copyID, err := b.startBlobCopy(container, name, sourceBlob) - if err != nil { - return err - } - - return b.waitForBlobCopy(container, name, copyID) -} - -func (b BlobStorageClient) startBlobCopy(container, name, sourceBlob string) (string, error) { - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - - headers := b.client.getStandardHeaders() - headers["x-ms-copy-source"] = sourceBlob - - resp, err := b.client.exec("PUT", uri, headers, nil) - if err != nil { - return "", err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil { - return "", err - } - - copyID := resp.headers.Get("x-ms-copy-id") - if copyID == "" { - return "", errors.New("Got empty copy id header") - } - return copyID, nil -} - -func (b BlobStorageClient) waitForBlobCopy(container, name, copyID string) error { - for { - props, err := b.GetBlobProperties(container, name) - if err != nil { - return err - } - - if props.CopyID != copyID { - return errBlobCopyIDMismatch - } - - switch props.CopyStatus { - case blobCopyStatusSuccess: - return nil - case blobCopyStatusPending: - continue - case blobCopyStatusAborted: - return errBlobCopyAborted - case blobCopyStatusFailed: - return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", props.CopyID, props.CopyStatusDescription) - default: - return fmt.Errorf("storage: unhandled blob copy status: '%s'", props.CopyStatus) - } - } -} - -// DeleteBlob deletes the given blob from the specified container. -// If the blob does not exists at the time of the Delete Blob operation, it -// returns error. See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx -func (b BlobStorageClient) DeleteBlob(container, name string, extraHeaders map[string]string) error { - resp, err := b.deleteBlob(container, name, extraHeaders) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) -} - -// DeleteBlobIfExists deletes the given blob from the specified container If the -// blob is deleted with this call, returns true. Otherwise returns false. 
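
A sketch of a copy-then-delete sequence using CopyBlob and DeleteBlobIfExists from above; GetBlobURL is the helper the doc comment refers to, defined elsewhere in this same deleted file, and the blob names are placeholders.

package main

import (
    "fmt"
    "log"

    "github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
    cli, err := storage.NewBasicClient("myaccount", "bWFkZS11cC1rZXk=") // placeholder credentials
    if err != nil {
        log.Fatal(err)
    }
    blobCli := cli.GetBlobService()

    // CopyBlob wants the canonical URL of the source blob and blocks until the
    // service reports the copy finished (it polls GetBlobProperties internally).
    src := blobCli.GetBlobURL("mycontainer", "original.txt")
    if err := blobCli.CopyBlob("mycontainer", "copy-of-original.txt", src); err != nil {
        log.Fatal(err)
    }

    // Remove the source; a missing blob is not treated as an error here.
    existed, err := blobCli.DeleteBlobIfExists("mycontainer", "original.txt", nil)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("source blob existed:", existed)
}
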
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx -func (b BlobStorageClient) DeleteBlobIfExists(container, name string, extraHeaders map[string]string) (bool, error) { - resp, err := b.deleteBlob(container, name, extraHeaders) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil - } - } - return false, err -} - -func (b BlobStorageClient) deleteBlob(container, name string, extraHeaders map[string]string) (*storageResponse, error) { - verb := "DELETE" - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - headers := b.client.getStandardHeaders() - for k, v := range extraHeaders { - headers[k] = v - } - - return b.client.exec(verb, uri, headers, nil) -} - -// helper method to construct the path to a container given its name -func pathForContainer(name string) string { - return fmt.Sprintf("/%s", name) -} - -// helper method to construct the path to a blob given its container and blob -// name -func pathForBlob(container, name string) string { - return fmt.Sprintf("/%s/%s", container, name) -} - -// GetBlobSASURI creates an URL to the specified blob which contains the Shared -// Access Signature with specified permissions and expiration time. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx -func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Time, permissions string) (string, error) { - var ( - signedPermissions = permissions - blobURL = b.GetBlobURL(container, name) - ) - canonicalizedResource, err := b.client.buildCanonicalizedResource(blobURL) - - if err != nil { - return "", err - } - - // "The canonicalizedresouce portion of the string is a canonical path to the signed resource. - // It must include the service name (blob, table, queue or file) for version 2015-02-21 or - // later, the storage account name, and the resource name, and must be URL-decoded. - // -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx - - // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component). 
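
A sketch of issuing a short-lived read-only link with the GetBlobSASURI helper whose body continues below. The "r" permission string is passed through verbatim as the signed "sp" field and follows the usual Shared Access Signature convention for read access; account details are placeholders.

package main

import (
    "fmt"
    "log"
    "time"

    "github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
    cli, err := storage.NewBasicClient("myaccount", "bWFkZS11cC1rZXk=") // placeholder credentials
    if err != nil {
        log.Fatal(err)
    }
    blobCli := cli.GetBlobService()

    // Read-only link that expires in one hour.
    sasURL, err := blobCli.GetBlobSASURI("mycontainer", "report.pdf", time.Now().Add(time.Hour), "r")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(sasURL)
}
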
- canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1) - - canonicalizedResource, err = url.QueryUnescape(canonicalizedResource) - if err != nil { - return "", err - } - - signedExpiry := expiry.UTC().Format(time.RFC3339) - signedResource := "b" - - stringToSign, err := blobSASStringToSign(b.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions) - if err != nil { - return "", err - } - - sig := b.client.computeHmac256(stringToSign) - sasParams := url.Values{ - "sv": {b.client.apiVersion}, - "se": {signedExpiry}, - "sr": {signedResource}, - "sp": {signedPermissions}, - "sig": {sig}, - } - - sasURL, err := url.Parse(blobURL) - if err != nil { - return "", err - } - sasURL.RawQuery = sasParams.Encode() - return sasURL.String(), nil -} - -func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string) (string, error) { - var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string - - if signedVersion >= "2015-02-21" { - canonicalizedResource = "/blob" + canonicalizedResource - } - - // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx - if signedVersion >= "2013-08-15" { - return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil - } - return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go deleted file mode 100644 index 2816e03e..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go +++ /dev/null @@ -1,551 +0,0 @@ -// Package storage provides clients for Microsoft Azure Storage Services. -package storage - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "encoding/xml" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "regexp" - "sort" - "strconv" - "strings" -) - -const ( - // DefaultBaseURL is the domain name used for storage requests when a - // default client is created. - DefaultBaseURL = "core.windows.net" - - // DefaultAPIVersion is the Azure Storage API version string used when a - // basic client is created. - DefaultAPIVersion = "2015-02-21" - - defaultUseHTTPS = true - - // StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator - StorageEmulatorAccountName = "devstoreaccount1" - - // StorageEmulatorAccountKey is the the fixed storage account used by Azure Storage Emulator - StorageEmulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" - - blobServiceName = "blob" - tableServiceName = "table" - queueServiceName = "queue" - fileServiceName = "file" - - storageEmulatorBlob = "127.0.0.1:10000" - storageEmulatorTable = "127.0.0.1:10002" - storageEmulatorQueue = "127.0.0.1:10001" -) - -// Client is the object that needs to be constructed to perform -// operations on the storage account. -type Client struct { - // HTTPClient is the http.Client used to initiate API - // requests. If it is nil, http.DefaultClient is used. 
- HTTPClient *http.Client - - accountName string - accountKey []byte - useHTTPS bool - baseURL string - apiVersion string -} - -type storageResponse struct { - statusCode int - headers http.Header - body io.ReadCloser -} - -type odataResponse struct { - storageResponse - odata odataErrorMessage -} - -// AzureStorageServiceError contains fields of the error response from -// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx -// Some fields might be specific to certain calls. -type AzureStorageServiceError struct { - Code string `xml:"Code"` - Message string `xml:"Message"` - AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"` - QueryParameterName string `xml:"QueryParameterName"` - QueryParameterValue string `xml:"QueryParameterValue"` - Reason string `xml:"Reason"` - StatusCode int - RequestID string -} - -type odataErrorMessageMessage struct { - Lang string `json:"lang"` - Value string `json:"value"` -} - -type odataErrorMessageInternal struct { - Code string `json:"code"` - Message odataErrorMessageMessage `json:"message"` -} - -type odataErrorMessage struct { - Err odataErrorMessageInternal `json:"odata.error"` -} - -// UnexpectedStatusCodeError is returned when a storage service responds with neither an error -// nor with an HTTP status code indicating success. -type UnexpectedStatusCodeError struct { - allowed []int - got int -} - -func (e UnexpectedStatusCodeError) Error() string { - s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) } - - got := s(e.got) - expected := []string{} - for _, v := range e.allowed { - expected = append(expected, s(v)) - } - return fmt.Sprintf("storage: status code from service response is %s; was expecting %s", got, strings.Join(expected, " or ")) -} - -// Got is the actual status code returned by Azure. -func (e UnexpectedStatusCodeError) Got() int { - return e.got -} - -// NewBasicClient constructs a Client with given storage service name and -// key. -func NewBasicClient(accountName, accountKey string) (Client, error) { - if accountName == StorageEmulatorAccountName { - return NewEmulatorClient() - } - return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS) -} - -//NewEmulatorClient contructs a Client intended to only work with Azure -//Storage Emulator -func NewEmulatorClient() (Client, error) { - return NewClient(StorageEmulatorAccountName, StorageEmulatorAccountKey, DefaultBaseURL, DefaultAPIVersion, false) -} - -// NewClient constructs a Client. This should be used if the caller wants -// to specify whether to use HTTPS, a specific REST API version or a custom -// storage endpoint than Azure Public Cloud. 
-func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) { - var c Client - if accountName == "" { - return c, fmt.Errorf("azure: account name required") - } else if accountKey == "" { - return c, fmt.Errorf("azure: account key required") - } else if blobServiceBaseURL == "" { - return c, fmt.Errorf("azure: base storage service url required") - } - - key, err := base64.StdEncoding.DecodeString(accountKey) - if err != nil { - return c, fmt.Errorf("azure: malformed storage account key: %v", err) - } - - return Client{ - accountName: accountName, - accountKey: key, - useHTTPS: useHTTPS, - baseURL: blobServiceBaseURL, - apiVersion: apiVersion, - }, nil -} - -func (c Client) getBaseURL(service string) string { - scheme := "http" - if c.useHTTPS { - scheme = "https" - } - host := "" - if c.accountName == StorageEmulatorAccountName { - switch service { - case blobServiceName: - host = storageEmulatorBlob - case tableServiceName: - host = storageEmulatorTable - case queueServiceName: - host = storageEmulatorQueue - } - } else { - host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL) - } - - u := &url.URL{ - Scheme: scheme, - Host: host} - return u.String() -} - -func (c Client) getEndpoint(service, path string, params url.Values) string { - u, err := url.Parse(c.getBaseURL(service)) - if err != nil { - // really should not be happening - panic(err) - } - - // API doesn't accept path segments not starting with '/' - if !strings.HasPrefix(path, "/") { - path = fmt.Sprintf("/%v", path) - } - - if c.accountName == StorageEmulatorAccountName { - path = fmt.Sprintf("/%v%v", StorageEmulatorAccountName, path) - } - - u.Path = path - u.RawQuery = params.Encode() - return u.String() -} - -// GetBlobService returns a BlobStorageClient which can operate on the blob -// service of the storage account. -func (c Client) GetBlobService() BlobStorageClient { - return BlobStorageClient{c} -} - -// GetQueueService returns a QueueServiceClient which can operate on the queue -// service of the storage account. -func (c Client) GetQueueService() QueueServiceClient { - return QueueServiceClient{c} -} - -// GetTableService returns a TableServiceClient which can operate on the table -// service of the storage account. -func (c Client) GetTableService() TableServiceClient { - return TableServiceClient{c} -} - -// GetFileService returns a FileServiceClient which can operate on the file -// service of the storage account. 
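
A sketch of the three constructor paths shown above: the default public-cloud client, the storage-emulator shortcut, and a fully explicit NewClient call. The account key and the alternative base URL are example values only.

package main

import (
    "log"

    "github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
    // Default public-cloud client (HTTPS, core.windows.net, API version 2015-02-21).
    cli, err := storage.NewBasicClient("myaccount", "bWFkZS11cC1rZXk=") // placeholder key, must be valid base64
    if err != nil {
        log.Fatal(err)
    }

    // Local development: the well-known emulator account name makes NewBasicClient
    // route to NewEmulatorClient automatically.
    emu, err := storage.NewBasicClient(storage.StorageEmulatorAccountName, storage.StorageEmulatorAccountKey)
    if err != nil {
        log.Fatal(err)
    }

    // Fully explicit form, e.g. for a sovereign-cloud endpoint (example value).
    custom, err := storage.NewClient("myaccount", "bWFkZS11cC1rZXk=", "core.chinacloudapi.cn",
        storage.DefaultAPIVersion, true)
    if err != nil {
        log.Fatal(err)
    }

    // Each client hands out per-service clients via the accessors defined above.
    _ = cli.GetBlobService()
    _ = emu.GetQueueService()
    _ = custom.GetTableService()
}
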
-func (c Client) GetFileService() FileServiceClient { - return FileServiceClient{c} -} - -func (c Client) createAuthorizationHeader(canonicalizedString string) string { - signature := c.computeHmac256(canonicalizedString) - return fmt.Sprintf("%s %s:%s", "SharedKey", c.getCanonicalizedAccountName(), signature) -} - -func (c Client) getAuthorizationHeader(verb, url string, headers map[string]string) (string, error) { - canonicalizedResource, err := c.buildCanonicalizedResource(url) - if err != nil { - return "", err - } - - canonicalizedString := c.buildCanonicalizedString(verb, headers, canonicalizedResource) - return c.createAuthorizationHeader(canonicalizedString), nil -} - -func (c Client) getStandardHeaders() map[string]string { - return map[string]string{ - "x-ms-version": c.apiVersion, - "x-ms-date": currentTimeRfc1123Formatted(), - } -} - -func (c Client) getCanonicalizedAccountName() string { - // since we may be trying to access a secondary storage account, we need to - // remove the -secondary part of the storage name - return strings.TrimSuffix(c.accountName, "-secondary") -} - -func (c Client) buildCanonicalizedHeader(headers map[string]string) string { - cm := make(map[string]string) - - for k, v := range headers { - headerName := strings.TrimSpace(strings.ToLower(k)) - match, _ := regexp.MatchString("x-ms-", headerName) - if match { - cm[headerName] = v - } - } - - if len(cm) == 0 { - return "" - } - - keys := make([]string, 0, len(cm)) - for key := range cm { - keys = append(keys, key) - } - - sort.Strings(keys) - - ch := "" - - for i, key := range keys { - if i == len(keys)-1 { - ch += fmt.Sprintf("%s:%s", key, cm[key]) - } else { - ch += fmt.Sprintf("%s:%s\n", key, cm[key]) - } - } - return ch -} - -func (c Client) buildCanonicalizedResourceTable(uri string) (string, error) { - errMsg := "buildCanonicalizedResourceTable error: %s" - u, err := url.Parse(uri) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) - } - - cr := "/" + c.getCanonicalizedAccountName() - - if len(u.Path) > 0 { - cr += u.Path - } - - return cr, nil -} - -func (c Client) buildCanonicalizedResource(uri string) (string, error) { - errMsg := "buildCanonicalizedResource error: %s" - u, err := url.Parse(uri) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) - } - - cr := "/" + c.getCanonicalizedAccountName() - - if len(u.Path) > 0 { - // Any portion of the CanonicalizedResource string that is derived from - // the resource's URI should be encoded exactly as it is in the URI. 
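
A standalone paraphrase of the buildCanonicalizedHeader logic above, useful for seeing what ends up in the SharedKey signing string. The real helper is unexported, so this mirrors the deleted code instead of calling it; the sample header values are invented.

package main

import (
    "fmt"
    "sort"
    "strings"
)

// canonicalizedHeader mirrors the deleted buildCanonicalizedHeader helper: keep
// only x-ms-* headers, lower-case and sort the names, then join "name:value"
// pairs with newlines and no trailing newline. The deleted code uses a regexp
// match, which for this literal pattern is equivalent to a substring check.
func canonicalizedHeader(headers map[string]string) string {
    cm := make(map[string]string)
    for k, v := range headers {
        name := strings.TrimSpace(strings.ToLower(k))
        if strings.Contains(name, "x-ms-") {
            cm[name] = v
        }
    }
    keys := make([]string, 0, len(cm))
    for k := range cm {
        keys = append(keys, k)
    }
    sort.Strings(keys)
    lines := make([]string, 0, len(keys))
    for _, k := range keys {
        lines = append(lines, fmt.Sprintf("%s:%s", k, cm[k]))
    }
    return strings.Join(lines, "\n")
}

func main() {
    fmt.Println(canonicalizedHeader(map[string]string{
        "x-ms-version": "2015-02-21",
        "x-ms-date":    "Mon, 02 Jan 2006 15:04:05 GMT",
        "Content-Type": "text/plain", // dropped: not an x-ms-* header
    }))
}
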
- // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx - cr += u.EscapedPath() - } - - params, err := url.ParseQuery(u.RawQuery) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) - } - - if len(params) > 0 { - cr += "\n" - keys := make([]string, 0, len(params)) - for key := range params { - keys = append(keys, key) - } - - sort.Strings(keys) - - for i, key := range keys { - if len(params[key]) > 1 { - sort.Strings(params[key]) - } - - if i == len(keys)-1 { - cr += fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")) - } else { - cr += fmt.Sprintf("%s:%s\n", key, strings.Join(params[key], ",")) - } - } - } - - return cr, nil -} - -func (c Client) buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string) string { - contentLength := headers["Content-Length"] - if contentLength == "0" { - contentLength = "" - } - canonicalizedString := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", - verb, - headers["Content-Encoding"], - headers["Content-Language"], - contentLength, - headers["Content-MD5"], - headers["Content-Type"], - headers["Date"], - headers["If-Modified-Since"], - headers["If-Match"], - headers["If-None-Match"], - headers["If-Unmodified-Since"], - headers["Range"], - c.buildCanonicalizedHeader(headers), - canonicalizedResource) - - return canonicalizedString -} - -func (c Client) exec(verb, url string, headers map[string]string, body io.Reader) (*storageResponse, error) { - authHeader, err := c.getAuthorizationHeader(verb, url, headers) - if err != nil { - return nil, err - } - headers["Authorization"] = authHeader - if err != nil { - return nil, err - } - - req, err := http.NewRequest(verb, url, body) - if err != nil { - return nil, errors.New("azure/storage: error creating request: " + err.Error()) - } - - if clstr, ok := headers["Content-Length"]; ok { - // content length header is being signed, but completely ignored by golang. 
- // instead we have to use the ContentLength property on the request struct - // (see https://golang.org/src/net/http/request.go?s=18140:18370#L536 and - // https://golang.org/src/net/http/transfer.go?s=1739:2467#L49) - req.ContentLength, err = strconv.ParseInt(clstr, 10, 64) - if err != nil { - return nil, err - } - } - for k, v := range headers { - req.Header.Add(k, v) - } - - httpClient := c.HTTPClient - if httpClient == nil { - httpClient = http.DefaultClient - } - resp, err := httpClient.Do(req) - if err != nil { - return nil, err - } - - statusCode := resp.StatusCode - if statusCode >= 400 && statusCode <= 505 { - var respBody []byte - respBody, err = readResponseBody(resp) - if err != nil { - return nil, err - } - - if len(respBody) == 0 { - // no error in response body - err = fmt.Errorf("storage: service returned without a response body (%s)", resp.Status) - } else { - // response contains storage service error object, unmarshal - storageErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, resp.Header.Get("x-ms-request-id")) - if err != nil { // error unmarshaling the error response - err = errIn - } - err = storageErr - } - return &storageResponse{ - statusCode: resp.StatusCode, - headers: resp.Header, - body: ioutil.NopCloser(bytes.NewReader(respBody)), /* restore the body */ - }, err - } - - return &storageResponse{ - statusCode: resp.StatusCode, - headers: resp.Header, - body: resp.Body}, nil -} - -func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader) (*odataResponse, error) { - req, err := http.NewRequest(verb, url, body) - for k, v := range headers { - req.Header.Add(k, v) - } - - httpClient := c.HTTPClient - if httpClient == nil { - httpClient = http.DefaultClient - } - - resp, err := httpClient.Do(req) - if err != nil { - return nil, err - } - - respToRet := &odataResponse{} - respToRet.body = resp.Body - respToRet.statusCode = resp.StatusCode - respToRet.headers = resp.Header - - statusCode := resp.StatusCode - if statusCode >= 400 && statusCode <= 505 { - var respBody []byte - respBody, err = readResponseBody(resp) - if err != nil { - return nil, err - } - - if len(respBody) == 0 { - // no error in response body - err = fmt.Errorf("storage: service returned without a response body (%d)", resp.StatusCode) - return respToRet, err - } - // try unmarshal as odata.error json - err = json.Unmarshal(respBody, &respToRet.odata) - return respToRet, err - } - - return respToRet, nil -} - -func (c Client) createSharedKeyLite(url string, headers map[string]string) (string, error) { - can, err := c.buildCanonicalizedResourceTable(url) - - if err != nil { - return "", err - } - strToSign := headers["x-ms-date"] + "\n" + can - - hmac := c.computeHmac256(strToSign) - return fmt.Sprintf("SharedKeyLite %s:%s", c.accountName, hmac), nil -} - -func (c Client) execTable(verb, url string, headers map[string]string, body io.Reader) (*odataResponse, error) { - var err error - headers["Authorization"], err = c.createSharedKeyLite(url, headers) - if err != nil { - return nil, err - } - - return c.execInternalJSON(verb, url, headers, body) -} - -func readResponseBody(resp *http.Response) ([]byte, error) { - defer resp.Body.Close() - out, err := ioutil.ReadAll(resp.Body) - if err == io.EOF { - err = nil - } - return out, err -} - -func serviceErrFromXML(body []byte, statusCode int, requestID string) (AzureStorageServiceError, error) { - var storageErr AzureStorageServiceError - if err := xml.Unmarshal(body, &storageErr); err != nil { - return 
storageErr, err - } - storageErr.StatusCode = statusCode - storageErr.RequestID = requestID - return storageErr, nil -} - -func (e AzureStorageServiceError) Error() string { - return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s, QueryParameterName=%s, QueryParameterValue=%s", - e.StatusCode, e.Code, e.Message, e.RequestID, e.QueryParameterName, e.QueryParameterValue) -} - -// checkRespCode returns UnexpectedStatusError if the given response code is not -// one of the allowed status codes; otherwise nil. -func checkRespCode(respCode int, allowed []int) error { - for _, v := range allowed { - if respCode == v { - return nil - } - } - return UnexpectedStatusCodeError{allowed, respCode} -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go deleted file mode 100644 index 2397587c..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go +++ /dev/null @@ -1,352 +0,0 @@ -package storage - -import ( - "encoding/xml" - "fmt" - "net/http" - "net/url" - "strings" -) - -// FileServiceClient contains operations for Microsoft Azure File Service. -type FileServiceClient struct { - client Client -} - -// A Share is an entry in ShareListResponse. -type Share struct { - Name string `xml:"Name"` - Properties ShareProperties `xml:"Properties"` -} - -// ShareProperties contains various properties of a share returned from -// various endpoints like ListShares. -type ShareProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` - Quota string `xml:"Quota"` -} - -// ShareListResponse contains the response fields from -// ListShares call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx -type ShareListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Shares []Share `xml:"Shares>Share"` -} - -// ListSharesParameters defines the set of customizable parameters to make a -// List Shares call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx -type ListSharesParameters struct { - Prefix string - Marker string - Include string - MaxResults uint - Timeout uint -} - -// ShareHeaders contains various properties of a file and is an entry -// in SetShareProperties -type ShareHeaders struct { - Quota string `header:"x-ms-share-quota"` -} - -func (p ListSharesParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) - } - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) - } - - return out -} - -// pathForFileShare returns the URL path segment for a File Share resource -func pathForFileShare(name string) string { - return fmt.Sprintf("/%s", name) -} - -// ListShares returns the list of shares in a storage account along with -// pagination token and other response details. 
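
A sketch of how a caller might tell apart the two error types this client surfaces: AzureStorageServiceError, parsed from the XML body by serviceErrFromXML above, and UnexpectedStatusCodeError from checkRespCode. Container and blob names are placeholders.

package main

import (
    "fmt"
    "log"

    "github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
    cli, err := storage.NewBasicClient("myaccount", "bWFkZS11cC1rZXk=") // placeholder credentials
    if err != nil {
        log.Fatal(err)
    }
    blobCli := cli.GetBlobService()

    err = blobCli.CreateBlockBlob("mycontainer", "maybe-forbidden.txt")
    switch e := err.(type) {
    case nil:
        fmt.Println("created")
    case storage.AzureStorageServiceError:
        // The service returned an XML error body; Code, Message and RequestID were parsed from it.
        fmt.Printf("service error %s (HTTP %d): %s\n", e.Code, e.StatusCode, e.Message)
    case storage.UnexpectedStatusCodeError:
        // No parseable error body, just a status outside the allowed set for this call.
        fmt.Printf("unexpected status: %d\n", e.Got())
    default:
        fmt.Println("transport or client-side error:", err)
    }
}
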
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -func (f FileServiceClient) ListShares(params ListSharesParameters) (ShareListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) - uri := f.client.getEndpoint(fileServiceName, "", q) - headers := f.client.getStandardHeaders() - - var out ShareListResponse - resp, err := f.client.exec("GET", uri, headers, nil) - if err != nil { - return out, err - } - defer resp.body.Close() - - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// CreateShare operation creates a new share under the specified account. If the -// share with the same name already exists, the operation fails. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx -func (f FileServiceClient) CreateShare(name string) error { - resp, err := f.createShare(name) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// ShareExists returns true if a share with given name exists -// on the storage account, otherwise returns false. -func (f FileServiceClient) ShareExists(name string) (bool, error) { - uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}}) - headers := f.client.getStandardHeaders() - - resp, err := f.client.exec("HEAD", uri, headers, nil) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusOK, nil - } - } - return false, err -} - -// GetShareURL gets the canonical URL to the share with the specified name in the -// specified container. This method does not create a publicly accessible URL if -// the file is private and this method does not check if the file -// exists. -func (f FileServiceClient) GetShareURL(name string) string { - return f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{}) -} - -// CreateShareIfNotExists creates a new share under the specified account if -// it does not exist. Returns true if container is newly created or false if -// container already exists. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx -func (f FileServiceClient) CreateShareIfNotExists(name string) (bool, error) { - resp, err := f.createShare(name) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { - return resp.statusCode == http.StatusCreated, nil - } - } - return false, err -} - -// CreateShare creates a Azure File Share and returns its response -func (f FileServiceClient) createShare(name string) (*storageResponse, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}}) - headers := f.client.getStandardHeaders() - return f.client.exec("PUT", uri, headers, nil) -} - -// GetShareProperties provides various information about the specified -// file. 
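
A sketch of the share-level calls above: idempotent creation with CreateShareIfNotExists and enumeration with ListShares. Share names are placeholders, and because of the emulator check in this file the File service needs a real storage account.

package main

import (
    "fmt"
    "log"

    "github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
    cli, err := storage.NewBasicClient("myaccount", "bWFkZS11cC1rZXk=") // placeholder credentials
    if err != nil {
        log.Fatal(err)
    }
    fileCli := cli.GetFileService()

    // Idempotent creation: true means this call created the share.
    created, err := fileCli.CreateShareIfNotExists("backups")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("created now:", created)

    // Enumerate shares whose names start with "back".
    list, err := fileCli.ListShares(storage.ListSharesParameters{Prefix: "back"})
    if err != nil {
        log.Fatal(err)
    }
    for _, s := range list.Shares {
        fmt.Println(s.Name, s.Properties.LastModified)
    }
}
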
See https://msdn.microsoft.com/en-us/library/azure/dn689099.aspx -func (f FileServiceClient) GetShareProperties(name string) (*ShareProperties, error) { - uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}}) - - headers := f.client.getStandardHeaders() - resp, err := f.client.exec("HEAD", uri, headers, nil) - if err != nil { - return nil, err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } - - return &ShareProperties{ - LastModified: resp.headers.Get("Last-Modified"), - Etag: resp.headers.Get("Etag"), - Quota: resp.headers.Get("x-ms-share-quota"), - }, nil -} - -// SetShareProperties replaces the ShareHeaders for the specified file. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by SetShareProperties. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://msdn.microsoft.com/en-us/library/azure/mt427368.aspx -func (f FileServiceClient) SetShareProperties(name string, shareHeaders ShareHeaders) error { - params := url.Values{} - params.Set("restype", "share") - params.Set("comp", "properties") - - uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), params) - headers := f.client.getStandardHeaders() - - extraHeaders := headersFromStruct(shareHeaders) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := f.client.exec("PUT", uri, headers, nil) - if err != nil { - return err - } - defer resp.body.Close() - - return checkRespCode(resp.statusCode, []int{http.StatusOK}) -} - -// DeleteShare operation marks the specified share for deletion. The share -// and any files contained within it are later deleted during garbage -// collection. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx -func (f FileServiceClient) DeleteShare(name string) error { - resp, err := f.deleteShare(name) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) -} - -// DeleteShareIfExists operation marks the specified share for deletion if it -// exists. The share and any files contained within it are later deleted during -// garbage collection. Returns true if share existed and deleted with this call, -// false otherwise. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx -func (f FileServiceClient) DeleteShareIfExists(name string) (bool, error) { - resp, err := f.deleteShare(name) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil - } - } - return false, err -} - -// deleteShare makes the call to Delete Share operation endpoint and returns -// the response -func (f FileServiceClient) deleteShare(name string) (*storageResponse, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err - } - uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}}) - return f.client.exec("DELETE", uri, f.client.getStandardHeaders(), nil) -} - -// SetShareMetadata replaces the metadata for the specified Share. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetShareMetadata. 
HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (f FileServiceClient) SetShareMetadata(name string, metadata map[string]string, extraHeaders map[string]string) error { - params := url.Values{} - params.Set("restype", "share") - params.Set("comp", "metadata") - - uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), params) - headers := f.client.getStandardHeaders() - for k, v := range metadata { - headers[userDefinedMetadataHeaderPrefix+k] = v - } - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := f.client.exec("PUT", uri, headers, nil) - if err != nil { - return err - } - defer resp.body.Close() - - return checkRespCode(resp.statusCode, []int{http.StatusOK}) -} - -// GetShareMetadata returns all user-defined metadata for the specified share. -// -// All metadata keys will be returned in lower case. (HTTP header -// names are case-insensitive.) -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (f FileServiceClient) GetShareMetadata(name string) (map[string]string, error) { - params := url.Values{} - params.Set("restype", "share") - params.Set("comp", "metadata") - - uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), params) - headers := f.client.getStandardHeaders() - - resp, err := f.client.exec("GET", uri, headers, nil) - if err != nil { - return nil, err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } - - metadata := make(map[string]string) - for k, v := range resp.headers { - // Can't trust CanonicalHeaderKey() to munge case - // reliably. "_" is allowed in identifiers: - // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx - // https://msdn.microsoft.com/library/aa664670(VS.71).aspx - // http://tools.ietf.org/html/rfc7230#section-3.2 - // ...but "_" is considered invalid by - // CanonicalMIMEHeaderKey in - // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 - // so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar". - k = strings.ToLower(k) - if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { - continue - } - // metadata["foo"] = content of the last X-Ms-Meta-Foo header - k = k[len(userDefinedMetadataHeaderPrefix):] - metadata[k] = v[len(v)-1] - } - return metadata, nil -} - -//checkForStorageEmulator determines if the client is setup for use with -//Azure Storage Emulator, and returns a relevant error -func (f FileServiceClient) checkForStorageEmulator() error { - if f.client.accountName == StorageEmulatorAccountName { - return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator") - } - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go deleted file mode 100644 index 3ecf4aca..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go +++ /dev/null @@ -1,306 +0,0 @@ -package storage - -import ( - "encoding/xml" - "fmt" - "net/http" - "net/url" - "strconv" - "strings" -) - -const ( - // casing is per Golang's http.Header canonicalizing the header names. 
- approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count" - userDefinedMetadataHeaderPrefix = "X-Ms-Meta-" -) - -// QueueServiceClient contains operations for Microsoft Azure Queue Storage -// Service. -type QueueServiceClient struct { - client Client -} - -func pathForQueue(queue string) string { return fmt.Sprintf("/%s", queue) } -func pathForQueueMessages(queue string) string { return fmt.Sprintf("/%s/messages", queue) } -func pathForMessage(queue, name string) string { return fmt.Sprintf("/%s/messages/%s", queue, name) } - -type putMessageRequest struct { - XMLName xml.Name `xml:"QueueMessage"` - MessageText string `xml:"MessageText"` -} - -// PutMessageParameters is the set of options can be specified for Put Messsage -// operation. A zero struct does not use any preferences for the request. -type PutMessageParameters struct { - VisibilityTimeout int - MessageTTL int -} - -func (p PutMessageParameters) getParameters() url.Values { - out := url.Values{} - if p.VisibilityTimeout != 0 { - out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) - } - if p.MessageTTL != 0 { - out.Set("messagettl", strconv.Itoa(p.MessageTTL)) - } - return out -} - -// GetMessagesParameters is the set of options can be specified for Get -// Messsages operation. A zero struct does not use any preferences for the -// request. -type GetMessagesParameters struct { - NumOfMessages int - VisibilityTimeout int -} - -func (p GetMessagesParameters) getParameters() url.Values { - out := url.Values{} - if p.NumOfMessages != 0 { - out.Set("numofmessages", strconv.Itoa(p.NumOfMessages)) - } - if p.VisibilityTimeout != 0 { - out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) - } - return out -} - -// PeekMessagesParameters is the set of options can be specified for Peek -// Messsage operation. A zero struct does not use any preferences for the -// request. -type PeekMessagesParameters struct { - NumOfMessages int -} - -func (p PeekMessagesParameters) getParameters() url.Values { - out := url.Values{"peekonly": {"true"}} // Required for peek operation - if p.NumOfMessages != 0 { - out.Set("numofmessages", strconv.Itoa(p.NumOfMessages)) - } - return out -} - -// GetMessagesResponse represents a response returned from Get Messages -// operation. -type GetMessagesResponse struct { - XMLName xml.Name `xml:"QueueMessagesList"` - QueueMessagesList []GetMessageResponse `xml:"QueueMessage"` -} - -// GetMessageResponse represents a QueueMessage object returned from Get -// Messages operation response. -type GetMessageResponse struct { - MessageID string `xml:"MessageId"` - InsertionTime string `xml:"InsertionTime"` - ExpirationTime string `xml:"ExpirationTime"` - PopReceipt string `xml:"PopReceipt"` - TimeNextVisible string `xml:"TimeNextVisible"` - DequeueCount int `xml:"DequeueCount"` - MessageText string `xml:"MessageText"` -} - -// PeekMessagesResponse represents a response returned from Get Messages -// operation. -type PeekMessagesResponse struct { - XMLName xml.Name `xml:"QueueMessagesList"` - QueueMessagesList []PeekMessageResponse `xml:"QueueMessage"` -} - -// PeekMessageResponse represents a QueueMessage object returned from Peek -// Messages operation response. 
-type PeekMessageResponse struct { - MessageID string `xml:"MessageId"` - InsertionTime string `xml:"InsertionTime"` - ExpirationTime string `xml:"ExpirationTime"` - DequeueCount int `xml:"DequeueCount"` - MessageText string `xml:"MessageText"` -} - -// QueueMetadataResponse represents user defined metadata and queue -// properties on a specific queue. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx -type QueueMetadataResponse struct { - ApproximateMessageCount int - UserDefinedMetadata map[string]string -} - -// SetMetadata operation sets user-defined metadata on the specified queue. -// Metadata is associated with the queue as name-value pairs. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179348.aspx -func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}}) - headers := c.client.getStandardHeaders() - for k, v := range metadata { - headers[userDefinedMetadataHeaderPrefix+k] = v - } - - resp, err := c.client.exec("PUT", uri, headers, nil) - if err != nil { - return err - } - defer resp.body.Close() - - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) -} - -// GetMetadata operation retrieves user-defined metadata and queue -// properties on the specified queue. Metadata is associated with -// the queue as name-values pairs. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx -// -// Because the way Golang's http client (and http.Header in particular) -// canonicalize header names, the returned metadata names would always -// be all lower case. -func (c QueueServiceClient) GetMetadata(name string) (QueueMetadataResponse, error) { - qm := QueueMetadataResponse{} - qm.UserDefinedMetadata = make(map[string]string) - uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}}) - headers := c.client.getStandardHeaders() - resp, err := c.client.exec("GET", uri, headers, nil) - if err != nil { - return qm, err - } - defer resp.body.Close() - - for k, v := range resp.headers { - if len(v) != 1 { - return qm, fmt.Errorf("Unexpected number of values (%d) in response header '%s'", len(v), k) - } - - value := v[0] - - if k == approximateMessagesCountHeader { - qm.ApproximateMessageCount, err = strconv.Atoi(value) - if err != nil { - return qm, fmt.Errorf("Unexpected value in response header '%s': '%s' ", k, value) - } - } else if strings.HasPrefix(k, userDefinedMetadataHeaderPrefix) { - name := strings.TrimPrefix(k, userDefinedMetadataHeaderPrefix) - qm.UserDefinedMetadata[strings.ToLower(name)] = value - } - } - - return qm, checkRespCode(resp.statusCode, []int{http.StatusOK}) -} - -// CreateQueue operation creates a queue under the given account. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179342.aspx -func (c QueueServiceClient) CreateQueue(name string) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{}) - headers := c.client.getStandardHeaders() - resp, err := c.client.exec("PUT", uri, headers, nil) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// DeleteQueue operation permanently deletes the specified queue. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179436.aspx -func (c QueueServiceClient) DeleteQueue(name string) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{}) - resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) -} - -// QueueExists returns true if a queue with given name exists. -func (c QueueServiceClient) QueueExists(name string) (bool, error) { - uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": {"metadata"}}) - resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) - if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) { - return resp.statusCode == http.StatusOK, nil - } - - return false, err -} - -// PutMessage operation adds a new message to the back of the message queue. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179346.aspx -func (c QueueServiceClient) PutMessage(queue string, message string, params PutMessageParameters) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) - req := putMessageRequest{MessageText: message} - body, nn, err := xmlMarshal(req) - if err != nil { - return err - } - headers := c.client.getStandardHeaders() - headers["Content-Length"] = strconv.Itoa(nn) - resp, err := c.client.exec("POST", uri, headers, body) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// ClearMessages operation deletes all messages from the specified queue. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179454.aspx -func (c QueueServiceClient) ClearMessages(queue string) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), url.Values{}) - resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) -} - -// GetMessages operation retrieves one or more messages from the front of the -// queue. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179474.aspx -func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParameters) (GetMessagesResponse, error) { - var r GetMessagesResponse - uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) - resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) - if err != nil { - return r, err - } - defer resp.body.Close() - err = xmlUnmarshal(resp.body, &r) - return r, err -} - -// PeekMessages retrieves one or more messages from the front of the queue, but -// does not alter the visibility of the message. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179472.aspx -func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParameters) (PeekMessagesResponse, error) { - var r PeekMessagesResponse - uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) - resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) - if err != nil { - return r, err - } - defer resp.body.Close() - err = xmlUnmarshal(resp.body, &r) - return r, err -} - -// DeleteMessage operation deletes the specified message. 
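
A sketch of a produce/consume round trip over the queue operations above (PutMessage, GetMessages, DeleteMessage); the queue name and message text are placeholders.

package main

import (
    "fmt"
    "log"

    "github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
    cli, err := storage.NewBasicClient("myaccount", "bWFkZS11cC1rZXk=") // placeholder credentials
    if err != nil {
        log.Fatal(err)
    }
    queueCli := cli.GetQueueService()

    if err := queueCli.CreateQueue("tasks"); err != nil {
        log.Fatal(err)
    }

    // Enqueue with default TTL and visibility (a zero parameters struct).
    if err := queueCli.PutMessage("tasks", "terminate-pod-xyz", storage.PutMessageParameters{}); err != nil {
        log.Fatal(err)
    }

    // Dequeue one message and hide it from other consumers for 30 seconds.
    resp, err := queueCli.GetMessages("tasks", storage.GetMessagesParameters{NumOfMessages: 1, VisibilityTimeout: 30})
    if err != nil {
        log.Fatal(err)
    }
    for _, m := range resp.QueueMessagesList {
        fmt.Println("got:", m.MessageText)
        // Deleting needs both the message ID and the pop receipt from this dequeue.
        if err := queueCli.DeleteMessage("tasks", m.MessageID, m.PopReceipt); err != nil {
            log.Fatal(err)
        }
    }
}
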
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx -func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) error { - uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), url.Values{ - "popreceipt": {popReceipt}}) - resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go deleted file mode 100644 index 39e99750..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go +++ /dev/null @@ -1,129 +0,0 @@ -package storage - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "net/url" -) - -// TableServiceClient contains operations for Microsoft Azure Table Storage -// Service. -type TableServiceClient struct { - client Client -} - -// AzureTable is the typedef of the Azure Table name -type AzureTable string - -const ( - tablesURIPath = "/Tables" -) - -type createTableRequest struct { - TableName string `json:"TableName"` -} - -func pathForTable(table AzureTable) string { return fmt.Sprintf("%s", table) } - -func (c *TableServiceClient) getStandardHeaders() map[string]string { - return map[string]string{ - "x-ms-version": "2015-02-21", - "x-ms-date": currentTimeRfc1123Formatted(), - "Accept": "application/json;odata=nometadata", - "Accept-Charset": "UTF-8", - "Content-Type": "application/json", - } -} - -// QueryTables returns the tables created in the -// *TableServiceClient storage account. -func (c *TableServiceClient) QueryTables() ([]AzureTable, error) { - uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) - - headers := c.getStandardHeaders() - headers["Content-Length"] = "0" - - resp, err := c.client.execTable("GET", uri, headers, nil) - if err != nil { - return nil, err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } - - buf := new(bytes.Buffer) - buf.ReadFrom(resp.body) - - var respArray queryTablesResponse - if err := json.Unmarshal(buf.Bytes(), &respArray); err != nil { - return nil, err - } - - s := make([]AzureTable, len(respArray.TableName)) - for i, elem := range respArray.TableName { - s[i] = AzureTable(elem.TableName) - } - - return s, nil -} - -// CreateTable creates the table given the specific -// name. This function fails if the name is not compliant -// with the specification or the tables already exists. -func (c *TableServiceClient) CreateTable(table AzureTable) error { - uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) - - headers := c.getStandardHeaders() - - req := createTableRequest{TableName: string(table)} - buf := new(bytes.Buffer) - - if err := json.NewEncoder(buf).Encode(req); err != nil { - return err - } - - headers["Content-Length"] = fmt.Sprintf("%d", buf.Len()) - - resp, err := c.client.execTable("POST", uri, headers, buf) - - if err != nil { - return err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { - return err - } - - return nil -} - -// DeleteTable deletes the table given the specific -// name. This function fails if the table is not present. 
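
A sketch of the table-management calls that open this deleted file (CreateTable, QueryTables); the table name is a placeholder.

package main

import (
    "fmt"
    "log"

    "github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
    cli, err := storage.NewBasicClient("myaccount", "bWFkZS11cC1rZXk=") // placeholder credentials
    if err != nil {
        log.Fatal(err)
    }
    tableCli := cli.GetTableService()

    // AzureTable is just a named string type for table names.
    if err := tableCli.CreateTable(storage.AzureTable("chaosruns")); err != nil {
        log.Fatal(err)
    }

    tables, err := tableCli.QueryTables()
    if err != nil {
        log.Fatal(err)
    }
    for _, t := range tables {
        fmt.Println(t)
    }
}
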
-// Be advised: DeleteTable deletes all the entries -// that may be present. -func (c *TableServiceClient) DeleteTable(table AzureTable) error { - uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) - uri += fmt.Sprintf("('%s')", string(table)) - - headers := c.getStandardHeaders() - - headers["Content-Length"] = "0" - - resp, err := c.client.execTable("DELETE", uri, headers, nil) - - if err != nil { - return err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err - - } - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go deleted file mode 100644 index 1b5919cd..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go +++ /dev/null @@ -1,355 +0,0 @@ -package storage - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "reflect" -) - -const ( - partitionKeyNode = "PartitionKey" - rowKeyNode = "RowKey" - tag = "table" - tagIgnore = "-" - continuationTokenPartitionKeyHeader = "X-Ms-Continuation-Nextpartitionkey" - continuationTokenRowHeader = "X-Ms-Continuation-Nextrowkey" - maxTopParameter = 1000 -) - -type queryTablesResponse struct { - TableName []struct { - TableName string `json:"TableName"` - } `json:"value"` -} - -const ( - tableOperationTypeInsert = iota - tableOperationTypeUpdate = iota - tableOperationTypeMerge = iota - tableOperationTypeInsertOrReplace = iota - tableOperationTypeInsertOrMerge = iota -) - -type tableOperation int - -// TableEntity interface specifies -// the functions needed to support -// marshaling and unmarshaling into -// Azure Tables. The struct must only contain -// simple types because Azure Tables do not -// support hierarchy. -type TableEntity interface { - PartitionKey() string - RowKey() string - SetPartitionKey(string) error - SetRowKey(string) error -} - -// ContinuationToken is an opaque (ie not useful to inspect) -// struct that Get... methods can return if there are more -// entries to be returned than the ones already -// returned. Just pass it to the same function to continue -// receiving the remaining entries. -type ContinuationToken struct { - NextPartitionKey string - NextRowKey string -} - -type getTableEntriesResponse struct { - Elements []map[string]interface{} `json:"value"` -} - -// QueryTableEntities queries the specified table and returns the unmarshaled -// entities of type retType. -// top parameter limits the returned entries up to top. Maximum top -// allowed by Azure API is 1000. In case there are more than top entries to be -// returned the function will return a non nil *ContinuationToken. You can call the -// same function again passing the received ContinuationToken as previousContToken -// parameter in order to get the following entries. The query parameter -// is the odata query. To retrieve all the entries pass the empty string. -// The function returns a pointer to a TableEntity slice, the *ContinuationToken -// if there are more entries to be returned and an error in case something went -// wrong. 
-// -// Example: -// entities, cToken, err = tSvc.QueryTableEntities("table", cToken, reflect.TypeOf(entity), 20, "") -func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousContToken *ContinuationToken, retType reflect.Type, top int, query string) ([]TableEntity, *ContinuationToken, error) { - if top > maxTopParameter { - return nil, nil, fmt.Errorf("top accepts at maximum %d elements. Requested %d instead", maxTopParameter, top) - } - - uri := c.client.getEndpoint(tableServiceName, pathForTable(tableName), url.Values{}) - uri += fmt.Sprintf("?$top=%d", top) - if query != "" { - uri += fmt.Sprintf("&$filter=%s", url.QueryEscape(query)) - } - - if previousContToken != nil { - uri += fmt.Sprintf("&NextPartitionKey=%s&NextRowKey=%s", previousContToken.NextPartitionKey, previousContToken.NextRowKey) - } - - headers := c.getStandardHeaders() - - headers["Content-Length"] = "0" - - resp, err := c.client.execTable("GET", uri, headers, nil) - - if err != nil { - return nil, nil, err - } - - contToken := extractContinuationTokenFromHeaders(resp.headers) - - if err != nil { - return nil, contToken, err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, contToken, err - } - - retEntries, err := deserializeEntity(retType, resp.body) - if err != nil { - return nil, contToken, err - } - - return retEntries, contToken, nil -} - -// InsertEntity inserts an entity in the specified table. -// The function fails if there is an entity with the same -// PartitionKey and RowKey in the table. -func (c *TableServiceClient) InsertEntity(table AzureTable, entity TableEntity) error { - var err error - - if sc, err := c.execTable(table, entity, false, "POST"); err != nil { - return checkRespCode(sc, []int{http.StatusCreated}) - } - - return err -} - -func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, specifyKeysInURL bool, method string) (int, error) { - uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{}) - if specifyKeysInURL { - uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey())) - } - - headers := c.getStandardHeaders() - - var buf bytes.Buffer - - if err := injectPartitionAndRowKeys(entity, &buf); err != nil { - return 0, err - } - - headers["Content-Length"] = fmt.Sprintf("%d", buf.Len()) - - var err error - var resp *odataResponse - - resp, err = c.client.execTable(method, uri, headers, &buf) - - if err != nil { - return 0, err - } - - defer resp.body.Close() - - return resp.statusCode, nil -} - -// UpdateEntity updates the contents of an entity with the -// one passed as parameter. The function fails if there is no entity -// with the same PartitionKey and RowKey in the table. -func (c *TableServiceClient) UpdateEntity(table AzureTable, entity TableEntity) error { - var err error - - if sc, err := c.execTable(table, entity, true, "PUT"); err != nil { - return checkRespCode(sc, []int{http.StatusNoContent}) - } - return err -} - -// MergeEntity merges the contents of an entity with the -// one passed as parameter. -// The function fails if there is no entity -// with the same PartitionKey and RowKey in the table. 
-func (c *TableServiceClient) MergeEntity(table AzureTable, entity TableEntity) error { - var err error - - if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil { - return checkRespCode(sc, []int{http.StatusNoContent}) - } - return err -} - -// DeleteEntityWithoutCheck deletes the entity matching by -// PartitionKey and RowKey. There is no check on IfMatch -// parameter so the entity is always deleted. -// The function fails if there is no entity -// with the same PartitionKey and RowKey in the table. -func (c *TableServiceClient) DeleteEntityWithoutCheck(table AzureTable, entity TableEntity) error { - return c.DeleteEntity(table, entity, "*") -} - -// DeleteEntity deletes the entity matching by -// PartitionKey, RowKey and ifMatch field. -// The function fails if there is no entity -// with the same PartitionKey and RowKey in the table or -// the ifMatch is different. -func (c *TableServiceClient) DeleteEntity(table AzureTable, entity TableEntity, ifMatch string) error { - uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{}) - uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey())) - - headers := c.getStandardHeaders() - - headers["Content-Length"] = "0" - headers["If-Match"] = ifMatch - - resp, err := c.client.execTable("DELETE", uri, headers, nil) - - if err != nil { - return err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err - } - - return nil -} - -// InsertOrReplaceEntity inserts an entity in the specified table -// or replaced the existing one. -func (c *TableServiceClient) InsertOrReplaceEntity(table AzureTable, entity TableEntity) error { - var err error - - if sc, err := c.execTable(table, entity, true, "PUT"); err != nil { - return checkRespCode(sc, []int{http.StatusNoContent}) - } - return err -} - -// InsertOrMergeEntity inserts an entity in the specified table -// or merges the existing one. -func (c *TableServiceClient) InsertOrMergeEntity(table AzureTable, entity TableEntity) error { - var err error - - if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil { - return checkRespCode(sc, []int{http.StatusNoContent}) - } - return err -} - -func injectPartitionAndRowKeys(entity TableEntity, buf *bytes.Buffer) error { - if err := json.NewEncoder(buf).Encode(entity); err != nil { - return err - } - - dec := make(map[string]interface{}) - if err := json.NewDecoder(buf).Decode(&dec); err != nil { - return err - } - - // Inject PartitionKey and RowKey - dec[partitionKeyNode] = entity.PartitionKey() - dec[rowKeyNode] = entity.RowKey() - - // Remove tagged fields - // The tag is defined in the const section - // This is useful to avoid storing the PartitionKey and RowKey twice. 
- numFields := reflect.ValueOf(entity).Elem().NumField() - for i := 0; i < numFields; i++ { - f := reflect.ValueOf(entity).Elem().Type().Field(i) - - if f.Tag.Get(tag) == tagIgnore { - // we must look for its JSON name in the dictionary - // as the user can rename it using a tag - jsonName := f.Name - if f.Tag.Get("json") != "" { - jsonName = f.Tag.Get("json") - } - delete(dec, jsonName) - } - } - - buf.Reset() - - if err := json.NewEncoder(buf).Encode(&dec); err != nil { - return err - } - - return nil -} - -func deserializeEntity(retType reflect.Type, reader io.Reader) ([]TableEntity, error) { - buf := new(bytes.Buffer) - - var ret getTableEntriesResponse - if err := json.NewDecoder(reader).Decode(&ret); err != nil { - return nil, err - } - - tEntries := make([]TableEntity, len(ret.Elements)) - - for i, entry := range ret.Elements { - - buf.Reset() - if err := json.NewEncoder(buf).Encode(entry); err != nil { - return nil, err - } - - dec := make(map[string]interface{}) - if err := json.NewDecoder(buf).Decode(&dec); err != nil { - return nil, err - } - - var pKey, rKey string - // strip pk and rk - for key, val := range dec { - switch key { - case partitionKeyNode: - pKey = val.(string) - case rowKeyNode: - rKey = val.(string) - } - } - - delete(dec, partitionKeyNode) - delete(dec, rowKeyNode) - - buf.Reset() - if err := json.NewEncoder(buf).Encode(dec); err != nil { - return nil, err - } - - // Create a empty retType instance - tEntries[i] = reflect.New(retType.Elem()).Interface().(TableEntity) - // Popolate it with the values - if err := json.NewDecoder(buf).Decode(&tEntries[i]); err != nil { - return nil, err - } - - // Reset PartitionKey and RowKey - tEntries[i].SetPartitionKey(pKey) - tEntries[i].SetRowKey(rKey) - } - - return tEntries, nil -} - -func extractContinuationTokenFromHeaders(h http.Header) *ContinuationToken { - ct := ContinuationToken{h.Get(continuationTokenPartitionKeyHeader), h.Get(continuationTokenRowHeader)} - - if ct.NextPartitionKey != "" && ct.NextRowKey != "" { - return &ct - } - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go deleted file mode 100644 index d71c6ce5..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go +++ /dev/null @@ -1,85 +0,0 @@ -package storage - -import ( - "bytes" - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "reflect" - "time" -) - -func (c Client) computeHmac256(message string) string { - h := hmac.New(sha256.New, c.accountKey) - h.Write([]byte(message)) - return base64.StdEncoding.EncodeToString(h.Sum(nil)) -} - -func currentTimeRfc1123Formatted() string { - return timeRfc1123Formatted(time.Now().UTC()) -} - -func timeRfc1123Formatted(t time.Time) string { - return t.Format(http.TimeFormat) -} - -func mergeParams(v1, v2 url.Values) url.Values { - out := url.Values{} - for k, v := range v1 { - out[k] = v - } - for k, v := range v2 { - vals, ok := out[k] - if ok { - vals = append(vals, v...) 
- out[k] = vals - } else { - out[k] = v - } - } - return out -} - -func prepareBlockListRequest(blocks []Block) string { - s := `<?xml version="1.0" encoding="utf-8"?><BlockList>` - for _, v := range blocks { - s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.ID, v.Status) - } - s += `</BlockList>` - return s -} - -func xmlUnmarshal(body io.Reader, v interface{}) error { - data, err := ioutil.ReadAll(body) - if err != nil { - return err - } - return xml.Unmarshal(data, v) -} - -func xmlMarshal(v interface{}) (io.Reader, int, error) { - b, err := xml.Marshal(v) - if err != nil { - return nil, 0, err - } - return bytes.NewReader(b), len(b), nil -} - -func headersFromStruct(v interface{}) map[string]string { - headers := make(map[string]string) - value := reflect.ValueOf(v) - for i := 0; i < value.NumField(); i++ { - key := value.Type().Field(i).Tag.Get("header") - val := value.Field(i).String() - if val != "" { - headers[key] = val - } - } - return headers -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/.gitignore b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/.gitignore deleted file mode 100644 index 66be63a0..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -logrus diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/.travis.yml deleted file mode 100644 index 2d8c0866..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: - - 1.2 - - 1.3 - - 1.4 - - tip -install: - - go get -t ./... diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/CHANGELOG.md deleted file mode 100644 index eb72bff9..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/CHANGELOG.md +++ /dev/null @@ -1,7 +0,0 @@ -# 0.7.3 - -formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/LICENSE deleted file mode 100644 index f090cb42..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/README.md deleted file mode 100644 index d55f9092..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/README.md +++ /dev/null @@ -1,349 +0,0 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc] - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not -yet stable (pre 1.0). Logrus itself is completely stable and has been used in -many large deployments. The core API is unlikely to change much but please -version control your Logrus to make sure you aren't fetching latest `master` on -every build.** - -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text -time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" 
err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -exit status 1 -``` - -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/Sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` -and you'll now have the flexibility of Logrus. You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) - - // Output to stderr instead of stdout, could also be a file. - log.SetOutput(os.Stderr) - - // Only log the warning severity or above. - log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") - - // A common pattern is to re-use fields between logging statements by re-using - // the logrus.Entry returned from WithFields() - contextLogger := log.WithFields(log.Fields{ - "common": "this is a common field", - "other": "I also should be logged always", - }) - - contextLogger.Info("I'll be logged with common and other field") - contextLogger.Info("Me too") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "github.com/Sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stderr - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging though logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus using any of the `printf`-family functions should be -seen as a hint you should add a field, however, you can still use the -`printf`-family functions with Logrus. 
- -#### Hooks - -You can add hooks for logging levels. For example to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to -multiple places simultaneously, e.g. syslog. - -Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in -`init`: - -```go -import ( - log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" - "github.com/Sirupsen/logrus/hooks/syslog" - "log/syslog" -) - -func init() { - log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) - - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - if err != nil { - log.Error("Unable to connect to local syslog daemon") - } else { - log.AddHook(hook) - } -} -``` - - -| Hook | Description | -| ----- | ----------- | -| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. | -| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | -| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | -| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | -| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) | - -#### Level logging - -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. - -```go -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`, then it will only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -#### Entries - -Besides the fields added with `WithField` or `WithFields` some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `AddFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment. - -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. 
For example, if your application has a global -variable `Environment`, which is a string representation of the environment you -could do: - -```go -import ( - log "github.com/Sirupsen/logrus" -) - -init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(logrus.JSONFormatter) - } else { - // The TextFormatter is default, you don't actually have to do this. - log.SetFormatter(logrus.TextFormatter) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. - -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. - * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY set the - `DisableColors` field to `true` -* `logrus.JSONFormatter`. Logs fields as JSON. -* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net). - - ```go - logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: “application_name"}) - ``` - -Third party logging formatters: - -* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. - -You can define your formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see Entries section above): - -```go -type MyJSONFormatter struct { -} - -log.SetFormatter(new(MyJSONFormatter)) - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` on information about those fields or read the - // source of the official loggers. - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} -``` - -#### Logger as an `io.Writer` - -Logrus can be transormed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. - -```go -w := logger.Writer() -defer w.Close() - -srv := http.Server{ - // create a stdlib log.Logger that writes to - // logrus.Logger. - ErrorLog: log.New(w, "", 0), -} -``` - -Each line written to that writer will be printed the usual way, using formatters -and hooks. The level for those entries is `info`. - -#### Rotation - -Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotate(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger. - - -[godoc]: https://godoc.org/github.com/Sirupsen/logrus diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/entry.go deleted file mode 100644 index 17fe6f70..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/entry.go +++ /dev/null @@ -1,252 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "io" - "os" - "time" -) - -// An entry is the final or intermediate Logrus logging entry. 
It contains all -// the fields passed with WithField{,s}. It's finally logged when Debug, Info, -// Warn, Error, Fatal or Panic is called on it. These objects can be reused and -// passed around as much as you wish to avoid field duplication. -type Entry struct { - Logger *Logger - - // Contains all the fields set by the user. - Data Fields - - // Time at which the log entry was created - Time time.Time - - // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic - Level Level - - // Message passed to Debug, Info, Warn, Error, Fatal or Panic - Message string -} - -func NewEntry(logger *Logger) *Entry { - return &Entry{ - Logger: logger, - // Default is three fields, give a little extra room - Data: make(Fields, 5), - } -} - -// Returns a reader for the entry, which is a proxy to the formatter. -func (entry *Entry) Reader() (*bytes.Buffer, error) { - serialized, err := entry.Logger.Formatter.Format(entry) - return bytes.NewBuffer(serialized), err -} - -// Returns the string representation from the reader and ultimately the -// formatter. -func (entry *Entry) String() (string, error) { - reader, err := entry.Reader() - if err != nil { - return "", err - } - - return reader.String(), err -} - -// Add a single field to the Entry. -func (entry *Entry) WithField(key string, value interface{}) *Entry { - return entry.WithFields(Fields{key: value}) -} - -// Add a map of fields to the Entry. -func (entry *Entry) WithFields(fields Fields) *Entry { - data := Fields{} - for k, v := range entry.Data { - data[k] = v - } - for k, v := range fields { - data[k] = v - } - return &Entry{Logger: entry.Logger, Data: data} -} - -func (entry *Entry) log(level Level, msg string) { - entry.Time = time.Now() - entry.Level = level - entry.Message = msg - - if err := entry.Logger.Hooks.Fire(level, entry); err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) - entry.Logger.mu.Unlock() - } - - reader, err := entry.Reader() - if err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - entry.Logger.mu.Unlock() - } - - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - - _, err = io.Copy(entry.Logger.Out, reader) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } - - // To avoid Entry#log() returning a value that only would make sense for - // panic() to use in Entry#Panic(), we avoid the allocation by checking - // directly here. - if level <= PanicLevel { - panic(entry) - } -} - -func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.log(DebugLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Print(args ...interface{}) { - entry.Info(args...) -} - -func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.log(InfoLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.log(WarnLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warning(args ...interface{}) { - entry.Warn(args...) 
-} - -func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.log(ErrorLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.log(FatalLevel, fmt.Sprint(args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.log(PanicLevel, fmt.Sprint(args...)) - } - panic(fmt.Sprint(args...)) -} - -// Entry Printf family functions - -func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Printf(format string, args ...interface{}) { - entry.Infof(format, args...) -} - -func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Warningf(format string, args ...interface{}) { - entry.Warnf(format, args...) -} - -func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(fmt.Sprintf(format, args...)) - } -} - -// Entry Println family functions - -func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Println(args ...interface{}) { - entry.Infoln(args...) -} - -func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Warningln(args ...interface{}) { - entry.Warnln(args...) -} - -func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(entry.sprintlnn(args...)) - } -} - -// Sprintlnn => Sprint no newline. This is to get the behavior of how -// fmt.Sprintln where spaces are always added between operands, regardless of -// their type. Instead of vendoring the Sprintln implementation to spare a -// string allocation, we do the simplest thing. -func (entry *Entry) sprintlnn(args ...interface{}) string { - msg := fmt.Sprintln(args...) 
- return msg[:len(msg)-1] -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/exported.go deleted file mode 100644 index a67e1b80..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/exported.go +++ /dev/null @@ -1,188 +0,0 @@ -package logrus - -import ( - "io" -) - -var ( - // std is the name of the standard logger in stdlib `log` - std = New() -) - -func StandardLogger() *Logger { - return std -} - -// SetOutput sets the standard logger output. -func SetOutput(out io.Writer) { - std.mu.Lock() - defer std.mu.Unlock() - std.Out = out -} - -// SetFormatter sets the standard logger formatter. -func SetFormatter(formatter Formatter) { - std.mu.Lock() - defer std.mu.Unlock() - std.Formatter = formatter -} - -// SetLevel sets the standard logger level. -func SetLevel(level Level) { - std.mu.Lock() - defer std.mu.Unlock() - std.Level = level -} - -// GetLevel returns the standard logger level. -func GetLevel() Level { - std.mu.Lock() - defer std.mu.Unlock() - return std.Level -} - -// AddHook adds a hook to the standard logger hooks. -func AddHook(hook Hook) { - std.mu.Lock() - defer std.mu.Unlock() - std.Hooks.Add(hook) -} - -// WithField creates an entry from the standard logger and adds a field to -// it. If you want multiple fields, use `WithFields`. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithField(key string, value interface{}) *Entry { - return std.WithField(key, value) -} - -// WithFields creates an entry from the standard logger and adds multiple -// fields to it. This is simply a helper for `WithField`, invoking it -// once for each field. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithFields(fields Fields) *Entry { - return std.WithFields(fields) -} - -// Debug logs a message at level Debug on the standard logger. -func Debug(args ...interface{}) { - std.Debug(args...) -} - -// Print logs a message at level Info on the standard logger. -func Print(args ...interface{}) { - std.Print(args...) -} - -// Info logs a message at level Info on the standard logger. -func Info(args ...interface{}) { - std.Info(args...) -} - -// Warn logs a message at level Warn on the standard logger. -func Warn(args ...interface{}) { - std.Warn(args...) -} - -// Warning logs a message at level Warn on the standard logger. -func Warning(args ...interface{}) { - std.Warning(args...) -} - -// Error logs a message at level Error on the standard logger. -func Error(args ...interface{}) { - std.Error(args...) -} - -// Panic logs a message at level Panic on the standard logger. -func Panic(args ...interface{}) { - std.Panic(args...) -} - -// Fatal logs a message at level Fatal on the standard logger. -func Fatal(args ...interface{}) { - std.Fatal(args...) -} - -// Debugf logs a message at level Debug on the standard logger. -func Debugf(format string, args ...interface{}) { - std.Debugf(format, args...) -} - -// Printf logs a message at level Info on the standard logger. -func Printf(format string, args ...interface{}) { - std.Printf(format, args...) -} - -// Infof logs a message at level Info on the standard logger. -func Infof(format string, args ...interface{}) { - std.Infof(format, args...) -} - -// Warnf logs a message at level Warn on the standard logger. 
-func Warnf(format string, args ...interface{}) { - std.Warnf(format, args...) -} - -// Warningf logs a message at level Warn on the standard logger. -func Warningf(format string, args ...interface{}) { - std.Warningf(format, args...) -} - -// Errorf logs a message at level Error on the standard logger. -func Errorf(format string, args ...interface{}) { - std.Errorf(format, args...) -} - -// Panicf logs a message at level Panic on the standard logger. -func Panicf(format string, args ...interface{}) { - std.Panicf(format, args...) -} - -// Fatalf logs a message at level Fatal on the standard logger. -func Fatalf(format string, args ...interface{}) { - std.Fatalf(format, args...) -} - -// Debugln logs a message at level Debug on the standard logger. -func Debugln(args ...interface{}) { - std.Debugln(args...) -} - -// Println logs a message at level Info on the standard logger. -func Println(args ...interface{}) { - std.Println(args...) -} - -// Infoln logs a message at level Info on the standard logger. -func Infoln(args ...interface{}) { - std.Infoln(args...) -} - -// Warnln logs a message at level Warn on the standard logger. -func Warnln(args ...interface{}) { - std.Warnln(args...) -} - -// Warningln logs a message at level Warn on the standard logger. -func Warningln(args ...interface{}) { - std.Warningln(args...) -} - -// Errorln logs a message at level Error on the standard logger. -func Errorln(args ...interface{}) { - std.Errorln(args...) -} - -// Panicln logs a message at level Panic on the standard logger. -func Panicln(args ...interface{}) { - std.Panicln(args...) -} - -// Fatalln logs a message at level Fatal on the standard logger. -func Fatalln(args ...interface{}) { - std.Fatalln(args...) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/formatter.go deleted file mode 100644 index 104d689f..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/formatter.go +++ /dev/null @@ -1,48 +0,0 @@ -package logrus - -import "time" - -const DefaultTimestampFormat = time.RFC3339 - -// The Formatter interface is used to implement a custom Formatter. It takes an -// `Entry`. It exposes all the fields, including the default ones: -// -// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. -// * `entry.Data["time"]`. The timestamp. -// * `entry.Data["level"]. The level the entry was logged at. -// -// Any additional fields added with `WithField` or `WithFields` are also in -// `entry.Data`. Format is expected to return an array of bytes which are then -// logged to `logger.Out`. -type Formatter interface { - Format(*Entry) ([]byte, error) -} - -// This is to not silently overwrite `time`, `msg` and `level` fields when -// dumping it. If this code wasn't there doing: -// -// logrus.WithField("level", 1).Info("hello") -// -// Would just silently drop the user provided level. Instead with this code -// it'll logged as: -// -// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} -// -// It's not exported because it's still using Data in an opinionated way. It's to -// avoid code duplication between the two default formatters. 
-func prefixFieldClashes(data Fields) { - _, ok := data["time"] - if ok { - data["fields.time"] = data["time"] - } - - _, ok = data["msg"] - if ok { - data["fields.msg"] = data["msg"] - } - - _, ok = data["level"] - if ok { - data["fields.level"] = data["level"] - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/formatters/logstash/logstash.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/formatters/logstash/logstash.go deleted file mode 100644 index 8ea93ddf..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/formatters/logstash/logstash.go +++ /dev/null @@ -1,56 +0,0 @@ -package logstash - -import ( - "encoding/json" - "fmt" - - "github.com/Sirupsen/logrus" -) - -// Formatter generates json in logstash format. -// Logstash site: http://logstash.net/ -type LogstashFormatter struct { - Type string // if not empty use for logstash type field. - - // TimestampFormat sets the format used for timestamps. - TimestampFormat string -} - -func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) { - entry.Data["@version"] = 1 - - if f.TimestampFormat == "" { - f.TimestampFormat = logrus.DefaultTimestampFormat - } - - entry.Data["@timestamp"] = entry.Time.Format(f.TimestampFormat) - - // set message field - v, ok := entry.Data["message"] - if ok { - entry.Data["fields.message"] = v - } - entry.Data["message"] = entry.Message - - // set level field - v, ok = entry.Data["level"] - if ok { - entry.Data["fields.level"] = v - } - entry.Data["level"] = entry.Level.String() - - // set type field - if f.Type != "" { - v, ok = entry.Data["type"] - if ok { - entry.Data["fields.type"] = v - } - entry.Data["type"] = f.Type - } - - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/hooks.go deleted file mode 100644 index 0da2b365..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/hooks.go +++ /dev/null @@ -1,34 +0,0 @@ -package logrus - -// A hook to be fired when logging on the logging levels returned from -// `Levels()` on your implementation of the interface. Note that this is not -// fired in a goroutine or a channel with workers, you should handle such -// functionality yourself if your call is non-blocking and you don't wish for -// the logging calls for levels returned from `Levels()` to block. -type Hook interface { - Levels() []Level - Fire(*Entry) error -} - -// Internal type for storing the hooks on a logger instance. -type levelHooks map[Level][]Hook - -// Add a hook to an instance of logger. This is called with -// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. -func (hooks levelHooks) Add(hook Hook) { - for _, level := range hook.Levels() { - hooks[level] = append(hooks[level], hook) - } -} - -// Fire all the hooks for the passed level. Used by `entry.log` to fire -// appropriate hooks for a log entry. 
-func (hooks levelHooks) Fire(level Level, entry *Entry) error { - for _, hook := range hooks[level] { - if err := hook.Fire(entry); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/json_formatter.go deleted file mode 100644 index dcc4f1d9..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/json_formatter.go +++ /dev/null @@ -1,40 +0,0 @@ -package logrus - -import ( - "encoding/json" - "fmt" -) - -type JSONFormatter struct { - // TimestampFormat sets the format used for marshaling timestamps. - TimestampFormat string -} - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+3) - for k, v := range entry.Data { - switch v := v.(type) { - case error: - // Otherwise errors are ignored by `encoding/json` - // https://github.com/Sirupsen/logrus/issues/137 - data[k] = v.Error() - default: - data[k] = v - } - } - prefixFieldClashes(data) - - if f.TimestampFormat == "" { - f.TimestampFormat = DefaultTimestampFormat - } - - data["time"] = entry.Time.Format(f.TimestampFormat) - data["msg"] = entry.Message - data["level"] = entry.Level.String() - - serialized, err := json.Marshal(data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/logger.go deleted file mode 100644 index da928a37..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/logger.go +++ /dev/null @@ -1,203 +0,0 @@ -package logrus - -import ( - "io" - "os" - "sync" -) - -type Logger struct { - // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a - // file, or leave it default which is `os.Stdout`. You can also set this to - // something more adventorous, such as logging to Kafka. - Out io.Writer - // Hooks for the logger instance. These allow firing events based on logging - // levels and log entries. For example, to send errors to an error tracking - // service, log to StatsD or dump the core on fatal errors. - Hooks levelHooks - // All log entries pass through the formatter before logged to Out. The - // included formatters are `TextFormatter` and `JSONFormatter` for which - // TextFormatter is the default. In development (when a TTY is attached) it - // logs with colors, but to a file it wouldn't. You can easily implement your - // own that implements the `Formatter` interface, see the `README` or included - // formatters for examples. - Formatter Formatter - // The logging level the logger should log at. This is typically (and defaults - // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be - // logged. `logrus.Debug` is useful in - Level Level - // Used to sync writing to the log. - mu sync.Mutex -} - -// Creates a new logger. Configuration should be set by changing `Formatter`, -// `Out` and `Hooks` directly on the default logger instance. You can also just -// instantiate your own: -// -// var log = &Logger{ -// Out: os.Stderr, -// Formatter: new(JSONFormatter), -// Hooks: make(levelHooks), -// Level: logrus.DebugLevel, -// } -// -// It's recommended to make this a global instance called `log`. 
-func New() *Logger { - return &Logger{ - Out: os.Stdout, - Formatter: new(TextFormatter), - Hooks: make(levelHooks), - Level: InfoLevel, - } -} - -// Adds a field to the log entry, note that you it doesn't log until you call -// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. -// Ff you want multiple fields, use `WithFields`. -func (logger *Logger) WithField(key string, value interface{}) *Entry { - return NewEntry(logger).WithField(key, value) -} - -// Adds a struct of fields to the log entry. All it does is call `WithField` for -// each `Field`. -func (logger *Logger) WithFields(fields Fields) *Entry { - return NewEntry(logger).WithFields(fields) -} - -func (logger *Logger) Debugf(format string, args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debugf(format, args...) - } -} - -func (logger *Logger) Infof(format string, args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Infof(format, args...) - } -} - -func (logger *Logger) Printf(format string, args ...interface{}) { - NewEntry(logger).Printf(format, args...) -} - -func (logger *Logger) Warnf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnf(format, args...) - } -} - -func (logger *Logger) Warningf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnf(format, args...) - } -} - -func (logger *Logger) Errorf(format string, args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorf(format, args...) - } -} - -func (logger *Logger) Fatalf(format string, args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalf(format, args...) - } -} - -func (logger *Logger) Panicf(format string, args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicf(format, args...) - } -} - -func (logger *Logger) Debug(args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debug(args...) - } -} - -func (logger *Logger) Info(args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Info(args...) - } -} - -func (logger *Logger) Print(args ...interface{}) { - NewEntry(logger).Info(args...) -} - -func (logger *Logger) Warn(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warn(args...) - } -} - -func (logger *Logger) Warning(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warn(args...) - } -} - -func (logger *Logger) Error(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Error(args...) - } -} - -func (logger *Logger) Fatal(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatal(args...) - } -} - -func (logger *Logger) Panic(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panic(args...) - } -} - -func (logger *Logger) Debugln(args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debugln(args...) - } -} - -func (logger *Logger) Infoln(args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Infoln(args...) - } -} - -func (logger *Logger) Println(args ...interface{}) { - NewEntry(logger).Println(args...) -} - -func (logger *Logger) Warnln(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnln(args...) - } -} - -func (logger *Logger) Warningln(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnln(args...) 
- } -} - -func (logger *Logger) Errorln(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorln(args...) - } -} - -func (logger *Logger) Fatalln(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalln(args...) - } -} - -func (logger *Logger) Panicln(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicln(args...) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/logrus.go deleted file mode 100644 index 43ee12e9..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/logrus.go +++ /dev/null @@ -1,94 +0,0 @@ -package logrus - -import ( - "fmt" - "log" -) - -// Fields type, used to pass to `WithFields`. -type Fields map[string]interface{} - -// Level type -type Level uint8 - -// Convert the Level to a string. E.g. PanicLevel becomes "panic". -func (level Level) String() string { - switch level { - case DebugLevel: - return "debug" - case InfoLevel: - return "info" - case WarnLevel: - return "warning" - case ErrorLevel: - return "error" - case FatalLevel: - return "fatal" - case PanicLevel: - return "panic" - } - - return "unknown" -} - -// ParseLevel takes a string level and returns the Logrus log level constant. -func ParseLevel(lvl string) (Level, error) { - switch lvl { - case "panic": - return PanicLevel, nil - case "fatal": - return FatalLevel, nil - case "error": - return ErrorLevel, nil - case "warn", "warning": - return WarnLevel, nil - case "info": - return InfoLevel, nil - case "debug": - return DebugLevel, nil - } - - var l Level - return l, fmt.Errorf("not a valid logrus Level: %q", lvl) -} - -// These are the different logging levels. You can set the logging level to log -// on your instance of logger, obtained with `logrus.New()`. -const ( - // PanicLevel level, highest level of severity. Logs and then calls panic with the - // message passed to Debug, Info, ... - PanicLevel Level = iota - // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the - // logging level is set to Panic. - FatalLevel - // ErrorLevel level. Logs. Used for errors that should definitely be noted. - // Commonly used for hooks to send errors to an error tracking service. - ErrorLevel - // WarnLevel level. Non-critical entries that deserve eyes. - WarnLevel - // InfoLevel level. General operational entries about what's going on inside the - // application. - InfoLevel - // DebugLevel level. Usually only enabled when debugging. Very verbose logging. - DebugLevel -) - -// Won't compile if StdLogger can't be realized by a log.Logger -var _ StdLogger = &log.Logger{} - -// StdLogger is what your logrus-enabled library should take, that way -// it'll accept a stdlib logger and a logrus logger. There's no standard -// interface, this is the closest we get, unfortunately. 
-type StdLogger interface { - Print(...interface{}) - Printf(string, ...interface{}) - Println(...interface{}) - - Fatal(...interface{}) - Fatalf(string, ...interface{}) - Fatalln(...interface{}) - - Panic(...interface{}) - Panicf(string, ...interface{}) - Panicln(...interface{}) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_darwin.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_darwin.go deleted file mode 100644 index 8fe02a4a..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_darwin.go +++ /dev/null @@ -1,12 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go deleted file mode 100644 index 0428ee5d..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin. -*/ -package logrus - -import ( - "syscall" -) - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]uint8 - Ispeed uint32 - Ospeed uint32 -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_linux.go deleted file mode 100644 index a2c0b40d..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_linux.go +++ /dev/null @@ -1,12 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TCGETS - -type Termios syscall.Termios diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go deleted file mode 100644 index b8bebc13..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go +++ /dev/null @@ -1,21 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux darwin freebsd openbsd - -package logrus - -import ( - "syscall" - "unsafe" -) - -// IsTerminal returns true if the given file descriptor is a terminal. 
-func IsTerminal() bool { - fd := syscall.Stdout - var termios Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go deleted file mode 100644 index af609a53..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go +++ /dev/null @@ -1,7 +0,0 @@ -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_windows.go deleted file mode 100644 index 2e09f6f7..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/terminal_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package logrus - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stdout - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/text_formatter.go deleted file mode 100644 index 612417ff..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/text_formatter.go +++ /dev/null @@ -1,149 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "sort" - "strings" - "time" -) - -const ( - nocolor = 0 - red = 31 - green = 32 - yellow = 33 - blue = 34 - gray = 37 -) - -var ( - baseTimestamp time.Time - isTerminal bool -) - -func init() { - baseTimestamp = time.Now() - isTerminal = IsTerminal() -} - -func miniTS() int { - return int(time.Since(baseTimestamp) / time.Second) -} - -type TextFormatter struct { - // Set to true to bypass checking for a TTY before outputting colors. - ForceColors bool - - // Force disabling colors. - DisableColors bool - - // Disable timestamp logging. useful when output is redirected to logging - // system that already adds timestamps. - DisableTimestamp bool - - // Enable logging the full timestamp when a TTY is attached instead of just - // the time passed since beginning of execution. - FullTimestamp bool - - // TimestampFormat to use for display when a full timestamp is printed - TimestampFormat string - - // The fields are sorted by default for a consistent output. For applications - // that log extremely frequently and don't use the JSON formatter this may not - // be desired. 
- DisableSorting bool -} - -func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - var keys []string = make([]string, 0, len(entry.Data)) - for k := range entry.Data { - keys = append(keys, k) - } - - if !f.DisableSorting { - sort.Strings(keys) - } - - b := &bytes.Buffer{} - - prefixFieldClashes(entry.Data) - - isColored := (f.ForceColors || isTerminal) && !f.DisableColors - - if f.TimestampFormat == "" { - f.TimestampFormat = DefaultTimestampFormat - } - if isColored { - f.printColored(b, entry, keys) - } else { - if !f.DisableTimestamp { - f.appendKeyValue(b, "time", entry.Time.Format(f.TimestampFormat)) - } - f.appendKeyValue(b, "level", entry.Level.String()) - f.appendKeyValue(b, "msg", entry.Message) - for _, key := range keys { - f.appendKeyValue(b, key, entry.Data[key]) - } - } - - b.WriteByte('\n') - return b.Bytes(), nil -} - -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string) { - var levelColor int - switch entry.Level { - case DebugLevel: - levelColor = gray - case WarnLevel: - levelColor = yellow - case ErrorLevel, FatalLevel, PanicLevel: - levelColor = red - default: - levelColor = blue - } - - levelText := strings.ToUpper(entry.Level.String())[0:4] - - if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) - } else { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(f.TimestampFormat), entry.Message) - } - for _, k := range keys { - v := entry.Data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v) - } -} - -func needsQuoting(text string) bool { - for _, ch := range text { - if !((ch >= 'a' && ch <= 'z') || - (ch >= 'A' && ch <= 'Z') || - (ch >= '0' && ch <= '9') || - ch == '-' || ch == '.') { - return false - } - } - return true -} - -func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) { - switch value.(type) { - case string: - if needsQuoting(value.(string)) { - fmt.Fprintf(b, "%v=%s ", key, value) - } else { - fmt.Fprintf(b, "%v=%q ", key, value) - } - case error: - if needsQuoting(value.(error).Error()) { - fmt.Fprintf(b, "%v=%s ", key, value) - } else { - fmt.Fprintf(b, "%v=%q ", key, value) - } - default: - fmt.Fprintf(b, "%v=%v ", key, value) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/writer.go deleted file mode 100644 index 1e30b1c7..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/Sirupsen/logrus/writer.go +++ /dev/null @@ -1,31 +0,0 @@ -package logrus - -import ( - "bufio" - "io" - "runtime" -) - -func (logger *Logger) Writer() *io.PipeWriter { - reader, writer := io.Pipe() - - go logger.writerScanner(reader) - runtime.SetFinalizer(writer, writerFinalizer) - - return writer -} - -func (logger *Logger) writerScanner(reader *io.PipeReader) { - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - logger.Print(scanner.Text()) - } - if err := scanner.Err(); err != nil { - logger.Errorf("Error while reading from Writer: %s", err) - } - reader.Close() -} - -func writerFinalizer(writer *io.PipeWriter) { - writer.Close() -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/LICENSE.txt deleted file mode 100644 index d6456956..00000000 --- 
a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/NOTICE.txt deleted file mode 100644 index 5f14d116..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/NOTICE.txt +++ /dev/null @@ -1,3 +0,0 @@ -AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. -Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go deleted file mode 100644 index e50771f8..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go +++ /dev/null @@ -1,145 +0,0 @@ -// Package awserr represents API error interface accessors for the SDK. -package awserr - -// An Error wraps lower level errors with code, message and an original error. -// The underlying concrete error type may also satisfy other interfaces which -// can be to used to obtain more specific information about the error. -// -// Calling Error() or String() will always include the full information about -// an error based on its underlying type. -// -// Example: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Get error details -// log.Println("Error:", awsErr.Code(), awsErr.Message()) -// -// // Prints out full error message, including original error if there was one. -// log.Println("Error:", awsErr.Error()) -// -// // Get original error -// if origErr := awsErr.OrigErr(); origErr != nil { -// // operate on original error. -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// -type Error interface { - // Satisfy the generic error interface. - error - - // Returns the short phrase depicting the classification of the error. - Code() string - - // Returns the error details message. - Message() string - - // Returns the original error if one was set. Nil is returned if not set. - OrigErr() error -} - -// BatchError is a batch of errors which also wraps lower level errors with -// code, message, and original errors. Calling Error() will include all errors -// that occured in the batch. -// -// Deprecated: Replaced with BatchedErrors. Only defined for backwards -// compatibility. -type BatchError interface { - // Satisfy the generic error interface. - error - - // Returns the short phrase depicting the classification of the error. - Code() string - - // Returns the error details message. - Message() string - - // Returns the original error if one was set. Nil is returned if not set. - OrigErrs() []error -} - -// BatchedErrors is a batch of errors which also wraps lower level errors with -// code, message, and original errors. Calling Error() will include all errors -// that occured in the batch. -// -// Replaces BatchError -type BatchedErrors interface { - // Satisfy the base Error interface. - Error - - // Returns the original error if one was set. Nil is returned if not set. 
- OrigErrs() []error -} - -// New returns an Error object described by the code, message, and origErr. -// -// If origErr satisfies the Error interface it will not be wrapped within a new -// Error object and will instead be returned. -func New(code, message string, origErr error) Error { - var errs []error - if origErr != nil { - errs = append(errs, origErr) - } - return newBaseError(code, message, errs) -} - -// NewBatchError returns an BatchedErrors with a collection of errors as an -// array of errors. -func NewBatchError(code, message string, errs []error) BatchedErrors { - return newBaseError(code, message, errs) -} - -// A RequestFailure is an interface to extract request failure information from -// an Error such as the request ID of the failed request returned by a service. -// RequestFailures may not always have a requestID value if the request failed -// prior to reaching the service such as a connection error. -// -// Example: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if reqerr, ok := err.(RequestFailure); ok { -// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) -// } else { -// log.Println("Error:", err.Error()) -// } -// } -// -// Combined with awserr.Error: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Generic AWS Error with Code, Message, and original error (if any) -// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) -// -// if reqErr, ok := err.(awserr.RequestFailure); ok { -// // A service error occurred -// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// -type RequestFailure interface { - Error - - // The status code of the HTTP response. - StatusCode() int - - // The request ID returned by the service for a request failure. This will - // be empty if no request ID is available such as the request failed due - // to a connection error. - RequestID() string -} - -// NewRequestFailure returns a new request error wrapper for the given Error -// provided. -func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { - return newRequestError(err, statusCode, reqID) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go deleted file mode 100644 index e2d333b8..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go +++ /dev/null @@ -1,194 +0,0 @@ -package awserr - -import "fmt" - -// SprintError returns a string of the formatted error code. -// -// Both extra and origErr are optional. If they are included their lines -// will be added, but if they are not included their lines will be ignored. -func SprintError(code, message, extra string, origErr error) string { - msg := fmt.Sprintf("%s: %s", code, message) - if extra != "" { - msg = fmt.Sprintf("%s\n\t%s", msg, extra) - } - if origErr != nil { - msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) - } - return msg -} - -// A baseError wraps the code and message which defines an error. It also -// can be used to wrap an original error object. -// -// Should be used as the root for errors satisfying the awserr.Error. Also -// for any error which does not fit into a specific error wrapper type. 
-type baseError struct { - // Classification of error - code string - - // Detailed information about error - message string - - // Optional original error this error is based off of. Allows building - // chained errors. - errs []error -} - -// newBaseError returns an error object for the code, message, and errors. -// -// code is a short no whitespace phrase depicting the classification of -// the error that is being created. -// -// message is the free flow string containing detailed information about the -// error. -// -// origErrs is the error objects which will be nested under the new errors to -// be returned. -func newBaseError(code, message string, origErrs []error) *baseError { - b := &baseError{ - code: code, - message: message, - errs: origErrs, - } - - return b -} - -// Error returns the string representation of the error. -// -// See ErrorWithExtra for formatting. -// -// Satisfies the error interface. -func (b baseError) Error() string { - size := len(b.errs) - if size > 0 { - return SprintError(b.code, b.message, "", errorList(b.errs)) - } - - return SprintError(b.code, b.message, "", nil) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (b baseError) String() string { - return b.Error() -} - -// Code returns the short phrase depicting the classification of the error. -func (b baseError) Code() string { - return b.code -} - -// Message returns the error details message. -func (b baseError) Message() string { - return b.message -} - -// OrigErr returns the original error if one was set. Nil is returned if no -// error was set. This only returns the first element in the list. If the full -// list is needed, use BatchedErrors. -func (b baseError) OrigErr() error { - switch len(b.errs) { - case 0: - return nil - case 1: - return b.errs[0] - default: - if err, ok := b.errs[0].(Error); ok { - return NewBatchError(err.Code(), err.Message(), b.errs[1:]) - } - return NewBatchError("BatchedErrors", - "multiple errors occured", b.errs) - } -} - -// OrigErrs returns the original errors if one was set. An empty slice is -// returned if no error was set. -func (b baseError) OrigErrs() []error { - return b.errs -} - -// So that the Error interface type can be included as an anonymous field -// in the requestError struct and not conflict with the error.Error() method. -type awsError Error - -// A requestError wraps a request or service error. -// -// Composed of baseError for code, message, and original error. -type requestError struct { - awsError - statusCode int - requestID string -} - -// newRequestError returns a wrapped error with additional information for -// request status code, and service requestID. -// -// Should be used to wrap all request which involve service requests. Even if -// the request failed without a service response, but had an HTTP status code -// that may be meaningful. -// -// Also wraps original errors via the baseError. -func newRequestError(err Error, statusCode int, requestID string) *requestError { - return &requestError{ - awsError: err, - statusCode: statusCode, - requestID: requestID, - } -} - -// Error returns the string representation of the error. -// Satisfies the error interface. -func (r requestError) Error() string { - extra := fmt.Sprintf("status code: %d, request id: %s", - r.statusCode, r.requestID) - return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) -} - -// String returns the string representation of the error. 
-// Alias for Error to satisfy the stringer interface. -func (r requestError) String() string { - return r.Error() -} - -// StatusCode returns the wrapped status code for the error -func (r requestError) StatusCode() int { - return r.statusCode -} - -// RequestID returns the wrapped requestID -func (r requestError) RequestID() string { - return r.requestID -} - -// OrigErrs returns the original errors if one was set. An empty slice is -// returned if no error was set. -func (r requestError) OrigErrs() []error { - if b, ok := r.awsError.(BatchedErrors); ok { - return b.OrigErrs() - } - return []error{r.OrigErr()} -} - -// An error list that satisfies the golang interface -type errorList []error - -// Error returns the string representation of the error. -// -// Satisfies the error interface. -func (e errorList) Error() string { - msg := "" - // How do we want to handle the array size being zero - if size := len(e); size > 0 { - for i := 0; i < size; i++ { - msg += fmt.Sprintf("%s", e[i].Error()) - // We check the next index to see if it is within the slice. - // If it is, then we append a newline. We do this, because unit tests - // could be broken with the additional '\n' - if i+1 < size { - msg += "\n" - } - } - } - return msg -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go deleted file mode 100644 index 8429470b..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go +++ /dev/null @@ -1,100 +0,0 @@ -package awsutil - -import ( - "io" - "reflect" -) - -// Copy deeply copies a src structure to dst. Useful for copying request and -// response structures. -// -// Can copy between structs of different type, but will only copy fields which -// are assignable, and exist in both structs. Fields which are not assignable, -// or do not exist in both structs are ignored. -func Copy(dst, src interface{}) { - dstval := reflect.ValueOf(dst) - if !dstval.IsValid() { - panic("Copy dst cannot be nil") - } - - rcopy(dstval, reflect.ValueOf(src), true) -} - -// CopyOf returns a copy of src while also allocating the memory for dst. -// src must be a pointer type or this operation will fail. -func CopyOf(src interface{}) (dst interface{}) { - dsti := reflect.New(reflect.TypeOf(src).Elem()) - dst = dsti.Interface() - rcopy(dsti, reflect.ValueOf(src), true) - return -} - -// rcopy performs a recursive copy of values from the source to destination. -// -// root is used to skip certain aspects of the copy which are not valid -// for the root node of a object. 
-func rcopy(dst, src reflect.Value, root bool) { - if !src.IsValid() { - return - } - - switch src.Kind() { - case reflect.Ptr: - if _, ok := src.Interface().(io.Reader); ok { - if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { - dst.Elem().Set(src) - } else if dst.CanSet() { - dst.Set(src) - } - } else { - e := src.Type().Elem() - if dst.CanSet() && !src.IsNil() { - dst.Set(reflect.New(e)) - } - if src.Elem().IsValid() { - // Keep the current root state since the depth hasn't changed - rcopy(dst.Elem(), src.Elem(), root) - } - } - case reflect.Struct: - t := dst.Type() - for i := 0; i < t.NumField(); i++ { - name := t.Field(i).Name - srcVal := src.FieldByName(name) - dstVal := dst.FieldByName(name) - if srcVal.IsValid() && dstVal.CanSet() { - rcopy(dstVal, srcVal, false) - } - } - case reflect.Slice: - if src.IsNil() { - break - } - - s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) - dst.Set(s) - for i := 0; i < src.Len(); i++ { - rcopy(dst.Index(i), src.Index(i), false) - } - case reflect.Map: - if src.IsNil() { - break - } - - s := reflect.MakeMap(src.Type()) - dst.Set(s) - for _, k := range src.MapKeys() { - v := src.MapIndex(k) - v2 := reflect.New(v.Type()).Elem() - rcopy(v2, v, false) - dst.SetMapIndex(k, v2) - } - default: - // Assign the value if possible. If its not assignable, the value would - // need to be converted and the impact of that may be unexpected, or is - // not compatible with the dst type. - if src.Type().AssignableTo(dst.Type()) { - dst.Set(src) - } - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go deleted file mode 100644 index 59fa4a55..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go +++ /dev/null @@ -1,27 +0,0 @@ -package awsutil - -import ( - "reflect" -) - -// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. -// In addition to this, this method will also dereference the input values if -// possible so the DeepEqual performed will not fail if one parameter is a -// pointer and the other is not. -// -// DeepEqual will not perform indirection of nested values of the input parameters. -func DeepEqual(a, b interface{}) bool { - ra := reflect.Indirect(reflect.ValueOf(a)) - rb := reflect.Indirect(reflect.ValueOf(b)) - - if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { - // If the elements are both nil, and of the same type the are equal - // If they are of different types they are not equal - return reflect.TypeOf(a) == reflect.TypeOf(b) - } else if raValid != rbValid { - // Both values must be valid to be equal - return false - } - - return reflect.DeepEqual(ra.Interface(), rb.Interface()) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go deleted file mode 100644 index 4d2a01e8..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go +++ /dev/null @@ -1,222 +0,0 @@ -package awsutil - -import ( - "reflect" - "regexp" - "strconv" - "strings" - - "github.com/jmespath/go-jmespath" -) - -var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) - -// rValuesAtPath returns a slice of values found in value v. 
The values -// in v are explored recursively so all nested values are collected. -func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { - pathparts := strings.Split(path, "||") - if len(pathparts) > 1 { - for _, pathpart := range pathparts { - vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) - if len(vals) > 0 { - return vals - } - } - return nil - } - - values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} - components := strings.Split(path, ".") - for len(values) > 0 && len(components) > 0 { - var index *int64 - var indexStar bool - c := strings.TrimSpace(components[0]) - if c == "" { // no actual component, illegal syntax - return nil - } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { - // TODO normalize case for user - return nil // don't support unexported fields - } - - // parse this component - if m := indexRe.FindStringSubmatch(c); m != nil { - c = m[1] - if m[2] == "" { - index = nil - indexStar = true - } else { - i, _ := strconv.ParseInt(m[2], 10, 32) - index = &i - indexStar = false - } - } - - nextvals := []reflect.Value{} - for _, value := range values { - // pull component name out of struct member - if value.Kind() != reflect.Struct { - continue - } - - if c == "*" { // pull all members - for i := 0; i < value.NumField(); i++ { - if f := reflect.Indirect(value.Field(i)); f.IsValid() { - nextvals = append(nextvals, f) - } - } - continue - } - - value = value.FieldByNameFunc(func(name string) bool { - if c == name { - return true - } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { - return true - } - return false - }) - - if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { - if !value.IsNil() { - value.Set(reflect.Zero(value.Type())) - } - return []reflect.Value{value} - } - - if createPath && value.Kind() == reflect.Ptr && value.IsNil() { - // TODO if the value is the terminus it should not be created - // if the value to be set to its position is nil. - value.Set(reflect.New(value.Type().Elem())) - value = value.Elem() - } else { - value = reflect.Indirect(value) - } - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - - if indexStar || index != nil { - nextvals = []reflect.Value{} - for _, value := range values { - value := reflect.Indirect(value) - if value.Kind() != reflect.Slice { - continue - } - - if indexStar { // grab all indices - for i := 0; i < value.Len(); i++ { - idx := reflect.Indirect(value.Index(i)) - if idx.IsValid() { - nextvals = append(nextvals, idx) - } - } - continue - } - - // pull out index - i := int(*index) - if i >= value.Len() { // check out of bounds - if createPath { - // TODO resize slice - } else { - continue - } - } else if i < 0 { // support negative indexing - i = value.Len() + i - } - value = reflect.Indirect(value.Index(i)) - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - } - - components = components[1:] - } - return values -} - -// ValuesAtPath returns a list of values at the case insensitive lexical -// path inside of a structure. 
-func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { - result, err := jmespath.Search(path, i) - if err != nil { - return nil, err - } - - v := reflect.ValueOf(result) - if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { - return nil, nil - } - if s, ok := result.([]interface{}); ok { - return s, err - } - if v.Kind() == reflect.Map && v.Len() == 0 { - return nil, nil - } - if v.Kind() == reflect.Slice { - out := make([]interface{}, v.Len()) - for i := 0; i < v.Len(); i++ { - out[i] = v.Index(i).Interface() - } - return out, nil - } - - return []interface{}{result}, nil -} - -// SetValueAtPath sets a value at the case insensitive lexical path inside -// of a structure. -func SetValueAtPath(i interface{}, path string, v interface{}) { - if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { - for _, rval := range rvals { - if rval.Kind() == reflect.Ptr && rval.IsNil() { - continue - } - setValue(rval, v) - } - } -} - -func setValue(dstVal reflect.Value, src interface{}) { - if dstVal.Kind() == reflect.Ptr { - dstVal = reflect.Indirect(dstVal) - } - srcVal := reflect.ValueOf(src) - - if !srcVal.IsValid() { // src is literal nil - if dstVal.CanAddr() { - // Convert to pointer so that pointer's value can be nil'ed - // dstVal = dstVal.Addr() - } - dstVal.Set(reflect.Zero(dstVal.Type())) - - } else if srcVal.Kind() == reflect.Ptr { - if srcVal.IsNil() { - srcVal = reflect.Zero(dstVal.Type()) - } else { - srcVal = reflect.ValueOf(src).Elem() - } - dstVal.Set(srcVal) - } else { - dstVal.Set(srcVal) - } - -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go deleted file mode 100644 index fc38172f..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go +++ /dev/null @@ -1,107 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strings" -) - -// Prettify returns the string representation of a value. -func Prettify(i interface{}) string { - var buf bytes.Buffer - prettify(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -// prettify will recursively walk value v to build a textual -// representation of the value. 
-func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - strtype := v.Type().String() - if strtype == "time.Time" { - fmt.Fprintf(buf, "%s", v.Interface()) - break - } else if strings.HasPrefix(strtype, "io.") { - buf.WriteString("") - break - } - - buf.WriteString("{\n") - - names := []string{} - for i := 0; i < v.Type().NumField(); i++ { - name := v.Type().Field(i).Name - f := v.Field(i) - if name[0:1] == strings.ToLower(name[0:1]) { - continue // ignore unexported fields - } - if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { - continue // ignore unset fields - } - names = append(names, name) - } - - for i, n := range names { - val := v.FieldByName(n) - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(n + ": ") - prettify(val, indent+2, buf) - - if i < len(names)-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - nl, id, id2 := "", "", "" - if v.Len() > 3 { - nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - prettify(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - prettify(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - if !v.IsValid() { - fmt.Fprint(buf, "") - return - } - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - case io.ReadSeeker, io.Reader: - format = "buffer(%p)" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go deleted file mode 100644 index b6432f1a..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go +++ /dev/null @@ -1,89 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "reflect" - "strings" -) - -// StringValue returns the string representation of a value. 
-func StringValue(i interface{}) string { - var buf bytes.Buffer - stringValue(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - buf.WriteString("{\n") - - names := []string{} - for i := 0; i < v.Type().NumField(); i++ { - name := v.Type().Field(i).Name - f := v.Field(i) - if name[0:1] == strings.ToLower(name[0:1]) { - continue // ignore unexported fields - } - if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { - continue // ignore unset fields - } - names = append(names, name) - } - - for i, n := range names { - val := v.FieldByName(n) - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(n + ": ") - stringValue(val, indent+2, buf) - - if i < len(names)-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - nl, id, id2 := "", "", "" - if v.Len() > 3 { - nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - stringValue(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - stringValue(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/client.go deleted file mode 100644 index c8d0564d..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ /dev/null @@ -1,120 +0,0 @@ -package client - -import ( - "fmt" - "io/ioutil" - "net/http/httputil" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" -) - -// A Config provides configuration to a service client instance. -type Config struct { - Config *aws.Config - Handlers request.Handlers - Endpoint, SigningRegion string -} - -// ConfigProvider provides a generic way for a service client to receive -// the ClientConfig without circular dependencies. -type ConfigProvider interface { - ClientConfig(serviceName string, cfgs ...*aws.Config) Config -} - -// A Client implements the base client request and response handling -// used by all service clients. -type Client struct { - request.Retryer - metadata.ClientInfo - - Config aws.Config - Handlers request.Handlers -} - -// New will return a pointer to a new initialized service client. 
-func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { - svc := &Client{ - Config: cfg, - ClientInfo: info, - Handlers: handlers, - } - - switch retryer, ok := cfg.Retryer.(request.Retryer); { - case ok: - svc.Retryer = retryer - case cfg.Retryer != nil && cfg.Logger != nil: - s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) - cfg.Logger.Log(s) - fallthrough - default: - maxRetries := aws.IntValue(cfg.MaxRetries) - if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { - maxRetries = 3 - } - svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} - } - - svc.AddDebugHandlers() - - for _, option := range options { - option(svc) - } - - return svc -} - -// NewRequest returns a new Request pointer for the service API -// operation and parameters. -func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { - return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) -} - -// AddDebugHandlers injects debug logging handlers into the service to log request -// debug information. -func (c *Client) AddDebugHandlers() { - if !c.Config.LogLevel.AtLeast(aws.LogDebug) { - return - } - - c.Handlers.Send.PushFront(logRequest) - c.Handlers.Send.PushBack(logResponse) -} - -const logReqMsg = `DEBUG: Request %s/%s Details: ----[ REQUEST POST-SIGN ]----------------------------- -%s ------------------------------------------------------` - -func logRequest(r *request.Request) { - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody) - - if logBody { - // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's - // Body as a NoOpCloser and will not be reset after read by the HTTP - // client reader. - r.Body.Seek(r.BodyStart, 0) - r.HTTPRequest.Body = ioutil.NopCloser(r.Body) - } - - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) -} - -const logRespMsg = `DEBUG: Response %s/%s Details: ----[ RESPONSE ]-------------------------------------- -%s ------------------------------------------------------` - -func logResponse(r *request.Request) { - var msg = "no response data" - if r.HTTPResponse != nil { - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody) - msg = string(dumpedBody) - } else if r.Error != nil { - msg = r.Error.Error() - } - r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg)) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go deleted file mode 100644 index 43a3676b..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ /dev/null @@ -1,90 +0,0 @@ -package client - -import ( - "math/rand" - "sync" - "time" - - "github.com/aws/aws-sdk-go/aws/request" -) - -// DefaultRetryer implements basic retry logic using exponential backoff for -// most services. If you want to implement custom retry logic, implement the -// request.Retryer interface or create a structure type that composes this -// struct and override the specific methods. 
For example, to override only -// the MaxRetries method: -// -// type retryer struct { -// service.DefaultRetryer -// } -// -// // This implementation always has 100 max retries -// func (d retryer) MaxRetries() uint { return 100 } -type DefaultRetryer struct { - NumMaxRetries int -} - -// MaxRetries returns the number of maximum returns the service will use to make -// an individual API request. -func (d DefaultRetryer) MaxRetries() int { - return d.NumMaxRetries -} - -var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) - -// RetryRules returns the delay duration before retrying this request again -func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { - // Set the upper limit of delay in retrying at ~five minutes - minTime := 30 - throttle := d.shouldThrottle(r) - if throttle { - minTime = 500 - } - - retryCount := r.RetryCount - if retryCount > 13 { - retryCount = 13 - } else if throttle && retryCount > 8 { - retryCount = 8 - } - - delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime) - return time.Duration(delay) * time.Millisecond -} - -// ShouldRetry returns true if the request should be retried. -func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { - if r.HTTPResponse.StatusCode >= 500 { - return true - } - return r.IsErrorRetryable() || d.shouldThrottle(r) -} - -// ShouldThrottle returns true if the request should be throttled. -func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { - if r.HTTPResponse.StatusCode == 502 || - r.HTTPResponse.StatusCode == 503 || - r.HTTPResponse.StatusCode == 504 { - return true - } - return r.IsErrorThrottle() -} - -// lockedSource is a thread-safe implementation of rand.Source -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go deleted file mode 100644 index 4778056d..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go +++ /dev/null @@ -1,12 +0,0 @@ -package metadata - -// ClientInfo wraps immutable data from the client.Client structure. -type ClientInfo struct { - ServiceName string - APIVersion string - Endpoint string - SigningName string - SigningRegion string - JSONVersion string - TargetPrefix string -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/config.go deleted file mode 100644 index da72935b..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ /dev/null @@ -1,363 +0,0 @@ -package aws - -import ( - "net/http" - "time" - - "github.com/aws/aws-sdk-go/aws/credentials" -) - -// UseServiceDefaultRetries instructs the config to use the service's own default -// number of retries. This will be the default action if Config.MaxRetries -// is nil also. -const UseServiceDefaultRetries = -1 - -// RequestRetryer is an alias for a type that implements the request.Retryer interface. -type RequestRetryer interface{} - -// A Config provides service configuration for service clients. 
By default, -// all clients will use the {defaults.DefaultConfig} structure. -type Config struct { - // Enables verbose error printing of all credential chain errors. - // Should be used when wanting to see all errors while attempting to retreive - // credentials. - CredentialsChainVerboseErrors *bool - - // The credentials object to use when signing requests. Defaults to - // a chain of credential providers to search for credentials in environment - // variables, shared credential file, and EC2 Instance Roles. - Credentials *credentials.Credentials - - // An optional endpoint URL (hostname only or fully qualified URI) - // that overrides the default generated endpoint for a client. Set this - // to `""` to use the default generated endpoint. - // - // @note You must still provide a `Region` value when specifying an - // endpoint for a client. - Endpoint *string - - // The region to send requests to. This parameter is required and must - // be configured globally or on a per-client basis unless otherwise - // noted. A full list of regions is found in the "Regions and Endpoints" - // document. - // - // @see http://docs.aws.amazon.com/general/latest/gr/rande.html - // AWS Regions and Endpoints - Region *string - - // Set this to `true` to disable SSL when sending requests. Defaults - // to `false`. - DisableSSL *bool - - // The HTTP client to use when sending requests. Defaults to - // `http.DefaultClient`. - HTTPClient *http.Client - - // An integer value representing the logging level. The default log level - // is zero (LogOff), which represents no logging. To enable logging set - // to a LogLevel Value. - LogLevel *LogLevelType - - // The logger writer interface to write logging messages to. Defaults to - // standard out. - Logger Logger - - // The maximum number of times that a request will be retried for failures. - // Defaults to -1, which defers the max retry setting to the service specific - // configuration. - MaxRetries *int - - // Retryer guides how HTTP requests should be retried in case of recoverable failures. - // - // When nil or the value does not implement the request.Retryer interface, - // the request.DefaultRetryer will be used. - // - // When both Retryer and MaxRetries are non-nil, the former is used and - // the latter ignored. - // - // To set the Retryer field in a type-safe manner and with chaining, use - // the request.WithRetryer helper function: - // - // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) - // - Retryer RequestRetryer - - // Disables semantic parameter validation, which validates input for missing - // required fields and/or other semantic request input errors. - DisableParamValidation *bool - - // Disables the computation of request and response checksums, e.g., - // CRC32 checksums in Amazon DynamoDB. - DisableComputeChecksums *bool - - // Set this to `true` to force the request to use path-style addressing, - // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will - // use virtual hosted bucket addressing when possible - // (`http://BUCKET.s3.amazonaws.com/KEY`). - // - // @note This configuration option is specific to the Amazon S3 service. - // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html - // Amazon S3: Virtual Hosting of Buckets - S3ForcePathStyle *bool - - // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` - // header to PUT requests over 2MB of content. 
100-Continue instructs the - // HTTP client not to send the body until the service responds with a - // `continue` status. This is useful to prevent sending the request body - // until after the request is authenticated, and validated. - // - // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html - // - // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s - // `ExpectContinueTimeout` for information on adjusting the continue wait timeout. - // https://golang.org/pkg/net/http/#Transport - // - // You should use this flag to disble 100-Continue if you experiance issues - // with proxies or thrid party S3 compatible services. - S3Disable100Continue *bool - - // Set this to `true` to enable S3 Accelerate feature. For all operations compatible - // with S3 Accelerate will use the accelerate endpoint for requests. Requests not compatible - // will fall back to normal S3 requests. - // - // The bucket must be enable for accelerate to be used with S3 client with accelerate - // enabled. If the bucket is not enabled for accelerate an error will be returned. - // The bucket name must be DNS compatible to also work with accelerate. - S3UseAccelerate *bool - - // Set this to `true` to disable the EC2Metadata client from overriding the - // default http.Client's Timeout. This is helpful if you do not want the EC2Metadata - // client to create a new http.Client. This options is only meaningful if you're not - // already using a custom HTTP client with the SDK. Enabled by default. - // - // Must be set and provided to the session.New() in order to disable the EC2Metadata - // overriding the timeout for default credentials chain. - // - // Example: - // sess := session.New(aws.NewConfig().WithEC2MetadataDiableTimeoutOverride(true)) - // svc := s3.New(sess) - // - EC2MetadataDisableTimeoutOverride *bool - - // SleepDelay is an override for the func the SDK will call when sleeping - // during the lifecycle of a request. Specifically this will be used for - // request delays. This value should only be used for testing. To adjust - // the delay of a request see the aws/client.DefaultRetryer and - // aws/request.Retryer. - SleepDelay func(time.Duration) -} - -// NewConfig returns a new Config pointer that can be chained with builder methods to -// set multiple configuration values inline without using pointers. -// -// sess := session.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)) -// -func NewConfig() *Config { - return &Config{} -} - -// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning -// a Config pointer. -func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { - c.CredentialsChainVerboseErrors = &verboseErrs - return c -} - -// WithCredentials sets a config Credentials value returning a Config pointer -// for chaining. -func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { - c.Credentials = creds - return c -} - -// WithEndpoint sets a config Endpoint value returning a Config pointer for -// chaining. -func (c *Config) WithEndpoint(endpoint string) *Config { - c.Endpoint = &endpoint - return c -} - -// WithRegion sets a config Region value returning a Config pointer for -// chaining. -func (c *Config) WithRegion(region string) *Config { - c.Region = ®ion - return c -} - -// WithDisableSSL sets a config DisableSSL value returning a Config pointer -// for chaining. 
-func (c *Config) WithDisableSSL(disable bool) *Config { - c.DisableSSL = &disable - return c -} - -// WithHTTPClient sets a config HTTPClient value returning a Config pointer -// for chaining. -func (c *Config) WithHTTPClient(client *http.Client) *Config { - c.HTTPClient = client - return c -} - -// WithMaxRetries sets a config MaxRetries value returning a Config pointer -// for chaining. -func (c *Config) WithMaxRetries(max int) *Config { - c.MaxRetries = &max - return c -} - -// WithDisableParamValidation sets a config DisableParamValidation value -// returning a Config pointer for chaining. -func (c *Config) WithDisableParamValidation(disable bool) *Config { - c.DisableParamValidation = &disable - return c -} - -// WithDisableComputeChecksums sets a config DisableComputeChecksums value -// returning a Config pointer for chaining. -func (c *Config) WithDisableComputeChecksums(disable bool) *Config { - c.DisableComputeChecksums = &disable - return c -} - -// WithLogLevel sets a config LogLevel value returning a Config pointer for -// chaining. -func (c *Config) WithLogLevel(level LogLevelType) *Config { - c.LogLevel = &level - return c -} - -// WithLogger sets a config Logger value returning a Config pointer for -// chaining. -func (c *Config) WithLogger(logger Logger) *Config { - c.Logger = logger - return c -} - -// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config -// pointer for chaining. -func (c *Config) WithS3ForcePathStyle(force bool) *Config { - c.S3ForcePathStyle = &force - return c -} - -// WithS3Disable100Continue sets a config S3Disable100Continue value returning -// a Config pointer for chaining. -func (c *Config) WithS3Disable100Continue(disable bool) *Config { - c.S3Disable100Continue = &disable - return c -} - -// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config -// pointer for chaining. -func (c *Config) WithS3UseAccelerate(enable bool) *Config { - c.S3UseAccelerate = &enable - return c -} - -// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value -// returning a Config pointer for chaining. -func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { - c.EC2MetadataDisableTimeoutOverride = &enable - return c -} - -// WithSleepDelay overrides the function used to sleep while waiting for the -// next retry. Defaults to time.Sleep. -func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { - c.SleepDelay = fn - return c -} - -// MergeIn merges the passed in configs into the existing config object. 
-func (c *Config) MergeIn(cfgs ...*Config) { - for _, other := range cfgs { - mergeInConfig(c, other) - } -} - -func mergeInConfig(dst *Config, other *Config) { - if other == nil { - return - } - - if other.CredentialsChainVerboseErrors != nil { - dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors - } - - if other.Credentials != nil { - dst.Credentials = other.Credentials - } - - if other.Endpoint != nil { - dst.Endpoint = other.Endpoint - } - - if other.Region != nil { - dst.Region = other.Region - } - - if other.DisableSSL != nil { - dst.DisableSSL = other.DisableSSL - } - - if other.HTTPClient != nil { - dst.HTTPClient = other.HTTPClient - } - - if other.LogLevel != nil { - dst.LogLevel = other.LogLevel - } - - if other.Logger != nil { - dst.Logger = other.Logger - } - - if other.MaxRetries != nil { - dst.MaxRetries = other.MaxRetries - } - - if other.Retryer != nil { - dst.Retryer = other.Retryer - } - - if other.DisableParamValidation != nil { - dst.DisableParamValidation = other.DisableParamValidation - } - - if other.DisableComputeChecksums != nil { - dst.DisableComputeChecksums = other.DisableComputeChecksums - } - - if other.S3ForcePathStyle != nil { - dst.S3ForcePathStyle = other.S3ForcePathStyle - } - - if other.S3Disable100Continue != nil { - dst.S3Disable100Continue = other.S3Disable100Continue - } - - if other.S3UseAccelerate != nil { - dst.S3UseAccelerate = other.S3UseAccelerate - } - - if other.EC2MetadataDisableTimeoutOverride != nil { - dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride - } - - if other.SleepDelay != nil { - dst.SleepDelay = other.SleepDelay - } -} - -// Copy will return a shallow copy of the Config object. If any additional -// configurations are provided they will be merged into the new config returned. -func (c *Config) Copy(cfgs ...*Config) *Config { - dst := &Config{} - dst.MergeIn(c) - - for _, cfg := range cfgs { - dst.MergeIn(cfg) - } - - return dst -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go deleted file mode 100644 index 3b73a7da..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go +++ /dev/null @@ -1,369 +0,0 @@ -package aws - -import "time" - -// String returns a pointer to the string value passed in. -func String(v string) *string { - return &v -} - -// StringValue returns the value of the string pointer passed in or -// "" if the pointer is nil. 
-func StringValue(v *string) string { - if v != nil { - return *v - } - return "" -} - -// StringSlice converts a slice of string values into a slice of -// string pointers -func StringSlice(src []string) []*string { - dst := make([]*string, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// StringValueSlice converts a slice of string pointers into a slice of -// string values -func StringValueSlice(src []*string) []string { - dst := make([]string, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// StringMap converts a string map of string values into a string -// map of string pointers -func StringMap(src map[string]string) map[string]*string { - dst := make(map[string]*string) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// StringValueMap converts a string map of string pointers into a string -// map of string values -func StringValueMap(src map[string]*string) map[string]string { - dst := make(map[string]string) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Bool returns a pointer to the bool value passed in. -func Bool(v bool) *bool { - return &v -} - -// BoolValue returns the value of the bool pointer passed in or -// false if the pointer is nil. -func BoolValue(v *bool) bool { - if v != nil { - return *v - } - return false -} - -// BoolSlice converts a slice of bool values into a slice of -// bool pointers -func BoolSlice(src []bool) []*bool { - dst := make([]*bool, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// BoolValueSlice converts a slice of bool pointers into a slice of -// bool values -func BoolValueSlice(src []*bool) []bool { - dst := make([]bool, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// BoolMap converts a string map of bool values into a string -// map of bool pointers -func BoolMap(src map[string]bool) map[string]*bool { - dst := make(map[string]*bool) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// BoolValueMap converts a string map of bool pointers into a string -// map of bool values -func BoolValueMap(src map[string]*bool) map[string]bool { - dst := make(map[string]bool) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int returns a pointer to the int value passed in. -func Int(v int) *int { - return &v -} - -// IntValue returns the value of the int pointer passed in or -// 0 if the pointer is nil. 
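These helpers exist because nearly every field on the SDK's config and API structs is a pointer; the *Value variants are nil-safe and fall back to the type's zero value. A short illustrative sketch:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	name := aws.String("kube-monkey")  // *string from a literal
	fmt.Println(aws.StringValue(name)) // "kube-monkey"

	var missing *string
	fmt.Println(aws.StringValue(missing)) // "" : nil-safe zero value

	fmt.Println(aws.BoolValue(nil))           // false
	fmt.Println(aws.Int64Value(aws.Int64(3))) // 3

	// The slice and map helpers convert element-wise.
	ptrs := aws.StringSlice([]string{"a", "b"})
	fmt.Println(aws.StringValueSlice(ptrs)) // [a b]
}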
-func IntValue(v *int) int { - if v != nil { - return *v - } - return 0 -} - -// IntSlice converts a slice of int values into a slice of -// int pointers -func IntSlice(src []int) []*int { - dst := make([]*int, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// IntValueSlice converts a slice of int pointers into a slice of -// int values -func IntValueSlice(src []*int) []int { - dst := make([]int, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// IntMap converts a string map of int values into a string -// map of int pointers -func IntMap(src map[string]int) map[string]*int { - dst := make(map[string]*int) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// IntValueMap converts a string map of int pointers into a string -// map of int values -func IntValueMap(src map[string]*int) map[string]int { - dst := make(map[string]int) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int64 returns a pointer to the int64 value passed in. -func Int64(v int64) *int64 { - return &v -} - -// Int64Value returns the value of the int64 pointer passed in or -// 0 if the pointer is nil. -func Int64Value(v *int64) int64 { - if v != nil { - return *v - } - return 0 -} - -// Int64Slice converts a slice of int64 values into a slice of -// int64 pointers -func Int64Slice(src []int64) []*int64 { - dst := make([]*int64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int64ValueSlice converts a slice of int64 pointers into a slice of -// int64 values -func Int64ValueSlice(src []*int64) []int64 { - dst := make([]int64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int64Map converts a string map of int64 values into a string -// map of int64 pointers -func Int64Map(src map[string]int64) map[string]*int64 { - dst := make(map[string]*int64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int64ValueMap converts a string map of int64 pointers into a string -// map of int64 values -func Int64ValueMap(src map[string]*int64) map[string]int64 { - dst := make(map[string]int64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Float64 returns a pointer to the float64 value passed in. -func Float64(v float64) *float64 { - return &v -} - -// Float64Value returns the value of the float64 pointer passed in or -// 0 if the pointer is nil. 
-func Float64Value(v *float64) float64 { - if v != nil { - return *v - } - return 0 -} - -// Float64Slice converts a slice of float64 values into a slice of -// float64 pointers -func Float64Slice(src []float64) []*float64 { - dst := make([]*float64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Float64ValueSlice converts a slice of float64 pointers into a slice of -// float64 values -func Float64ValueSlice(src []*float64) []float64 { - dst := make([]float64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Float64Map converts a string map of float64 values into a string -// map of float64 pointers -func Float64Map(src map[string]float64) map[string]*float64 { - dst := make(map[string]*float64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Float64ValueMap converts a string map of float64 pointers into a string -// map of float64 values -func Float64ValueMap(src map[string]*float64) map[string]float64 { - dst := make(map[string]float64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Time returns a pointer to the time.Time value passed in. -func Time(v time.Time) *time.Time { - return &v -} - -// TimeValue returns the value of the time.Time pointer passed in or -// time.Time{} if the pointer is nil. -func TimeValue(v *time.Time) time.Time { - if v != nil { - return *v - } - return time.Time{} -} - -// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". -// The result is undefined if the Unix time cannot be represented by an int64. -// Which includes calling TimeUnixMilli on a zero Time is undefined. -// -// This utility is useful for service API's such as CloudWatch Logs which require -// their unix time values to be in milliseconds. -// -// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information. 
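TimeUnixMilli is aimed at services such as CloudWatch Logs that want epoch milliseconds rather than a time.Time. A quick illustrative check of the conversion:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	t := time.Date(2017, time.January, 1, 0, 0, 0, 0, time.UTC)
	fmt.Println(aws.TimeUnixMilli(t)) // 1483228800000, i.e. UnixNano / 1e6
}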
-func TimeUnixMilli(t time.Time) int64 { - return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) -} - -// TimeSlice converts a slice of time.Time values into a slice of -// time.Time pointers -func TimeSlice(src []time.Time) []*time.Time { - dst := make([]*time.Time, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// TimeValueSlice converts a slice of time.Time pointers into a slice of -// time.Time values -func TimeValueSlice(src []*time.Time) []time.Time { - dst := make([]time.Time, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// TimeMap converts a string map of time.Time values into a string -// map of time.Time pointers -func TimeMap(src map[string]time.Time) map[string]*time.Time { - dst := make(map[string]*time.Time) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// TimeValueMap converts a string map of time.Time pointers into a string -// map of time.Time values -func TimeValueMap(src map[string]*time.Time) map[string]time.Time { - dst := make(map[string]time.Time) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go deleted file mode 100644 index 8456e29b..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ /dev/null @@ -1,152 +0,0 @@ -package corehandlers - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "regexp" - "runtime" - "strconv" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// Interface for matching types which also have a Len method. -type lener interface { - Len() int -} - -// BuildContentLengthHandler builds the content length of a request based on the body, -// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable -// to determine request body length and no "Content-Length" was specified it will panic. -// -// The Content-Length will only be aded to the request if the length of the body -// is greater than 0. If the body is empty or the current `Content-Length` -// header is <= 0, the header will also be stripped. -var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { - var length int64 - - if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { - length, _ = strconv.ParseInt(slength, 10, 64) - } else { - switch body := r.Body.(type) { - case nil: - length = 0 - case lener: - length = int64(body.Len()) - case io.Seeker: - r.BodyStart, _ = body.Seek(0, 1) - end, _ := body.Seek(0, 2) - body.Seek(r.BodyStart, 0) // make sure to seek back to original location - length = end - r.BodyStart - default: - panic("Cannot get length of body, must provide `ContentLength`") - } - } - - if length > 0 { - r.HTTPRequest.ContentLength = length - r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) - } else { - r.HTTPRequest.ContentLength = 0 - r.HTTPRequest.Header.Del("Content-Length") - } -}} - -// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. 
-var SDKVersionUserAgentHandler = request.NamedHandler{ - Name: "core.SDKVersionUserAgentHandler", - Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, - runtime.Version(), runtime.GOOS, runtime.GOARCH), -} - -var reStatusCode = regexp.MustCompile(`^(\d{3})`) - -// SendHandler is a request handler to send service request using HTTP client. -var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) { - var err error - r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest) - if err != nil { - // Prevent leaking if an HTTPResponse was returned. Clean up - // the body. - if r.HTTPResponse != nil { - r.HTTPResponse.Body.Close() - } - // Capture the case where url.Error is returned for error processing - // response. e.g. 301 without location header comes back as string - // error and r.HTTPResponse is nil. Other url redirect errors will - // comeback in a similar method. - if e, ok := err.(*url.Error); ok && e.Err != nil { - if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { - code, _ := strconv.ParseInt(s[1], 10, 64) - r.HTTPResponse = &http.Response{ - StatusCode: int(code), - Status: http.StatusText(int(code)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - return - } - } - if r.HTTPResponse == nil { - // Add a dummy request response object to ensure the HTTPResponse - // value is consistent. - r.HTTPResponse = &http.Response{ - StatusCode: int(0), - Status: http.StatusText(int(0)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - } - // Catch all other request errors. - r.Error = awserr.New("RequestError", "send request failed", err) - r.Retryable = aws.Bool(true) // network errors are retryable - } -}} - -// ValidateResponseHandler is a request handler to validate service response. -var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { - if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { - // this may be replaced by an UnmarshalError handler - r.Error = awserr.New("UnknownError", "unknown error", nil) - } -}} - -// AfterRetryHandler performs final checks to determine if the request should -// be retried and how long to delay. -var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable == nil { - r.Retryable = aws.Bool(r.ShouldRetry(r)) - } - - if r.WillRetry() { - r.RetryDelay = r.RetryRules(r) - r.Config.SleepDelay(r.RetryDelay) - - // when the expired token exception occurs the credentials - // need to be expired locally so that the next request to - // get credentials will trigger a credentials refresh. - if r.IsErrorExpired() { - r.Config.Credentials.Expire() - } - - r.RetryCount++ - r.Error = nil - } -}} - -// ValidateEndpointHandler is a request handler to validate a request had the -// appropriate Region and Endpoint set. Will set r.Error if the endpoint or -// region is not valid. 
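AfterRetryHandler is what actually waits between attempts, and it does so through Config.SleepDelay, which is why the config doc flags that field as a testing override. An illustrative sketch of silencing the real sleep in tests:

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Retries still happen, but the backoff becomes a no-op so tests do not block.
	cfg := aws.NewConfig().
		WithMaxRetries(10).
		WithSleepDelay(func(time.Duration) {})

	_ = cfg // hand cfg to the session/client constructors as usual
}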
-var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { - if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { - r.Error = aws.ErrMissingRegion - } else if r.ClientInfo.Endpoint == "" { - r.Error = aws.ErrMissingEndpoint - } -}} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go deleted file mode 100644 index 7d50b155..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go +++ /dev/null @@ -1,17 +0,0 @@ -package corehandlers - -import "github.com/aws/aws-sdk-go/aws/request" - -// ValidateParametersHandler is a request handler to validate the input parameters. -// Validating parameters only has meaning if done prior to the request being sent. -var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { - if !r.ParamsFilled() { - return - } - - if v, ok := r.Params.(request.Validator); ok { - if err := v.Validate(); err != nil { - r.Error = err - } - } -}} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go deleted file mode 100644 index 857311f6..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go +++ /dev/null @@ -1,100 +0,0 @@ -package credentials - -import ( - "github.com/aws/aws-sdk-go/aws/awserr" -) - -var ( - // ErrNoValidProvidersFoundInChain Is returned when there are no valid - // providers in the ChainProvider. - // - // This has been deprecated. For verbose error messaging set - // aws.Config.CredentialsChainVerboseErrors to true - // - // @readonly - ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", - `no valid providers in chain. Deprecated. - For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, - nil) -) - -// A ChainProvider will search for a provider which returns credentials -// and cache that provider until Retrieve is called again. -// -// The ChainProvider provides a way of chaining multiple providers together -// which will pick the first available using priority order of the Providers -// in the list. -// -// If none of the Providers retrieve valid credentials Value, ChainProvider's -// Retrieve() will return the error ErrNoValidProvidersFoundInChain. -// -// If a Provider is found which returns valid credentials Value ChainProvider -// will cache that Provider for all calls to IsExpired(), until Retrieve is -// called again. -// -// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. -// In this example EnvProvider will first check if any credentials are available -// vai the environment variables. If there are none ChainProvider will check -// the next Provider in the list, EC2RoleProvider in this case. 
If EC2RoleProvider -// does not return any credentials ChainProvider will return the error -// ErrNoValidProvidersFoundInChain -// -// creds := NewChainCredentials( -// []Provider{ -// &EnvProvider{}, -// &EC2RoleProvider{ -// Client: ec2metadata.New(sess), -// }, -// }) -// -// // Usage of ChainCredentials with aws.Config -// svc := ec2.New(&aws.Config{Credentials: creds}) -// -type ChainProvider struct { - Providers []Provider - curr Provider - VerboseErrors bool -} - -// NewChainCredentials returns a pointer to a new Credentials object -// wrapping a chain of providers. -func NewChainCredentials(providers []Provider) *Credentials { - return NewCredentials(&ChainProvider{ - Providers: append([]Provider{}, providers...), - }) -} - -// Retrieve returns the credentials value or error if no provider returned -// without error. -// -// If a provider is found it will be cached and any calls to IsExpired() -// will return the expired state of the cached provider. -func (c *ChainProvider) Retrieve() (Value, error) { - var errs []error - for _, p := range c.Providers { - creds, err := p.Retrieve() - if err == nil { - c.curr = p - return creds, nil - } - errs = append(errs, err) - } - c.curr = nil - - var err error - err = ErrNoValidProvidersFoundInChain - if c.VerboseErrors { - err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) - } - return Value{}, err -} - -// IsExpired will returned the expired state of the currently cached provider -// if there is one. If there is no current provider, true will be returned. -func (c *ChainProvider) IsExpired() bool { - if c.curr != nil { - return c.curr.IsExpired() - } - - return true -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go deleted file mode 100644 index 7b8ebf5f..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ /dev/null @@ -1,223 +0,0 @@ -// Package credentials provides credential retrieval and management -// -// The Credentials is the primary method of getting access to and managing -// credentials Values. Using dependency injection retrieval of the credential -// values is handled by a object which satisfies the Provider interface. -// -// By default the Credentials.Get() will cache the successful result of a -// Provider's Retrieve() until Provider.IsExpired() returns true. At which -// point Credentials will call Provider's Retrieve() to get new credential Value. -// -// The Provider is responsible for determining when credentials Value have expired. -// It is also important to note that Credentials will always call Retrieve the -// first time Credentials.Get() is called. -// -// Example of using the environment variable credentials. -// -// creds := NewEnvCredentials() -// -// // Retrieve the credentials value -// credValue, err := creds.Get() -// if err != nil { -// // handle error -// } -// -// Example of forcing credentials to expire and be refreshed on the next Get(). -// This may be helpful to proactively expire credentials and refresh them sooner -// than they would naturally expire on their own. -// -// creds := NewCredentials(&EC2RoleProvider{}) -// creds.Expire() -// credsValue, err := creds.Get() -// // New credentials will be retrieved instead of from cache. 
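The chain provider is the same mechanism the SDK defaults wire up: the first provider to return credentials wins and is cached until it expires. An illustrative sketch built only from providers defined in these removed files:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	creds := credentials.NewChainCredentials([]credentials.Provider{
		&credentials.EnvProvider{},
		&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
	})

	v, err := creds.Get()
	if err != nil {
		// ChainProvider.VerboseErrors (wired to aws.Config.CredentialsChainVerboseErrors
		// by the SDK defaults) turns this generic error into a per-provider batch error.
		fmt.Println("no provider in the chain returned credentials:", err)
		return
	}
	fmt.Println("credentials resolved by", v.ProviderName)
}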
-// -// -// Custom Provider -// -// Each Provider built into this package also provides a helper method to generate -// a Credentials pointer setup with the provider. To use a custom Provider just -// create a type which satisfies the Provider interface and pass it to the -// NewCredentials method. -// -// type MyProvider struct{} -// func (m *MyProvider) Retrieve() (Value, error) {...} -// func (m *MyProvider) IsExpired() bool {...} -// -// creds := NewCredentials(&MyProvider{}) -// credValue, err := creds.Get() -// -package credentials - -import ( - "sync" - "time" -) - -// AnonymousCredentials is an empty Credential object that can be used as -// dummy placeholder credentials for requests that do not need signed. -// -// This Credentials can be used to configure a service to not sign requests -// when making service API calls. For example, when accessing public -// s3 buckets. -// -// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials}) -// // Access public S3 buckets. -// -// @readonly -var AnonymousCredentials = NewStaticCredentials("", "", "") - -// A Value is the AWS credentials value for individual credential fields. -type Value struct { - // AWS Access key ID - AccessKeyID string - - // AWS Secret Access Key - SecretAccessKey string - - // AWS Session Token - SessionToken string - - // Provider used to get credentials - ProviderName string -} - -// A Provider is the interface for any component which will provide credentials -// Value. A provider is required to manage its own Expired state, and what to -// be expired means. -// -// The Provider should not need to implement its own mutexes, because -// that will be managed by Credentials. -type Provider interface { - // Refresh returns nil if it successfully retrieved the value. - // Error is returned if the value were not obtainable, or empty. - Retrieve() (Value, error) - - // IsExpired returns if the credentials are no longer valid, and need - // to be retrieved. - IsExpired() bool -} - -// A Expiry provides shared expiration logic to be used by credentials -// providers to implement expiry functionality. -// -// The best method to use this struct is as an anonymous field within the -// provider's struct. -// -// Example: -// type EC2RoleProvider struct { -// Expiry -// ... -// } -type Expiry struct { - // The date/time when to expire on - expiration time.Time - - // If set will be used by IsExpired to determine the current time. - // Defaults to time.Now if CurrentTime is not set. Available for testing - // to be able to mock out the current time. - CurrentTime func() time.Time -} - -// SetExpiration sets the expiration IsExpired will check when called. -// -// If window is greater than 0 the expiration time will be reduced by the -// window value. -// -// Using a window is helpful to trigger credentials to expire sooner than -// the expiration time given to ensure no requests are made with expired -// tokens. -func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { - e.expiration = expiration - if window > 0 { - e.expiration = e.expiration.Add(-window) - } -} - -// IsExpired returns if the credentials are expired. -func (e *Expiry) IsExpired() bool { - if e.CurrentTime == nil { - e.CurrentTime = time.Now - } - return e.expiration.Before(e.CurrentTime()) -} - -// A Credentials provides synchronous safe retrieval of AWS credentials Value. -// Credentials will cache the credentials value until they expire. Once the value -// expires the next Get will attempt to retrieve valid credentials. 
-// -// Credentials is safe to use across multiple goroutines and will manage the -// synchronous state so the Providers do not need to implement their own -// synchronization. -// -// The first Credentials.Get() will always call Provider.Retrieve() to get the -// first instance of the credentials Value. All calls to Get() after that -// will return the cached credentials Value until IsExpired() returns true. -type Credentials struct { - creds Value - forceRefresh bool - m sync.Mutex - - provider Provider -} - -// NewCredentials returns a pointer to a new Credentials with the provider set. -func NewCredentials(provider Provider) *Credentials { - return &Credentials{ - provider: provider, - forceRefresh: true, - } -} - -// Get returns the credentials value, or error if the credentials Value failed -// to be retrieved. -// -// Will return the cached credentials Value if it has not expired. If the -// credentials Value has expired the Provider's Retrieve() will be called -// to refresh the credentials. -// -// If Credentials.Expire() was called the credentials Value will be force -// expired, and the next call to Get() will cause them to be refreshed. -func (c *Credentials) Get() (Value, error) { - c.m.Lock() - defer c.m.Unlock() - - if c.isExpired() { - creds, err := c.provider.Retrieve() - if err != nil { - return Value{}, err - } - c.creds = creds - c.forceRefresh = false - } - - return c.creds, nil -} - -// Expire expires the credentials and forces them to be retrieved on the -// next call to Get(). -// -// This will override the Provider's expired state, and force Credentials -// to call the Provider's Retrieve(). -func (c *Credentials) Expire() { - c.m.Lock() - defer c.m.Unlock() - - c.forceRefresh = true -} - -// IsExpired returns if the credentials are no longer valid, and need -// to be retrieved. -// -// If the Credentials were forced to be expired with Expire() this will -// reflect that override. -func (c *Credentials) IsExpired() bool { - c.m.Lock() - defer c.m.Unlock() - - return c.isExpired() -} - -// isExpired helper method wrapping the definition of expired credentials. -func (c *Credentials) isExpired() bool { - return c.forceRefresh || c.provider.IsExpired() -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go deleted file mode 100644 index aa9d689a..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ /dev/null @@ -1,178 +0,0 @@ -package ec2rolecreds - -import ( - "bufio" - "encoding/json" - "fmt" - "path" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/ec2metadata" -) - -// ProviderName provides a name of EC2Role provider -const ProviderName = "EC2RoleProvider" - -// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if -// those credentials are expired. -// -// Example how to configure the EC2RoleProvider with custom http Client, Endpoint -// or ExpiryWindow -// -// p := &ec2rolecreds.EC2RoleProvider{ -// // Pass in a custom timeout to be used when requesting -// // IAM EC2 Role credentials. 
-// Client: ec2metadata.New(sess, aws.Config{ -// HTTPClient: &http.Client{Timeout: 10 * time.Second}, -// }), -// -// // Do not use early expiry of credentials. If a non zero value is -// // specified the credentials will be expired early -// ExpiryWindow: 0, -// } -type EC2RoleProvider struct { - credentials.Expiry - - // Required EC2Metadata client to use when connecting to EC2 metadata service. - Client *ec2metadata.EC2Metadata - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration -} - -// NewCredentials returns a pointer to a new Credentials object wrapping -// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. -// The ConfigProvider is satisfied by the session.Session type. -func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { - p := &EC2RoleProvider{ - Client: ec2metadata.New(c), - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping -// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 -// metadata service. -func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { - p := &EC2RoleProvider{ - Client: client, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// Retrieve retrieves credentials from the EC2 service. -// Error will be returned if the request fails, or unable to extract -// the desired credentials. -func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { - credsList, err := requestCredList(m.Client) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - if len(credsList) == 0 { - return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) - } - credsName := credsList[0] - - roleCreds, err := requestCred(m.Client, credsName) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) - - return credentials.Value{ - AccessKeyID: roleCreds.AccessKeyID, - SecretAccessKey: roleCreds.SecretAccessKey, - SessionToken: roleCreds.Token, - ProviderName: ProviderName, - }, nil -} - -// A ec2RoleCredRespBody provides the shape for unmarshalling credential -// request responses. -type ec2RoleCredRespBody struct { - // Success State - Expiration time.Time - AccessKeyID string - SecretAccessKey string - Token string - - // Error state - Code string - Message string -} - -const iamSecurityCredsPath = "/iam/security-credentials" - -// requestCredList requests a list of credentials from the EC2 service. 
-// If there are no credentials, or there is an error making or receiving the request -func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { - resp, err := client.GetMetadata(iamSecurityCredsPath) - if err != nil { - return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) - } - - credsList := []string{} - s := bufio.NewScanner(strings.NewReader(resp)) - for s.Scan() { - credsList = append(credsList, s.Text()) - } - - if err := s.Err(); err != nil { - return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err) - } - - return credsList, nil -} - -// requestCred requests the credentials for a specific credentials from the EC2 service. -// -// If the credentials cannot be found, or there is an error reading the response -// and error will be returned. -func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName)) - if err != nil { - return ec2RoleCredRespBody{}, - awserr.New("EC2RoleRequestError", - fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), - err) - } - - respCreds := ec2RoleCredRespBody{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { - return ec2RoleCredRespBody{}, - awserr.New("SerializationError", - fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), - err) - } - - if respCreds.Code != "Success" { - // If an error code was returned something failed requesting the role. - return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) - } - - return respCreds, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go deleted file mode 100644 index 96655bc4..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go +++ /dev/null @@ -1,77 +0,0 @@ -package credentials - -import ( - "os" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// EnvProviderName provides a name of Env provider -const EnvProviderName = "EnvProvider" - -var ( - // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be - // found in the process's environment. - // - // @readonly - ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) - - // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key - // can't be found in the process's environment. - // - // @readonly - ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) -) - -// A EnvProvider retrieves credentials from the environment variables of the -// running process. Environment credentials never expire. -// -// Environment variables used: -// -// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY -// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY -type EnvProvider struct { - retrieved bool -} - -// NewEnvCredentials returns a pointer to a new Credentials object -// wrapping the environment variable provider. -func NewEnvCredentials() *Credentials { - return NewCredentials(&EnvProvider{}) -} - -// Retrieve retrieves the keys from the environment. 
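The environment provider reads the key pairs listed above and never expires once it has retrieved them. An illustrative sketch:

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	os.Setenv("AWS_ACCESS_KEY_ID", "AKIDEXAMPLE")
	os.Setenv("AWS_SECRET_ACCESS_KEY", "secretEXAMPLE")

	v, err := credentials.NewEnvCredentials().Get()
	if err != nil {
		fmt.Println(err) // ErrAccessKeyIDNotFound or ErrSecretAccessKeyNotFound
		return
	}
	fmt.Println(v.AccessKeyID, v.ProviderName) // AKIDEXAMPLE EnvProvider
}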
-func (e *EnvProvider) Retrieve() (Value, error) { - e.retrieved = false - - id := os.Getenv("AWS_ACCESS_KEY_ID") - if id == "" { - id = os.Getenv("AWS_ACCESS_KEY") - } - - secret := os.Getenv("AWS_SECRET_ACCESS_KEY") - if secret == "" { - secret = os.Getenv("AWS_SECRET_KEY") - } - - if id == "" { - return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound - } - - if secret == "" { - return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound - } - - e.retrieved = true - return Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: os.Getenv("AWS_SESSION_TOKEN"), - ProviderName: EnvProviderName, - }, nil -} - -// IsExpired returns if the credentials have been retrieved. -func (e *EnvProvider) IsExpired() bool { - return !e.retrieved -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini deleted file mode 100644 index 7fc91d9d..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini +++ /dev/null @@ -1,12 +0,0 @@ -[default] -aws_access_key_id = accessKey -aws_secret_access_key = secret -aws_session_token = token - -[no_token] -aws_access_key_id = accessKey -aws_secret_access_key = secret - -[with_colon] -aws_access_key_id: accessKey -aws_secret_access_key: secret diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go deleted file mode 100644 index 7fb7cbf0..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go +++ /dev/null @@ -1,151 +0,0 @@ -package credentials - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/go-ini/ini" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// SharedCredsProviderName provides a name of SharedCreds provider -const SharedCredsProviderName = "SharedCredentialsProvider" - -var ( - // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. - // - // @readonly - ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) -) - -// A SharedCredentialsProvider retrieves credentials from the current user's home -// directory, and keeps track if those credentials are expired. -// -// Profile ini file example: $HOME/.aws/credentials -type SharedCredentialsProvider struct { - // Path to the shared credentials file. - // - // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the - // env value is empty will default to current user's home directory. - // Linux/OSX: "$HOME/.aws/credentials" - // Windows: "%USERPROFILE%\.aws\credentials" - Filename string - - // AWS Profile to extract credentials from the shared credentials file. If empty - // will default to environment variable "AWS_PROFILE" or "default" if - // environment variable is also not set. - Profile string - - // retrieved states if the credentials have been successfully retrieved. - retrieved bool -} - -// NewSharedCredentials returns a pointer to a new Credentials object -// wrapping the Profile file provider. 
-func NewSharedCredentials(filename, profile string) *Credentials { - return NewCredentials(&SharedCredentialsProvider{ - Filename: filename, - Profile: profile, - }) -} - -// Retrieve reads and extracts the shared credentials from the current -// users home directory. -func (p *SharedCredentialsProvider) Retrieve() (Value, error) { - p.retrieved = false - - filename, err := p.filename() - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, err - } - - creds, err := loadProfile(filename, p.profile()) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, err - } - - p.retrieved = true - return creds, nil -} - -// IsExpired returns if the shared credentials have expired. -func (p *SharedCredentialsProvider) IsExpired() bool { - return !p.retrieved -} - -// loadProfiles loads from the file pointed to by shared credentials filename for profile. -// The credentials retrieved from the profile will be returned or error. Error will be -// returned if it fails to read from the file, or the data is invalid. -func loadProfile(filename, profile string) (Value, error) { - config, err := ini.Load(filename) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) - } - iniProfile, err := config.GetSection(profile) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err) - } - - id, err := iniProfile.GetKey("aws_access_key_id") - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", - fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), - err) - } - - secret, err := iniProfile.GetKey("aws_secret_access_key") - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", - fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), - nil) - } - - // Default to empty string if not found - token := iniProfile.Key("aws_session_token") - - return Value{ - AccessKeyID: id.String(), - SecretAccessKey: secret.String(), - SessionToken: token.String(), - ProviderName: SharedCredsProviderName, - }, nil -} - -// filename returns the filename to use to read AWS shared credentials. -// -// Will return an error if the user's home directory path cannot be found. -func (p *SharedCredentialsProvider) filename() (string, error) { - if p.Filename == "" { - if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" { - return p.Filename, nil - } - - homeDir := os.Getenv("HOME") // *nix - if homeDir == "" { // Windows - homeDir = os.Getenv("USERPROFILE") - } - if homeDir == "" { - return "", ErrSharedCredentialsHomeNotFound - } - - p.Filename = filepath.Join(homeDir, ".aws", "credentials") - } - - return p.Filename, nil -} - -// profile returns the AWS shared credentials profile. If empty will read -// environment variable "AWS_PROFILE". If that is not set profile will -// return "default". 
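The shared-file provider resolves its inputs lazily: an empty Filename falls back to AWS_SHARED_CREDENTIALS_FILE and then to the home-directory default, and an empty Profile falls back to AWS_PROFILE and then "default". An illustrative sketch (the "no_token" profile name comes from the example.ini removed above):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Empty strings mean "use the documented fallbacks" for both arguments.
	creds := credentials.NewSharedCredentials("", "no_token")

	v, err := creds.Get()
	if err != nil {
		fmt.Println(err) // e.g. UserHomeNotFound or SharedCredsLoad
		return
	}
	fmt.Println(v.ProviderName) // SharedCredentialsProvider
}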
-func (p *SharedCredentialsProvider) profile() string { - if p.Profile == "" { - p.Profile = os.Getenv("AWS_PROFILE") - } - if p.Profile == "" { - p.Profile = "default" - } - - return p.Profile -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go deleted file mode 100644 index 6f075604..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go +++ /dev/null @@ -1,48 +0,0 @@ -package credentials - -import ( - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// StaticProviderName provides a name of Static provider -const StaticProviderName = "StaticProvider" - -var ( - // ErrStaticCredentialsEmpty is emitted when static credentials are empty. - // - // @readonly - ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) -) - -// A StaticProvider is a set of credentials which are set programmatically, -// and will never expire. -type StaticProvider struct { - Value -} - -// NewStaticCredentials returns a pointer to a new Credentials object -// wrapping a static credentials value provider. -func NewStaticCredentials(id, secret, token string) *Credentials { - return NewCredentials(&StaticProvider{Value: Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: token, - }}) -} - -// Retrieve returns the credentials or error if the credentials are invalid. -func (s *StaticProvider) Retrieve() (Value, error) { - if s.AccessKeyID == "" || s.SecretAccessKey == "" { - return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty - } - - s.Value.ProviderName = StaticProviderName - return s.Value, nil -} - -// IsExpired returns if the credentials are expired. -// -// For StaticProvider, the credentials never expired. -func (s *StaticProvider) IsExpired() bool { - return false -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go deleted file mode 100644 index 12be1a5d..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ /dev/null @@ -1,98 +0,0 @@ -// Package defaults is a collection of helpers to retrieve the SDK's default -// configuration and handlers. -// -// Generally this package shouldn't be used directly, but session.Session -// instead. This package is useful when you need to reset the defaults -// of a session or service client to the SDK defaults before setting -// additional parameters. -package defaults - -import ( - "net/http" - "os" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/endpoints" -) - -// A Defaults provides a collection of default values for SDK clients. -type Defaults struct { - Config *aws.Config - Handlers request.Handlers -} - -// Get returns the SDK's default values with Config and handlers pre-configured. 
-func Get() Defaults { - cfg := Config() - handlers := Handlers() - cfg.Credentials = CredChain(cfg, handlers) - - return Defaults{ - Config: cfg, - Handlers: handlers, - } -} - -// Config returns the default configuration without credentials. -// To retrieve a config with credentials also included use -// `defaults.Get().Config` instead. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the configuration of an -// existing service client or session. -func Config() *aws.Config { - return aws.NewConfig(). - WithCredentials(credentials.AnonymousCredentials). - WithRegion(os.Getenv("AWS_REGION")). - WithHTTPClient(http.DefaultClient). - WithMaxRetries(aws.UseServiceDefaultRetries). - WithLogger(aws.NewDefaultLogger()). - WithLogLevel(aws.LogOff). - WithSleepDelay(time.Sleep) -} - -// Handlers returns the default request handlers. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the request handlers of an -// existing service client or session. -func Handlers() request.Handlers { - var handlers request.Handlers - - handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) - handlers.Validate.AfterEachFn = request.HandlerListStopOnError - handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) - handlers.Build.AfterEachFn = request.HandlerListStopOnError - handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) - handlers.Send.PushBackNamed(corehandlers.SendHandler) - handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) - handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) - - return handlers -} - -// CredChain returns the default credential chain. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the credentials of an -// existing service client or session's Config. -func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { - endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true) - - return credentials.NewCredentials(&credentials.ChainProvider{ - VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: []credentials.Provider{ - &credentials.EnvProvider{}, - &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, - &ec2rolecreds.EC2RoleProvider{ - Client: ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion), - ExpiryWindow: 5 * time.Minute, - }, - }}) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go deleted file mode 100644 index 669c813a..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ /dev/null @@ -1,140 +0,0 @@ -package ec2metadata - -import ( - "encoding/json" - "fmt" - "path" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// GetMetadata uses the path provided to request information from the EC2 -// instance metdata service. The content will be returned as a string, or -// error if the request failed. 
-func (c *EC2Metadata) GetMetadata(p string) (string, error) { - op := &request.Operation{ - Name: "GetMetadata", - HTTPMethod: "GET", - HTTPPath: path.Join("/", "meta-data", p), - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - - return output.Content, req.Send() -} - -// GetDynamicData uses the path provided to request information from the EC2 -// instance metadata service for dynamic data. The content will be returned -// as a string, or error if the request failed. -func (c *EC2Metadata) GetDynamicData(p string) (string, error) { - op := &request.Operation{ - Name: "GetDynamicData", - HTTPMethod: "GET", - HTTPPath: path.Join("/", "dynamic", p), - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - - return output.Content, req.Send() -} - -// GetInstanceIdentityDocument retrieves an identity document describing an -// instance. Error is returned if the request fails or is unable to parse -// the response. -func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { - resp, err := c.GetDynamicData("instance-identity/document") - if err != nil { - return EC2InstanceIdentityDocument{}, - awserr.New("EC2MetadataRequestError", - "failed to get EC2 instance identity document", err) - } - - doc := EC2InstanceIdentityDocument{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { - return EC2InstanceIdentityDocument{}, - awserr.New("SerializationError", - "failed to decode EC2 instance identity document", err) - } - - return doc, nil -} - -// IAMInfo retrieves IAM info from the metadata API -func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { - resp, err := c.GetMetadata("iam/info") - if err != nil { - return EC2IAMInfo{}, - awserr.New("EC2MetadataRequestError", - "failed to get EC2 IAM info", err) - } - - info := EC2IAMInfo{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { - return EC2IAMInfo{}, - awserr.New("SerializationError", - "failed to decode EC2 IAM info", err) - } - - if info.Code != "Success" { - errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) - return EC2IAMInfo{}, - awserr.New("EC2MetadataError", errMsg, nil) - } - - return info, nil -} - -// Region returns the region the instance is running in. -func (c *EC2Metadata) Region() (string, error) { - resp, err := c.GetMetadata("placement/availability-zone") - if err != nil { - return "", err - } - - // returns region without the suffix. Eg: us-west-2a becomes us-west-2 - return resp[:len(resp)-1], nil -} - -// Available returns if the application has access to the EC2 Metadata service. -// Can be used to determine if application is running within an EC2 Instance and -// the metadata service is available. 
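Available is simply a GetMetadata("instance-id") call that is allowed to fail, which is how the credential chain decides whether it is running on EC2. An illustrative sketch; it assumes the SDK's session package, which is not among the files removed here:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	svc := ec2metadata.New(session.New())

	if !svc.Available() {
		fmt.Println("not on EC2, or the metadata service is unreachable")
		return
	}

	// Region strips the trailing letter from the availability zone,
	// e.g. us-west-2a becomes us-west-2.
	region, err := svc.Region()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("region:", region)
}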
-func (c *EC2Metadata) Available() bool { - if _, err := c.GetMetadata("instance-id"); err != nil { - return false - } - - return true -} - -// An EC2IAMInfo provides the shape for unmarshalling -// an IAM info from the metadata API -type EC2IAMInfo struct { - Code string - LastUpdated time.Time - InstanceProfileArn string - InstanceProfileID string -} - -// An EC2InstanceIdentityDocument provides the shape for unmarshalling -// an instance identity document -type EC2InstanceIdentityDocument struct { - DevpayProductCodes []string `json:"devpayProductCodes"` - AvailabilityZone string `json:"availabilityZone"` - PrivateIP string `json:"privateIp"` - Version string `json:"version"` - Region string `json:"region"` - InstanceID string `json:"instanceId"` - BillingProducts []string `json:"billingProducts"` - InstanceType string `json:"instanceType"` - AccountID string `json:"accountId"` - PendingTime time.Time `json:"pendingTime"` - ImageID string `json:"imageId"` - KernelID string `json:"kernelId"` - RamdiskID string `json:"ramdiskId"` - Architecture string `json:"architecture"` -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go deleted file mode 100644 index 5b4379db..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ /dev/null @@ -1,124 +0,0 @@ -// Package ec2metadata provides the client for making API calls to the -// EC2 Metadata service. -package ec2metadata - -import ( - "bytes" - "errors" - "io" - "net/http" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" -) - -// ServiceName is the name of the service. -const ServiceName = "ec2metadata" - -// A EC2Metadata is an EC2 Metadata service Client. -type EC2Metadata struct { - *client.Client -} - -// New creates a new instance of the EC2Metadata client with a session. -// This client is safe to use across multiple goroutines. -// -// -// Example: -// // Create a EC2Metadata client from just a session. -// svc := ec2metadata.New(mySession) -// -// // Create a EC2Metadata client with additional configuration -// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { - c := p.ClientConfig(ServiceName, cfgs...) - return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) -} - -// NewClient returns a new EC2Metadata client. Should be used to create -// a client when not using a session. Generally using just New with a session -// is preferred. -// -// If an unmodified HTTP client is provided from the stdlib default, or no client -// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. -// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default. -func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { - if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { - // If the http client is unmodified and this feature is not disabled - // set custom timeouts for EC2Metadata requests. 
- cfg.HTTPClient = &http.Client{ - // use a shorter timeout than default because the metadata - // service is local if it is running, and to fail faster - // if not running on an ec2 instance. - Timeout: 5 * time.Second, - } - } - - svc := &EC2Metadata{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - Endpoint: endpoint, - APIVersion: "latest", - }, - handlers, - ), - } - - svc.Handlers.Unmarshal.PushBack(unmarshalHandler) - svc.Handlers.UnmarshalError.PushBack(unmarshalError) - svc.Handlers.Validate.Clear() - svc.Handlers.Validate.PushBack(validateEndpointHandler) - - // Add additional options to the service config - for _, option := range opts { - option(svc.Client) - } - - return svc -} - -func httpClientZero(c *http.Client) bool { - return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) -} - -type metadataOutput struct { - Content string -} - -func unmarshalHandler(r *request.Request) { - defer r.HTTPResponse.Body.Close() - b := &bytes.Buffer{} - if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err) - return - } - - if data, ok := r.Data.(*metadataOutput); ok { - data.Content = b.String() - } -} - -func unmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - b := &bytes.Buffer{} - if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err) - return - } - - // Response body format is not consistent between metadata endpoints. - // Grab the error message as a string and include that as the source error - r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())) -} - -func validateEndpointHandler(r *request.Request) { - if r.ClientInfo.Endpoint == "" { - r.Error = aws.ErrMissingEndpoint - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/errors.go deleted file mode 100644 index 57663616..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/errors.go +++ /dev/null @@ -1,17 +0,0 @@ -package aws - -import "github.com/aws/aws-sdk-go/aws/awserr" - -var ( - // ErrMissingRegion is an error that is returned if region configuration is - // not found. - // - // @readonly - ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) - - // ErrMissingEndpoint is an error that is returned if an endpoint cannot be - // resolved for a service. - // - // @readonly - ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) -) diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/logger.go deleted file mode 100644 index db87188e..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/logger.go +++ /dev/null @@ -1,112 +0,0 @@ -package aws - -import ( - "log" - "os" -) - -// A LogLevelType defines the level logging should be performed at. Used to instruct -// the SDK which statements should be logged. -type LogLevelType uint - -// LogLevel returns the pointer to a LogLevel. Should be used to workaround -// not being able to take the address of a non-composite literal. 
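LogLevelType is a bit mask: the WithSigning/WithHTTPBody/WithRequestRetries sub-levels all imply LogDebug, and Matches, AtLeast, and Value are safe to call on a nil pointer. An illustrative sketch:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	cfg := aws.NewConfig().WithLogLevel(aws.LogDebugWithHTTPBody)

	fmt.Println(cfg.LogLevel.Matches(aws.LogDebugWithHTTPBody)) // true
	fmt.Println(cfg.LogLevel.AtLeast(aws.LogDebug))             // true

	var unset *aws.LogLevelType
	fmt.Println(unset.Value() == aws.LogOff) // true: nil defaults to LogOff
}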
-func LogLevel(l LogLevelType) *LogLevelType { - return &l -} - -// Value returns the LogLevel value or the default value LogOff if the LogLevel -// is nil. Safe to use on nil value LogLevelTypes. -func (l *LogLevelType) Value() LogLevelType { - if l != nil { - return *l - } - return LogOff -} - -// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be -// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If -// LogLevel is nill, will default to LogOff comparison. -func (l *LogLevelType) Matches(v LogLevelType) bool { - c := l.Value() - return c&v == v -} - -// AtLeast returns true if this LogLevel is at least high enough to satisfies v. -// Is safe to use on nil value LogLevelTypes. If LogLevel is nill, will default -// to LogOff comparison. -func (l *LogLevelType) AtLeast(v LogLevelType) bool { - c := l.Value() - return c >= v -} - -const ( - // LogOff states that no logging should be performed by the SDK. This is the - // default state of the SDK, and should be use to disable all logging. - LogOff LogLevelType = iota * 0x1000 - - // LogDebug state that debug output should be logged by the SDK. This should - // be used to inspect request made and responses received. - LogDebug -) - -// Debug Logging Sub Levels -const ( - // LogDebugWithSigning states that the SDK should log request signing and - // presigning events. This should be used to log the signing details of - // requests for debugging. Will also enable LogDebug. - LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) - - // LogDebugWithHTTPBody states the SDK should log HTTP request and response - // HTTP bodys in addition to the headers and path. This should be used to - // see the body content of requests and responses made while using the SDK - // Will also enable LogDebug. - LogDebugWithHTTPBody - - // LogDebugWithRequestRetries states the SDK should log when service requests will - // be retried. This should be used to log when you want to log when service - // requests are being retried. Will also enable LogDebug. - LogDebugWithRequestRetries - - // LogDebugWithRequestErrors states the SDK should log when service requests fail - // to build, send, validate, or unmarshal. - LogDebugWithRequestErrors -) - -// A Logger is a minimalistic interface for the SDK to log messages to. Should -// be used to provide custom logging writers for the SDK to use. -type Logger interface { - Log(...interface{}) -} - -// A LoggerFunc is a convenience type to convert a function taking a variadic -// list of arguments and wrap it so the Logger interface can be used. -// -// Example: -// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { -// fmt.Fprintln(os.Stdout, args...) -// })}) -type LoggerFunc func(...interface{}) - -// Log calls the wrapped function with the arguments provided -func (f LoggerFunc) Log(args ...interface{}) { - f(args...) -} - -// NewDefaultLogger returns a Logger which will write log messages to stdout, and -// use same formatting runes as the stdlib log.Logger -func NewDefaultLogger() Logger { - return &defaultLogger{ - logger: log.New(os.Stdout, "", log.LstdFlags), - } -} - -// A defaultLogger provides a minimalistic logger satisfying the Logger interface. -type defaultLogger struct { - logger *log.Logger -} - -// Log logs the parameters to the stdlib logger. See log.Println. -func (l defaultLogger) Log(args ...interface{}) { - l.logger.Println(args...) 
-} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go deleted file mode 100644 index 5279c19c..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ /dev/null @@ -1,187 +0,0 @@ -package request - -import ( - "fmt" - "strings" -) - -// A Handlers provides a collection of request handlers for various -// stages of handling requests. -type Handlers struct { - Validate HandlerList - Build HandlerList - Sign HandlerList - Send HandlerList - ValidateResponse HandlerList - Unmarshal HandlerList - UnmarshalMeta HandlerList - UnmarshalError HandlerList - Retry HandlerList - AfterRetry HandlerList -} - -// Copy returns of this handler's lists. -func (h *Handlers) Copy() Handlers { - return Handlers{ - Validate: h.Validate.copy(), - Build: h.Build.copy(), - Sign: h.Sign.copy(), - Send: h.Send.copy(), - ValidateResponse: h.ValidateResponse.copy(), - Unmarshal: h.Unmarshal.copy(), - UnmarshalError: h.UnmarshalError.copy(), - UnmarshalMeta: h.UnmarshalMeta.copy(), - Retry: h.Retry.copy(), - AfterRetry: h.AfterRetry.copy(), - } -} - -// Clear removes callback functions for all handlers -func (h *Handlers) Clear() { - h.Validate.Clear() - h.Build.Clear() - h.Send.Clear() - h.Sign.Clear() - h.Unmarshal.Clear() - h.UnmarshalMeta.Clear() - h.UnmarshalError.Clear() - h.ValidateResponse.Clear() - h.Retry.Clear() - h.AfterRetry.Clear() -} - -// A HandlerListRunItem represents an entry in the HandlerList which -// is being run. -type HandlerListRunItem struct { - Index int - Handler NamedHandler - Request *Request -} - -// A HandlerList manages zero or more handlers in a list. -type HandlerList struct { - list []NamedHandler - - // Called after each request handler in the list is called. If set - // and the func returns true the HandlerList will continue to iterate - // over the request handlers. If false is returned the HandlerList - // will stop iterating. - // - // Should be used if extra logic to be performed between each handler - // in the list. This can be used to terminate a list's iteration - // based on a condition such as error like, HandlerListStopOnError. - // Or for logging like HandlerListLogItem. - AfterEachFn func(item HandlerListRunItem) bool -} - -// A NamedHandler is a struct that contains a name and function callback. -type NamedHandler struct { - Name string - Fn func(*Request) -} - -// copy creates a copy of the handler list. -func (l *HandlerList) copy() HandlerList { - n := HandlerList{ - AfterEachFn: l.AfterEachFn, - } - n.list = append([]NamedHandler{}, l.list...) - return n -} - -// Clear clears the handler list. -func (l *HandlerList) Clear() { - l.list = []NamedHandler{} -} - -// Len returns the number of handlers in the list. -func (l *HandlerList) Len() int { - return len(l.list) -} - -// PushBack pushes handler f to the back of the handler list. -func (l *HandlerList) PushBack(f func(*Request)) { - l.list = append(l.list, NamedHandler{"__anonymous", f}) -} - -// PushFront pushes handler f to the front of the handler list. -func (l *HandlerList) PushFront(f func(*Request)) { - l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...) -} - -// PushBackNamed pushes named handler f to the back of the handler list. 
-func (l *HandlerList) PushBackNamed(n NamedHandler) { - l.list = append(l.list, n) -} - -// PushFrontNamed pushes named handler f to the front of the handler list. -func (l *HandlerList) PushFrontNamed(n NamedHandler) { - l.list = append([]NamedHandler{n}, l.list...) -} - -// Remove removes a NamedHandler n -func (l *HandlerList) Remove(n NamedHandler) { - newlist := []NamedHandler{} - for _, m := range l.list { - if m.Name != n.Name { - newlist = append(newlist, m) - } - } - l.list = newlist -} - -// Run executes all handlers in the list with a given request object. -func (l *HandlerList) Run(r *Request) { - for i, h := range l.list { - h.Fn(r) - item := HandlerListRunItem{ - Index: i, Handler: h, Request: r, - } - if l.AfterEachFn != nil && !l.AfterEachFn(item) { - return - } - } -} - -// HandlerListLogItem logs the request handler and the state of the -// request's Error value. Always returns true to continue iterating -// request handlers in a HandlerList. -func HandlerListLogItem(item HandlerListRunItem) bool { - if item.Request.Config.Logger == nil { - return true - } - item.Request.Config.Logger.Log("DEBUG: RequestHandler", - item.Index, item.Handler.Name, item.Request.Error) - - return true -} - -// HandlerListStopOnError returns false to stop the HandlerList iterating -// over request handlers if Request.Error is not nil. True otherwise -// to continue iterating. -func HandlerListStopOnError(item HandlerListRunItem) bool { - return item.Request.Error == nil -} - -// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request -// header. If the extra parameters are provided they will be added as metadata to the -// name/version pair resulting in the following format. -// "name/version (extra0; extra1; ...)" -// The user agent part will be concatenated with this current request's user agent string. -func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { - ua := fmt.Sprintf("%s/%s", name, version) - if len(extra) > 0 { - ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) - } - return func(r *Request) { - AddToUserAgent(r, ua) - } -} - -// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. -// The input string will be concatenated with the current request's user agent string. 
-func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { - return func(r *Request) { - AddToUserAgent(r, s) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go deleted file mode 100644 index a4087f20..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build go1.5 - -package request - -import ( - "io" - "net/http" - "net/url" -) - -func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { - req := &http.Request{ - URL: &url.URL{}, - Header: http.Header{}, - Close: r.Close, - Body: body, - Host: r.Host, - Method: r.Method, - Proto: r.Proto, - ContentLength: r.ContentLength, - // Cancel will be deprecated in 1.7 and will be replaced with Context - Cancel: r.Cancel, - } - - *req.URL = *r.URL - for k, v := range r.Header { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - - return req -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go deleted file mode 100644 index 75da021e..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build !go1.5 - -package request - -import ( - "io" - "net/http" - "net/url" -) - -func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { - req := &http.Request{ - URL: &url.URL{}, - Header: http.Header{}, - Close: r.Close, - Body: body, - Host: r.Host, - Method: r.Method, - Proto: r.Proto, - ContentLength: r.ContentLength, - } - - *req.URL = *r.URL - for k, v := range r.Header { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - - return req -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go deleted file mode 100644 index da6396d2..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ /dev/null @@ -1,49 +0,0 @@ -package request - -import ( - "io" - "sync" -) - -// offsetReader is a thread-safe io.ReadCloser to prevent racing -// with retrying requests -type offsetReader struct { - buf io.ReadSeeker - lock sync.RWMutex - closed bool -} - -func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { - reader := &offsetReader{} - buf.Seek(offset, 0) - - reader.buf = buf - return reader -} - -// Close is a thread-safe close. Uses the write lock. -func (o *offsetReader) Close() error { - o.lock.Lock() - defer o.lock.Unlock() - o.closed = true - return nil -} - -// Read is a thread-safe read using a read lock. -func (o *offsetReader) Read(p []byte) (int, error) { - o.lock.RLock() - defer o.lock.RUnlock() - - if o.closed { - return 0, io.EOF - } - - return o.buf.Read(p) -} - -// CloseAndCopy will return a new offsetReader with a copy of the old buffer -// and close the old buffer. 
-func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { - o.Close() - return newOffsetReader(o.buf, offset) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/request.go deleted file mode 100644 index d04e95bd..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ /dev/null @@ -1,330 +0,0 @@ -package request - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "reflect" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client/metadata" -) - -// A Request is the service request to be made. -type Request struct { - Config aws.Config - ClientInfo metadata.ClientInfo - Handlers Handlers - - Retryer - Time time.Time - ExpireTime time.Duration - Operation *Operation - HTTPRequest *http.Request - HTTPResponse *http.Response - Body io.ReadSeeker - BodyStart int64 // offset from beginning of Body that the request body starts - Params interface{} - Error error - Data interface{} - RequestID string - RetryCount int - Retryable *bool - RetryDelay time.Duration - NotHoist bool - SignedHeaderVals http.Header - LastSignedAt time.Time - - built bool -} - -// An Operation is the service API operation to be made. -type Operation struct { - Name string - HTTPMethod string - HTTPPath string - *Paginator -} - -// Paginator keeps track of pagination configuration for an API operation. -type Paginator struct { - InputTokens []string - OutputTokens []string - LimitToken string - TruncationToken string -} - -// New returns a new Request pointer for the service API -// operation and parameters. -// -// Params is any value of input parameters to be the request payload. -// Data is pointer value to an object which the request's response -// payload will be deserialized to. -func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, - retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { - - method := operation.HTTPMethod - if method == "" { - method = "POST" - } - p := operation.HTTPPath - if p == "" { - p = "/" - } - - httpReq, _ := http.NewRequest(method, "", nil) - - var err error - httpReq.URL, err = url.Parse(clientInfo.Endpoint + p) - if err != nil { - httpReq.URL = &url.URL{} - err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) - } - - r := &Request{ - Config: cfg, - ClientInfo: clientInfo, - Handlers: handlers.Copy(), - - Retryer: retryer, - Time: time.Now(), - ExpireTime: 0, - Operation: operation, - HTTPRequest: httpReq, - Body: nil, - Params: params, - Error: err, - Data: data, - } - r.SetBufferBody([]byte{}) - - return r -} - -// WillRetry returns if the request's can be retried. -func (r *Request) WillRetry() bool { - return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() -} - -// ParamsFilled returns if the request's parameters have been populated -// and the parameters are valid. False is returned if no parameters are -// provided or invalid. -func (r *Request) ParamsFilled() bool { - return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() -} - -// DataFilled returns true if the request's data for response deserialization -// target has been set and is a valid. False is returned if data is not -// set, or is invalid. 
-func (r *Request) DataFilled() bool { - return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() -} - -// SetBufferBody will set the request's body bytes that will be sent to -// the service API. -func (r *Request) SetBufferBody(buf []byte) { - r.SetReaderBody(bytes.NewReader(buf)) -} - -// SetStringBody sets the body of the request to be backed by a string. -func (r *Request) SetStringBody(s string) { - r.SetReaderBody(strings.NewReader(s)) -} - -// SetReaderBody will set the request's body reader. -func (r *Request) SetReaderBody(reader io.ReadSeeker) { - r.HTTPRequest.Body = newOffsetReader(reader, 0) - r.Body = reader -} - -// Presign returns the request's signed URL. Error will be returned -// if the signing fails. -func (r *Request) Presign(expireTime time.Duration) (string, error) { - r.ExpireTime = expireTime - r.NotHoist = false - r.Sign() - if r.Error != nil { - return "", r.Error - } - return r.HTTPRequest.URL.String(), nil -} - -// PresignRequest behaves just like presign, but hoists all headers and signs them. -// Also returns the signed hash back to the user -func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { - r.ExpireTime = expireTime - r.NotHoist = true - r.Sign() - if r.Error != nil { - return "", nil, r.Error - } - return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil -} - -func debugLogReqError(r *Request, stage string, retrying bool, err error) { - if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { - return - } - - retryStr := "not retrying" - if retrying { - retryStr = "will retry" - } - - r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", - stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) -} - -// Build will build the request's object so it can be signed and sent -// to the service. Build will also validate all the request's parameters. -// Anny additional build Handlers set on this request will be run -// in the order they were set. -// -// The request will only be built once. Multiple calls to build will have -// no effect. -// -// If any Validate or Build errors occur the build will stop and the error -// which occurred will be returned. -func (r *Request) Build() error { - if !r.built { - r.Handlers.Validate.Run(r) - if r.Error != nil { - debugLogReqError(r, "Validate Request", false, r.Error) - return r.Error - } - r.Handlers.Build.Run(r) - if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) - return r.Error - } - r.built = true - } - - return r.Error -} - -// Sign will sign the request returning error if errors are encountered. -// -// Send will build the request prior to signing. All Sign Handlers will -// be executed in the order they were set. -func (r *Request) Sign() error { - r.Build() - if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) - return r.Error - } - - r.Handlers.Sign.Run(r) - return r.Error -} - -// Send will send the request returning error if errors are encountered. -// -// Send will sign the request prior to sending. All Send Handlers will -// be executed in the order they were set. -// -// Canceling a request is non-deterministic. If a request has been canceled, -// then the transport will choose, randomly, one of the state channels during -// reads or getting the connection. 
-// -// readLoop() and getConn(req *Request, cm connectMethod) -// https://github.com/golang/go/blob/master/src/net/http/transport.go -func (r *Request) Send() error { - for { - if aws.BoolValue(r.Retryable) { - if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { - r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", - r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) - } - - var body io.ReadCloser - if reader, ok := r.HTTPRequest.Body.(*offsetReader); ok { - body = reader.CloseAndCopy(r.BodyStart) - } else { - if r.Config.Logger != nil { - r.Config.Logger.Log("Request body type has been overwritten. May cause race conditions") - } - r.Body.Seek(r.BodyStart, 0) - body = ioutil.NopCloser(r.Body) - } - - r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, body) - if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { - // Closing response body. Since we are setting a new request to send off, this - // response will get squashed and leaked. - r.HTTPResponse.Body.Close() - } - } - - r.Sign() - if r.Error != nil { - return r.Error - } - - r.Retryable = nil - - r.Handlers.Send.Run(r) - if r.Error != nil { - if strings.Contains(r.Error.Error(), "net/http: request canceled") { - return r.Error - } - - err := r.Error - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Send Request", false, r.Error) - return r.Error - } - debugLogReqError(r, "Send Request", true, err) - continue - } - - r.Handlers.UnmarshalMeta.Run(r) - r.Handlers.ValidateResponse.Run(r) - if r.Error != nil { - err := r.Error - r.Handlers.UnmarshalError.Run(r) - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Validate Response", false, r.Error) - return r.Error - } - debugLogReqError(r, "Validate Response", true, err) - continue - } - - r.Handlers.Unmarshal.Run(r) - if r.Error != nil { - err := r.Error - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", false, r.Error) - return r.Error - } - debugLogReqError(r, "Unmarshal Response", true, err) - continue - } - - break - } - - return nil -} - -// AddToUserAgent adds the string to the end of the request's current user agent. -func AddToUserAgent(r *Request, s string) { - curUA := r.HTTPRequest.Header.Get("User-Agent") - if len(curUA) > 0 { - s = curUA + " " + s - } - r.HTTPRequest.Header.Set("User-Agent", s) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go deleted file mode 100644 index 2939ec47..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ /dev/null @@ -1,104 +0,0 @@ -package request - -import ( - "reflect" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" -) - -//type Paginater interface { -// HasNextPage() bool -// NextPage() *Request -// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error -//} - -// HasNextPage returns true if this request has more pages of data available. -func (r *Request) HasNextPage() bool { - return len(r.nextPageTokens()) > 0 -} - -// nextPageTokens returns the tokens to use when asking for the next page of -// data. 
-func (r *Request) nextPageTokens() []interface{} { - if r.Operation.Paginator == nil { - return nil - } - - if r.Operation.TruncationToken != "" { - tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) - if len(tr) == 0 { - return nil - } - - switch v := tr[0].(type) { - case *bool: - if !aws.BoolValue(v) { - return nil - } - case bool: - if v == false { - return nil - } - } - } - - tokens := []interface{}{} - tokenAdded := false - for _, outToken := range r.Operation.OutputTokens { - v, _ := awsutil.ValuesAtPath(r.Data, outToken) - if len(v) > 0 { - tokens = append(tokens, v[0]) - tokenAdded = true - } else { - tokens = append(tokens, nil) - } - } - if !tokenAdded { - return nil - } - - return tokens -} - -// NextPage returns a new Request that can be executed to return the next -// page of result data. Call .Send() on this request to execute it. -func (r *Request) NextPage() *Request { - tokens := r.nextPageTokens() - if len(tokens) == 0 { - return nil - } - - data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() - nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) - for i, intok := range nr.Operation.InputTokens { - awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) - } - return nr -} - -// EachPage iterates over each page of a paginated request object. The fn -// parameter should be a function with the following sample signature: -// -// func(page *T, lastPage bool) bool { -// return true // return false to stop iterating -// } -// -// Where "T" is the structure type matching the output structure of the given -// operation. For example, a request object generated by -// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput -// as the structure "T". The lastPage value represents whether the page is -// the last page of data or not. The return value of this function should -// return true to keep iterating or false to stop. -func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { - for page := r; page != nil; page = page.NextPage() { - if err := page.Send(); err != nil { - return err - } - if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { - return page.Error - } - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go deleted file mode 100644 index 8cc8b015..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ /dev/null @@ -1,101 +0,0 @@ -package request - -import ( - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// Retryer is an interface to control retry logic for a given service. -// The default implementation used by most services is the service.DefaultRetryer -// structure, which contains basic retry logic using exponential backoff. -type Retryer interface { - RetryRules(*Request) time.Duration - ShouldRetry(*Request) bool - MaxRetries() int -} - -// WithRetryer sets a config Retryer value to the given Config returning it -// for chaining. -func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { - cfg.Retryer = retryer - return cfg -} - -// retryableCodes is a collection of service response codes which are retry-able -// without any further action. 
-var retryableCodes = map[string]struct{}{ - "RequestError": {}, - "RequestTimeout": {}, -} - -var throttleCodes = map[string]struct{}{ - "ProvisionedThroughputExceededException": {}, - "Throttling": {}, - "ThrottlingException": {}, - "RequestLimitExceeded": {}, - "RequestThrottled": {}, - "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once - "TooManyRequestsException": {}, // Lambda functions -} - -// credsExpiredCodes is a collection of error codes which signify the credentials -// need to be refreshed. Expired tokens require refreshing of credentials, and -// resigning before the request can be retried. -var credsExpiredCodes = map[string]struct{}{ - "ExpiredToken": {}, - "ExpiredTokenException": {}, - "RequestExpired": {}, // EC2 Only -} - -func isCodeThrottle(code string) bool { - _, ok := throttleCodes[code] - return ok -} - -func isCodeRetryable(code string) bool { - if _, ok := retryableCodes[code]; ok { - return true - } - - return isCodeExpiredCreds(code) -} - -func isCodeExpiredCreds(code string) bool { - _, ok := credsExpiredCodes[code] - return ok -} - -// IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if the request has no Error set. -func (r *Request) IsErrorRetryable() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeRetryable(err.Code()) - } - } - return false -} - -// IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if the request has no Error set -func (r *Request) IsErrorThrottle() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeThrottle(err.Code()) - } - } - return false -} - -// IsErrorExpired returns whether the error code is a credential expiry error. -// Returns false if the request has no Error set. -func (r *Request) IsErrorExpired() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeExpiredCreds(err.Code()) - } - } - return false -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go deleted file mode 100644 index 2520286b..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go +++ /dev/null @@ -1,234 +0,0 @@ -package request - -import ( - "bytes" - "fmt" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -const ( - // InvalidParameterErrCode is the error code for invalid parameters errors - InvalidParameterErrCode = "InvalidParameter" - // ParamRequiredErrCode is the error code for required parameter errors - ParamRequiredErrCode = "ParamRequiredError" - // ParamMinValueErrCode is the error code for fields with too low of a - // number value. - ParamMinValueErrCode = "ParamMinValueError" - // ParamMinLenErrCode is the error code for fields without enough elements. - ParamMinLenErrCode = "ParamMinLenError" -) - -// Validator provides a way for types to perform validation logic on their -// input values that external code can use to determine if a type's values -// are valid. -type Validator interface { - Validate() error -} - -// An ErrInvalidParams provides wrapping of invalid parameter errors found when -// validating API operation input parameters. -type ErrInvalidParams struct { - // Context is the base context of the invalid parameter group. 
- Context string - errs []ErrInvalidParam -} - -// Add adds a new invalid parameter error to the collection of invalid -// parameters. The context of the invalid parameter will be updated to reflect -// this collection. -func (e *ErrInvalidParams) Add(err ErrInvalidParam) { - err.SetContext(e.Context) - e.errs = append(e.errs, err) -} - -// AddNested adds the invalid parameter errors from another ErrInvalidParams -// value into this collection. The nested errors will have their nested context -// updated and base context to reflect the merging. -// -// Use for nested validations errors. -func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { - for _, err := range nested.errs { - err.SetContext(e.Context) - err.AddNestedContext(nestedCtx) - e.errs = append(e.errs, err) - } -} - -// Len returns the number of invalid parameter errors -func (e ErrInvalidParams) Len() int { - return len(e.errs) -} - -// Code returns the code of the error -func (e ErrInvalidParams) Code() string { - return InvalidParameterErrCode -} - -// Message returns the message of the error -func (e ErrInvalidParams) Message() string { - return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) -} - -// Error returns the string formatted form of the invalid parameters. -func (e ErrInvalidParams) Error() string { - w := &bytes.Buffer{} - fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) - - for _, err := range e.errs { - fmt.Fprintf(w, "- %s\n", err.Message()) - } - - return w.String() -} - -// OrigErr returns the invalid parameters as a awserr.BatchedErrors value -func (e ErrInvalidParams) OrigErr() error { - return awserr.NewBatchError( - InvalidParameterErrCode, e.Message(), e.OrigErrs()) -} - -// OrigErrs returns a slice of the invalid parameters -func (e ErrInvalidParams) OrigErrs() []error { - errs := make([]error, len(e.errs)) - for i := 0; i < len(errs); i++ { - errs[i] = e.errs[i] - } - - return errs -} - -// An ErrInvalidParam represents an invalid parameter error type. -type ErrInvalidParam interface { - awserr.Error - - // Field name the error occurred on. - Field() string - - // SetContext updates the context of the error. - SetContext(string) - - // AddNestedContext updates the error's context to include a nested level. - AddNestedContext(string) -} - -type errInvalidParam struct { - context string - nestedContext string - field string - code string - msg string -} - -// Code returns the error code for the type of invalid parameter. -func (e *errInvalidParam) Code() string { - return e.code -} - -// Message returns the reason the parameter was invalid, and its context. -func (e *errInvalidParam) Message() string { - return fmt.Sprintf("%s, %s.", e.msg, e.Field()) -} - -// Error returns the string version of the invalid parameter error. -func (e *errInvalidParam) Error() string { - return fmt.Sprintf("%s: %s", e.code, e.Message()) -} - -// OrigErr returns nil, Implemented for awserr.Error interface. -func (e *errInvalidParam) OrigErr() error { - return nil -} - -// Field Returns the field and context the error occurred. -func (e *errInvalidParam) Field() string { - field := e.context - if len(field) > 0 { - field += "." - } - if len(e.nestedContext) > 0 { - field += fmt.Sprintf("%s.", e.nestedContext) - } - field += e.field - - return field -} - -// SetContext updates the base context of the error. -func (e *errInvalidParam) SetContext(ctx string) { - e.context = ctx -} - -// AddNestedContext prepends a context to the field's path. 
-func (e *errInvalidParam) AddNestedContext(ctx string) { - if len(e.nestedContext) == 0 { - e.nestedContext = ctx - } else { - e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) - } - -} - -// An ErrParamRequired represents an required parameter error. -type ErrParamRequired struct { - errInvalidParam -} - -// NewErrParamRequired creates a new required parameter error. -func NewErrParamRequired(field string) *ErrParamRequired { - return &ErrParamRequired{ - errInvalidParam{ - code: ParamRequiredErrCode, - field: field, - msg: fmt.Sprintf("missing required field"), - }, - } -} - -// An ErrParamMinValue represents a minimum value parameter error. -type ErrParamMinValue struct { - errInvalidParam - min float64 -} - -// NewErrParamMinValue creates a new minimum value parameter error. -func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { - return &ErrParamMinValue{ - errInvalidParam: errInvalidParam{ - code: ParamMinValueErrCode, - field: field, - msg: fmt.Sprintf("minimum field value of %v", min), - }, - min: min, - } -} - -// MinValue returns the field's require minimum value. -// -// float64 is returned for both int and float min values. -func (e *ErrParamMinValue) MinValue() float64 { - return e.min -} - -// An ErrParamMinLen represents a minimum length parameter error. -type ErrParamMinLen struct { - errInvalidParam - min int -} - -// NewErrParamMinLen creates a new minimum length parameter error. -func NewErrParamMinLen(field string, min int) *ErrParamMinLen { - return &ErrParamMinLen{ - errInvalidParam: errInvalidParam{ - code: ParamMinValueErrCode, - field: field, - msg: fmt.Sprintf("minimum field size of %v", min), - }, - min: min, - } -} - -// MinLen returns the field's required minimum length. -func (e *ErrParamMinLen) MinLen() int { - return e.min -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/session/session.go deleted file mode 100644 index 6bc8f1be..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ /dev/null @@ -1,120 +0,0 @@ -// Package session provides a way to create service clients with shared configuration -// and handlers. -// -// Generally this package should be used instead of the `defaults` package. -// -// A session should be used to share configurations and request handlers between multiple -// service clients. When service clients need specific configuration aws.Config can be -// used to provide additional configuration directly to the service client. -package session - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/defaults" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/endpoints" -) - -// A Session provides a central location to create service clients from and -// store configurations and request handlers for those services. -// -// Sessions are safe to create service clients concurrently, but it is not safe -// to mutate the session concurrently. -type Session struct { - Config *aws.Config - Handlers request.Handlers -} - -// New creates a new instance of the handlers merging in the provided Configs -// on top of the SDK's default configurations. Once the session is created it -// can be mutated to modify Configs or Handlers. 
The session is safe to be read -// concurrently, but it should not be written to concurrently. -// -// Example: -// // Create a session with the default config and request handlers. -// sess := session.New() -// -// // Create a session with a custom region -// sess := session.New(&aws.Config{Region: aws.String("us-east-1")}) -// -// // Create a session, and add additional handlers for all service -// // clients created with the session to inherit. Adds logging handler. -// sess := session.New() -// sess.Handlers.Send.PushFront(func(r *request.Request) { -// // Log every request made and its payload -// logger.Println("Request: %s/%s, Payload: %s", r.ClientInfo.ServiceName, r.Operation, r.Params) -// }) -// -// // Create a S3 client instance from a session -// sess := session.New() -// svc := s3.New(sess) -func New(cfgs ...*aws.Config) *Session { - cfg := defaults.Config() - handlers := defaults.Handlers() - - // Apply the passed in configs so the configuration can be applied to the - // default credential chain - cfg.MergeIn(cfgs...) - cfg.Credentials = defaults.CredChain(cfg, handlers) - - // Reapply any passed in configs to override credentials if set - cfg.MergeIn(cfgs...) - - s := &Session{ - Config: cfg, - Handlers: handlers, - } - - initHandlers(s) - - return s -} - -func initHandlers(s *Session) { - // Add the Validate parameter handler if it is not disabled. - s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) - if !aws.BoolValue(s.Config.DisableParamValidation) { - s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) - } -} - -// Copy creates and returns a copy of the current session, coping the config -// and handlers. If any additional configs are provided they will be merged -// on top of the session's copied config. -// -// Example: -// // Create a copy of the current session, configured for the us-west-2 region. -// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) -func (s *Session) Copy(cfgs ...*aws.Config) *Session { - newSession := &Session{ - Config: s.Config.Copy(cfgs...), - Handlers: s.Handlers.Copy(), - } - - initHandlers(newSession) - - return newSession -} - -// ClientConfig satisfies the client.ConfigProvider interface and is used to -// configure the service client instances. Passing the Session to the service -// client's constructor (New) will use this method to configure the client. -// -// Example: -// sess := session.New() -// s3.New(sess) -func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { - s = s.Copy(cfgs...) 
- endpoint, signingRegion := endpoints.NormalizeEndpoint( - aws.StringValue(s.Config.Endpoint), serviceName, - aws.StringValue(s.Config.Region), aws.BoolValue(s.Config.DisableSSL)) - - return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - Endpoint: endpoint, - SigningRegion: signingRegion, - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go deleted file mode 100644 index 244c86da..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go +++ /dev/null @@ -1,82 +0,0 @@ -package v4 - -import ( - "net/http" - "strings" -) - -// validator houses a set of rule needed for validation of a -// string value -type rules []rule - -// rule interface allows for more flexible rules and just simply -// checks whether or not a value adheres to that rule -type rule interface { - IsValid(value string) bool -} - -// IsValid will iterate through all rules and see if any rules -// apply to the value and supports nested rules -func (r rules) IsValid(value string) bool { - for _, rule := range r { - if rule.IsValid(value) { - return true - } - } - return false -} - -// mapRule generic rule for maps -type mapRule map[string]struct{} - -// IsValid for the map rule satisfies whether it exists in the map -func (m mapRule) IsValid(value string) bool { - _, ok := m[value] - return ok -} - -// whitelist is a generic rule for whitelisting -type whitelist struct { - rule -} - -// IsValid for whitelist checks if the value is within the whitelist -func (w whitelist) IsValid(value string) bool { - return w.rule.IsValid(value) -} - -// blacklist is a generic rule for blacklisting -type blacklist struct { - rule -} - -// IsValid for whitelist checks if the value is within the whitelist -func (b blacklist) IsValid(value string) bool { - return !b.rule.IsValid(value) -} - -type patterns []string - -// IsValid for patterns checks each pattern and returns if a match has -// been found -func (p patterns) IsValid(value string) bool { - for _, pattern := range p { - if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) { - return true - } - } - return false -} - -// inclusiveRules rules allow for rules to depend on one another -type inclusiveRules []rule - -// IsValid will return true if all rules are true -func (r inclusiveRules) IsValid(value string) bool { - for _, rule := range r { - if !rule.IsValid(value) { - return false - } - } - return true -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go deleted file mode 100644 index f040f9ce..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ /dev/null @@ -1,644 +0,0 @@ -// Package v4 implements signing for AWS V4 signer -// -// Provides request signing for request that need to be signed with -// AWS V4 Signatures. 
-package v4 - -import ( - "bytes" - "crypto/hmac" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/rest" -) - -const ( - authHeaderPrefix = "AWS4-HMAC-SHA256" - timeFormat = "20060102T150405Z" - shortTimeFormat = "20060102" - - // emptyStringSHA256 is a SHA256 of an empty string - emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` -) - -var ignoredHeaders = rules{ - blacklist{ - mapRule{ - "Authorization": struct{}{}, - "User-Agent": struct{}{}, - }, - }, -} - -// requiredSignedHeaders is a whitelist for build canonical headers. -var requiredSignedHeaders = rules{ - whitelist{ - mapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - "X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Grant-Full-control": struct{}{}, - "X-Amz-Grant-Read": struct{}{}, - "X-Amz-Grant-Read-Acp": struct{}{}, - "X-Amz-Grant-Write": struct{}{}, - "X-Amz-Grant-Write-Acp": struct{}{}, - "X-Amz-Metadata-Directive": struct{}{}, - "X-Amz-Mfa": struct{}{}, - "X-Amz-Request-Payer": struct{}{}, - "X-Amz-Server-Side-Encryption": struct{}{}, - "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Storage-Class": struct{}{}, - "X-Amz-Website-Redirect-Location": struct{}{}, - }, - }, - patterns{"X-Amz-Meta-"}, -} - -// allowedHoisting is a whitelist for build query headers. The boolean value -// represents whether or not it is a pattern. -var allowedQueryHoisting = inclusiveRules{ - blacklist{requiredSignedHeaders}, - patterns{"X-Amz-"}, -} - -// Signer applies AWS v4 signing to given request. Use this to sign requests -// that need to be signed with AWS V4 Signatures. -type Signer struct { - // The authentication credentials the request will be signed against. - // This value must be set to sign requests. - Credentials *credentials.Credentials - - // Sets the log level the signer should use when reporting information to - // the logger. If the logger is nil nothing will be logged. See - // aws.LogLevelType for more information on available logging levels - // - // By default nothing will be logged. - Debug aws.LogLevelType - - // The logger loging information will be written to. If there the logger - // is nil, nothing will be logged. 
- Logger aws.Logger - - // Disables the Signer's moving HTTP header key/value pairs from the HTTP - // request header to the request's query string. This is most commonly used - // with pre-signed requests preventing headers from being added to the - // request's query string. - DisableHeaderHoisting bool - - // currentTimeFn returns the time value which represents the current time. - // This value should only be used for testing. If it is nil the default - // time.Now will be used. - currentTimeFn func() time.Time -} - -// NewSigner returns a Signer pointer configured with the credentials and optional -// option values provided. If not options are provided the Signer will use its -// default configuration. -func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer { - v4 := &Signer{ - Credentials: credentials, - } - - for _, option := range options { - option(v4) - } - - return v4 -} - -type signingCtx struct { - ServiceName string - Region string - Request *http.Request - Body io.ReadSeeker - Query url.Values - Time time.Time - ExpireTime time.Duration - SignedHeaderVals http.Header - - credValues credentials.Value - isPresign bool - formattedTime string - formattedShortTime string - - bodyDigest string - signedHeaders string - canonicalHeaders string - canonicalString string - credentialString string - stringToSign string - signature string - authorization string -} - -// Sign signs AWS v4 requests with the provided body, service name, region the -// request is made to, and time the request is signed at. The signTime allows -// you to specify that a request is signed for the future, and cannot be -// used until then. -// -// Returns a list of HTTP headers that were included in the signature or an -// error if signing the request failed. Generally for signed requests this value -// is not needed as the full request context will be captured by the http.Request -// value. It is included for reference though. -// -// Sign differs from Presign in that it will sign the request using HTTP -// header values. This type of signing is intended for http.Request values that -// will not be shared, or are shared in a way the header values on the request -// will not be lost. -// -// The requests body is an io.ReadSeeker so the SHA256 of the body can be -// generated. To bypass the signer computing the hash you can set the -// "X-Amz-Content-Sha256" header with a precomputed value. The signer will -// only compute the hash if the request header value is empty. -func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, 0, signTime) -} - -// Presign signs AWS v4 requests with the provided body, service name, region -// the request is made to, and time the request is signed at. The signTime -// allows you to specify that a request is signed for the future, and cannot -// be used until then. -// -// Returns a list of HTTP headers that were included in the signature or an -// error if signing the request failed. For presigned requests these headers -// and their values must be included on the HTTP request when it is made. This -// is helpful to know what header values need to be shared with the party the -// presigned request will be distributed to. -// -// Presign differs from Sign in that it will sign the request using query string -// instead of header values. 
This allows you to share the Presigned Request's -// URL with third parties, or distribute it throughout your system with minimal -// dependencies. -// -// Presign also takes an exp value which is the duration the -// signed request will be valid after the signing time. This is allows you to -// set when the request will expire. -// -// The requests body is an io.ReadSeeker so the SHA256 of the body can be -// generated. To bypass the signer computing the hash you can set the -// "X-Amz-Content-Sha256" header with a precomputed value. The signer will -// only compute the hash if the request header value is empty. -// -// Presigning a S3 request will not compute the body's SHA256 hash by default. -// This is done due to the general use case for S3 presigned URLs is to share -// PUT/GET capabilities. If you would like to include the body's SHA256 in the -// presigned request's signature you can set the "X-Amz-Content-Sha256" -// HTTP header and that will be included in the request's signature. -func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, exp, signTime) -} - -func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { - currentTimeFn := v4.currentTimeFn - if currentTimeFn == nil { - currentTimeFn = time.Now - } - - ctx := &signingCtx{ - Request: r, - Body: body, - Query: r.URL.Query(), - Time: signTime, - ExpireTime: exp, - isPresign: exp != 0, - ServiceName: service, - Region: region, - } - - if ctx.isRequestSigned() { - if !v4.Credentials.IsExpired() && currentTimeFn().Before(ctx.Time.Add(10*time.Minute)) { - // If the request is already signed, and the credentials have not - // expired, and the request is not too old ignore the signing request. - return ctx.SignedHeaderVals, nil - } - ctx.Time = currentTimeFn() - ctx.handlePresignRemoval() - } - - var err error - ctx.credValues, err = v4.Credentials.Get() - if err != nil { - return http.Header{}, err - } - - ctx.assignAmzQueryValues() - ctx.build(v4.DisableHeaderHoisting) - - if v4.Debug.Matches(aws.LogDebugWithSigning) { - v4.logSigningInfo(ctx) - } - - return ctx.SignedHeaderVals, nil -} - -func (ctx *signingCtx) handlePresignRemoval() { - if !ctx.isPresign { - return - } - - // The credentials have expired for this request. The current signing - // is invalid, and needs to be request because the request will fail. - ctx.removePresign() - - // Update the request's query string to ensure the values stays in - // sync in the case retrieving the new credentials fails. - ctx.Request.URL.RawQuery = ctx.Query.Encode() -} - -func (ctx *signingCtx) assignAmzQueryValues() { - if ctx.isPresign { - ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix) - if ctx.credValues.SessionToken != "" { - ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) - } else { - ctx.Query.Del("X-Amz-Security-Token") - } - - return - } - - if ctx.credValues.SessionToken != "" { - ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) - } -} - -// SignRequestHandler is a named request handler the SDK will use to sign -// service client request with using the V4 signature. -var SignRequestHandler = request.NamedHandler{ - Name: "v4.SignRequestHandler", Fn: SignSDKRequest, -} - -// SignSDKRequest signs an AWS request with the V4 signature. 
This -// request handler is bested used only with the SDK's built in service client's -// API operation requests. -// -// This function should not be used on its on its own, but in conjunction with -// an AWS service client's API operation call. To sign a standalone request -// not created by a service client's API operation method use the "Sign" or -// "Presign" functions of the "Signer" type. -// -// If the credentials of the request's config are set to -// credentials.AnonymousCredentials the request will not be signed. -func SignSDKRequest(req *request.Request) { - signSDKRequestWithCurrTime(req, time.Now) -} -func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time) { - // If the request does not need to be signed ignore the signing of the - // request if the AnonymousCredentials object is used. - if req.Config.Credentials == credentials.AnonymousCredentials { - return - } - - region := req.ClientInfo.SigningRegion - if region == "" { - region = aws.StringValue(req.Config.Region) - } - - name := req.ClientInfo.SigningName - if name == "" { - name = req.ClientInfo.ServiceName - } - - v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) { - v4.Debug = req.Config.LogLevel.Value() - v4.Logger = req.Config.Logger - v4.DisableHeaderHoisting = req.NotHoist - v4.currentTimeFn = curTimeFn - }) - - signingTime := req.Time - if !req.LastSignedAt.IsZero() { - signingTime = req.LastSignedAt - } - - signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.Body, name, region, req.ExpireTime, signingTime) - if err != nil { - req.Error = err - req.SignedHeaderVals = nil - return - } - - req.SignedHeaderVals = signedHeaders - req.LastSignedAt = curTimeFn() -} - -const logSignInfoMsg = `DEBUG: Request Signiture: ----[ CANONICAL STRING ]----------------------------- -%s ----[ STRING TO SIGN ]-------------------------------- -%s%s ------------------------------------------------------` -const logSignedURLMsg = ` ----[ SIGNED URL ]------------------------------------ -%s` - -func (v4 *Signer) logSigningInfo(ctx *signingCtx) { - signedURLMsg := "" - if ctx.isPresign { - signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) - } - msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) - v4.Logger.Log(msg) -} - -func (ctx *signingCtx) build(disableHeaderHoisting bool) { - ctx.buildTime() // no depends - ctx.buildCredentialString() // no depends - - unsignedHeaders := ctx.Request.Header - if ctx.isPresign { - if !disableHeaderHoisting { - urlValues := url.Values{} - urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends - for k := range urlValues { - ctx.Query[k] = urlValues[k] - } - } - } - - ctx.buildBodyDigest() - ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) - ctx.buildCanonicalString() // depends on canon headers / signed headers - ctx.buildStringToSign() // depends on canon string - ctx.buildSignature() // depends on string to sign - - if ctx.isPresign { - ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature - } else { - parts := []string{ - authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, - "SignedHeaders=" + ctx.signedHeaders, - "Signature=" + ctx.signature, - } - ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) - } -} - -func (ctx *signingCtx) buildTime() { - ctx.formattedTime = ctx.Time.UTC().Format(timeFormat) - ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat) - - if ctx.isPresign { - 
duration := int64(ctx.ExpireTime / time.Second) - ctx.Query.Set("X-Amz-Date", ctx.formattedTime) - ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) - } else { - ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime) - } -} - -func (ctx *signingCtx) buildCredentialString() { - ctx.credentialString = strings.Join([]string{ - ctx.formattedShortTime, - ctx.Region, - ctx.ServiceName, - "aws4_request", - }, "/") - - if ctx.isPresign { - ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) - } -} - -func buildQuery(r rule, header http.Header) (url.Values, http.Header) { - query := url.Values{} - unsignedHeaders := http.Header{} - for k, h := range header { - if r.IsValid(k) { - query[k] = h - } else { - unsignedHeaders[k] = h - } - } - - return query, unsignedHeaders -} -func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { - var headers []string - headers = append(headers, "host") - for k, v := range header { - canonicalKey := http.CanonicalHeaderKey(k) - if !r.IsValid(canonicalKey) { - continue // ignored header - } - if ctx.SignedHeaderVals == nil { - ctx.SignedHeaderVals = make(http.Header) - } - - lowerCaseKey := strings.ToLower(k) - if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { - // include additional values - ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) - continue - } - - headers = append(headers, lowerCaseKey) - ctx.SignedHeaderVals[lowerCaseKey] = v - } - sort.Strings(headers) - - ctx.signedHeaders = strings.Join(headers, ";") - - if ctx.isPresign { - ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) - } - - headerValues := make([]string, len(headers)) - for i, k := range headers { - if k == "host" { - headerValues[i] = "host:" + ctx.Request.URL.Host - } else { - headerValues[i] = k + ":" + - strings.Join(ctx.SignedHeaderVals[k], ",") - } - } - - ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") -} - -func (ctx *signingCtx) buildCanonicalString() { - ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) - uri := ctx.Request.URL.Opaque - if uri != "" { - uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") - } else { - uri = ctx.Request.URL.Path - } - if uri == "" { - uri = "/" - } - - if ctx.ServiceName != "s3" { - uri = rest.EscapePath(uri, false) - } - - ctx.canonicalString = strings.Join([]string{ - ctx.Request.Method, - uri, - ctx.Request.URL.RawQuery, - ctx.canonicalHeaders + "\n", - ctx.signedHeaders, - ctx.bodyDigest, - }, "\n") -} - -func (ctx *signingCtx) buildStringToSign() { - ctx.stringToSign = strings.Join([]string{ - authHeaderPrefix, - ctx.formattedTime, - ctx.credentialString, - hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))), - }, "\n") -} - -func (ctx *signingCtx) buildSignature() { - secret := ctx.credValues.SecretAccessKey - date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime)) - region := makeHmac(date, []byte(ctx.Region)) - service := makeHmac(region, []byte(ctx.ServiceName)) - credentials := makeHmac(service, []byte("aws4_request")) - signature := makeHmac(credentials, []byte(ctx.stringToSign)) - ctx.signature = hex.EncodeToString(signature) -} - -func (ctx *signingCtx) buildBodyDigest() { - hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") - if hash == "" { - if ctx.isPresign && ctx.ServiceName == "s3" { - hash = "UNSIGNED-PAYLOAD" - } else if ctx.Body == nil { - hash = emptyStringSHA256 - } else { - hash = 
hex.EncodeToString(makeSha256Reader(ctx.Body)) - } - if ctx.ServiceName == "s3" { - ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) - } - } - ctx.bodyDigest = hash -} - -// isRequestSigned returns if the request is currently signed or presigned -func (ctx *signingCtx) isRequestSigned() bool { - if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { - return true - } - if ctx.Request.Header.Get("Authorization") != "" { - return true - } - - return false -} - -// unsign removes signing flags for both signed and presigned requests. -func (ctx *signingCtx) removePresign() { - ctx.Query.Del("X-Amz-Algorithm") - ctx.Query.Del("X-Amz-Signature") - ctx.Query.Del("X-Amz-Security-Token") - ctx.Query.Del("X-Amz-Date") - ctx.Query.Del("X-Amz-Expires") - ctx.Query.Del("X-Amz-Credential") - ctx.Query.Del("X-Amz-SignedHeaders") -} - -func makeHmac(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256(data []byte) []byte { - hash := sha256.New() - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256Reader(reader io.ReadSeeker) []byte { - hash := sha256.New() - start, _ := reader.Seek(0, 1) - defer reader.Seek(start, 0) - - io.Copy(hash, reader) - return hash.Sum(nil) -} - -const doubleSpaces = " " - -var doubleSpaceBytes = []byte(doubleSpaces) - -func stripExcessSpaces(headerVals []string) []string { - vals := make([]string, len(headerVals)) - for i, str := range headerVals { - // Trim leading and trailing spaces - trimmed := strings.TrimSpace(str) - - idx := strings.Index(trimmed, doubleSpaces) - var buf []byte - for idx > -1 { - // Multiple adjacent spaces found - if buf == nil { - // first time create the buffer - buf = []byte(trimmed) - } - - stripToIdx := -1 - for j := idx + 1; j < len(buf); j++ { - if buf[j] != ' ' { - buf = append(buf[:idx+1], buf[j:]...) - stripToIdx = j - break - } - } - - if stripToIdx >= 0 { - idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes) - if idx >= 0 { - idx += stripToIdx - } - } else { - idx = -1 - } - } - - if buf != nil { - vals[i] = string(buf) - } else { - vals[i] = trimmed - } - } - return vals -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/types.go deleted file mode 100644 index fa014b49..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ /dev/null @@ -1,106 +0,0 @@ -package aws - -import ( - "io" - "sync" -) - -// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser -func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { - return ReaderSeekerCloser{r} -} - -// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and -// io.Closer interfaces to the underlying object if they are available. -type ReaderSeekerCloser struct { - r io.Reader -} - -// Read reads from the reader up to size of p. The number of bytes read, and -// error if it occurred will be returned. -// -// If the reader is not an io.Reader zero bytes read, and nil error will be returned. 
-// -// Performs the same functionality as io.Reader Read -func (r ReaderSeekerCloser) Read(p []byte) (int, error) { - switch t := r.r.(type) { - case io.Reader: - return t.Read(p) - } - return 0, nil -} - -// Seek sets the offset for the next Read to offset, interpreted according to -// whence: 0 means relative to the origin of the file, 1 means relative to the -// current offset, and 2 means relative to the end. Seek returns the new offset -// and an error, if any. -// -// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. -func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { - switch t := r.r.(type) { - case io.Seeker: - return t.Seek(offset, whence) - } - return int64(0), nil -} - -// Close closes the ReaderSeekerCloser. -// -// If the ReaderSeekerCloser is not an io.Closer nothing will be done. -func (r ReaderSeekerCloser) Close() error { - switch t := r.r.(type) { - case io.Closer: - return t.Close() - } - return nil -} - -// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface -// Can be used with the s3manager.Downloader to download content to a buffer -// in memory. Safe to use concurrently. -type WriteAtBuffer struct { - buf []byte - m sync.Mutex - - // GrowthCoeff defines the growth rate of the internal buffer. By - // default, the growth rate is 1, where expanding the internal - // buffer will allocate only enough capacity to fit the new expected - // length. - GrowthCoeff float64 -} - -// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer -// provided by buf. -func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { - return &WriteAtBuffer{buf: buf} -} - -// WriteAt writes a slice of bytes to a buffer starting at the position provided -// The number of bytes written will be returned, or error. Can overwrite previous -// written slices if the write ats overlap. -func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { - pLen := len(p) - expLen := pos + int64(pLen) - b.m.Lock() - defer b.m.Unlock() - if int64(len(b.buf)) < expLen { - if int64(cap(b.buf)) < expLen { - if b.GrowthCoeff < 1 { - b.GrowthCoeff = 1 - } - newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) - copy(newBuf, b.buf) - b.buf = newBuf - } - b.buf = b.buf[:expLen] - } - copy(b.buf[pos:], p) - return pLen, nil -} - -// Bytes returns a slice of bytes written to the buffer. -func (b *WriteAtBuffer) Bytes() []byte { - b.m.Lock() - defer b.m.Unlock() - return b.buf[:len(b.buf):len(b.buf)] -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/version.go deleted file mode 100644 index e7805213..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package aws provides core functionality for making requests to AWS services. 
-package aws - -// SDKName is the name of this AWS SDK -const SDKName = "aws-sdk-go" - -// SDKVersion is the version of this SDK -const SDKVersion = "1.2.4" diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go deleted file mode 100644 index 2b279e65..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go +++ /dev/null @@ -1,65 +0,0 @@ -// Package endpoints validates regional endpoints for services. -package endpoints - -//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go -//go:generate gofmt -s -w endpoints_map.go - -import ( - "fmt" - "regexp" - "strings" -) - -// NormalizeEndpoint takes and endpoint and service API information to return a -// normalized endpoint and signing region. If the endpoint is not an empty string -// the service name and region will be used to look up the service's API endpoint. -// If the endpoint is provided the scheme will be added if it is not present. -func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) { - if endpoint == "" { - return EndpointForRegion(serviceName, region, disableSSL) - } - - return AddScheme(endpoint, disableSSL), "" -} - -// EndpointForRegion returns an endpoint and its signing region for a service and region. -// if the service and region pair are not found endpoint and signingRegion will be empty. -func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) { - derivedKeys := []string{ - region + "/" + svcName, - region + "/*", - "*/" + svcName, - "*/*", - } - - for _, key := range derivedKeys { - if val, ok := endpointsMap.Endpoints[key]; ok { - ep := val.Endpoint - ep = strings.Replace(ep, "{region}", region, -1) - ep = strings.Replace(ep, "{service}", svcName, -1) - - endpoint = ep - signingRegion = val.SigningRegion - break - } - } - - return AddScheme(endpoint, disableSSL), signingRegion -} - -// Regular expression to determine if the endpoint string is prefixed with a scheme. -var schemeRE = regexp.MustCompile("^([^:]+)://") - -// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no -// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS. 
-func AddScheme(endpoint string, disableSSL bool) string { - if endpoint != "" && !schemeRE.MatchString(endpoint) { - scheme := "https" - if disableSSL { - scheme = "http" - } - endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) - } - - return endpoint -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json deleted file mode 100644 index 5f4991c2..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "version": 2, - "endpoints": { - "*/*": { - "endpoint": "{service}.{region}.amazonaws.com" - }, - "cn-north-1/*": { - "endpoint": "{service}.{region}.amazonaws.com.cn", - "signatureVersion": "v4" - }, - "cn-north-1/ec2metadata": { - "endpoint": "http://169.254.169.254/latest" - }, - "us-gov-west-1/iam": { - "endpoint": "iam.us-gov.amazonaws.com" - }, - "us-gov-west-1/sts": { - "endpoint": "sts.us-gov-west-1.amazonaws.com" - }, - "us-gov-west-1/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "us-gov-west-1/ec2metadata": { - "endpoint": "http://169.254.169.254/latest" - }, - "*/cloudfront": { - "endpoint": "cloudfront.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/cloudsearchdomain": { - "endpoint": "", - "signingRegion": "us-east-1" - }, - "*/data.iot": { - "endpoint": "", - "signingRegion": "us-east-1" - }, - "*/ec2metadata": { - "endpoint": "http://169.254.169.254/latest" - }, - "*/iam": { - "endpoint": "iam.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/importexport": { - "endpoint": "importexport.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/route53": { - "endpoint": "route53.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/sts": { - "endpoint": "sts.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/waf": { - "endpoint": "waf.amazonaws.com", - "signingRegion": "us-east-1" - }, - "us-east-1/sdb": { - "endpoint": "sdb.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "us-east-1/s3": { - "endpoint": "s3.amazonaws.com" - }, - "eu-central-1/s3": { - "endpoint": "{service}.{region}.amazonaws.com" - } - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go deleted file mode 100644 index e995315a..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go +++ /dev/null @@ -1,88 +0,0 @@ -package endpoints - -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
- -type endpointStruct struct { - Version int - Endpoints map[string]endpointEntry -} - -type endpointEntry struct { - Endpoint string - SigningRegion string -} - -var endpointsMap = endpointStruct{ - Version: 2, - Endpoints: map[string]endpointEntry{ - "*/*": { - Endpoint: "{service}.{region}.amazonaws.com", - }, - "*/cloudfront": { - Endpoint: "cloudfront.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/cloudsearchdomain": { - Endpoint: "", - SigningRegion: "us-east-1", - }, - "*/data.iot": { - Endpoint: "", - SigningRegion: "us-east-1", - }, - "*/ec2metadata": { - Endpoint: "http://169.254.169.254/latest", - }, - "*/iam": { - Endpoint: "iam.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/importexport": { - Endpoint: "importexport.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/route53": { - Endpoint: "route53.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, - "*/sts": { - Endpoint: "sts.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/waf": { - Endpoint: "waf.amazonaws.com", - SigningRegion: "us-east-1", - }, - "cn-north-1/*": { - Endpoint: "{service}.{region}.amazonaws.com.cn", - }, - "cn-north-1/ec2metadata": { - Endpoint: "http://169.254.169.254/latest", - }, - "eu-central-1/s3": { - Endpoint: "{service}.{region}.amazonaws.com", - }, - "us-east-1/s3": { - Endpoint: "s3.amazonaws.com", - }, - "us-east-1/sdb": { - Endpoint: "sdb.amazonaws.com", - SigningRegion: "us-east-1", - }, - "us-gov-west-1/ec2metadata": { - Endpoint: "http://169.254.169.254/latest", - }, - "us-gov-west-1/iam": { - Endpoint: "iam.us-gov.amazonaws.com", - }, - "us-gov-west-1/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, - "us-gov-west-1/sts": { - Endpoint: "sts.us-gov-west-1.amazonaws.com", - }, - }, -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go deleted file mode 100644 index 53831dff..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go +++ /dev/null @@ -1,75 +0,0 @@ -package protocol - -import ( - "crypto/rand" - "fmt" - "reflect" -) - -// RandReader is the random reader the protocol package will use to read -// random bytes from. This is exported for testing, and should not be used. -var RandReader = rand.Reader - -const idempotencyTokenFillTag = `idempotencyToken` - -// CanSetIdempotencyToken returns true if the struct field should be -// automatically populated with a Idempotency token. -// -// Only *string and string type fields that are tagged with idempotencyToken -// which are not already set can be auto filled. -func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { - switch u := v.Interface().(type) { - // To auto fill an Idempotency token the field must be a string, - // tagged for auto fill, and have a zero value. - case *string: - return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 - case string: - return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 - } - - return false -} - -// GetIdempotencyToken returns a randomly generated idempotency token. -func GetIdempotencyToken() string { - b := make([]byte, 16) - RandReader.Read(b) - - return UUIDVersion4(b) -} - -// SetIdempotencyToken will set the value provided with a Idempotency Token. -// Given that the value can be set. Will panic if value is not setable. 
-func SetIdempotencyToken(v reflect.Value) { - if v.Kind() == reflect.Ptr { - if v.IsNil() && v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - v = reflect.Indirect(v) - - if !v.CanSet() { - panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) - } - - b := make([]byte, 16) - _, err := rand.Read(b) - if err != nil { - // TODO handle error - return - } - - v.Set(reflect.ValueOf(UUIDVersion4(b))) -} - -// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided -func UUIDVersion4(u []byte) string { - // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 - // 13th character is "4" - u[6] = (u[6] | 0x40) & 0x4F - // 17th character is "8", "9", "a", or "b" - u[8] = (u[8] | 0x80) & 0xBF - - return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go deleted file mode 100644 index 56d69db0..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go +++ /dev/null @@ -1,36 +0,0 @@ -// Package query provides serialisation of AWS query requests, and responses. -package query - -//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go - -import ( - "net/url" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" -) - -// BuildHandler is a named request handler for building query protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} - -// Build builds a request for an AWS Query service. -func Build(r *request.Request) { - body := url.Values{ - "Action": {r.Operation.Name}, - "Version": {r.ClientInfo.APIVersion}, - } - if err := queryutil.Parse(body, r.Params, false); err != nil { - r.Error = awserr.New("SerializationError", "failed encoding Query request", err) - return - } - - if r.ExpireTime == 0 { - r.HTTPRequest.Method = "POST" - r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") - r.SetBufferBody([]byte(body.Encode())) - } else { // This is a pre-signed request - r.HTTPRequest.Method = "GET" - r.HTTPRequest.URL.RawQuery = body.Encode() - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go deleted file mode 100644 index 60ea0bd1..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ /dev/null @@ -1,230 +0,0 @@ -package queryutil - -import ( - "encoding/base64" - "fmt" - "net/url" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/private/protocol" -) - -// Parse parses an object i and fills a url.Values object. The isEC2 flag -// indicates if this is the EC2 Query sub-protocol. 
-func Parse(body url.Values, i interface{}, isEC2 bool) error { - q := queryParser{isEC2: isEC2} - return q.parseValue(body, reflect.ValueOf(i), "", "") -} - -func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} - -type queryParser struct { - isEC2 bool -} - -func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - value = elemOf(value) - - // no need to handle zero values - if !value.IsValid() { - return nil - } - - t := tag.Get("type") - if t == "" { - switch value.Kind() { - case reflect.Struct: - t = "structure" - case reflect.Slice: - t = "list" - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - return q.parseStruct(v, value, prefix) - case "list": - return q.parseList(v, value, prefix, tag) - case "map": - return q.parseMap(v, value, prefix, tag) - default: - return q.parseScalar(v, value, prefix, tag) - } -} - -func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { - if !value.IsValid() { - return nil - } - - t := value.Type() - for i := 0; i < value.NumField(); i++ { - elemValue := elemOf(value.Field(i)) - field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - - if protocol.CanSetIdempotencyToken(value.Field(i), field) { - token := protocol.GetIdempotencyToken() - elemValue = reflect.ValueOf(token) - } - - var name string - if q.isEC2 { - name = field.Tag.Get("queryName") - } - if name == "" { - if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { - name = field.Tag.Get("locationNameList") - } else if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - if name != "" && q.isEC2 { - name = strings.ToUpper(name[0:1]) + name[1:] - } - } - if name == "" { - name = field.Name - } - - if prefix != "" { - name = prefix + "." + name - } - - if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { - return err - } - } - return nil -} - -func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { - v.Set(prefix, "") - return nil - } - - // check for unflattened list member - if !q.isEC2 && tag.Get("flattened") == "" { - prefix += ".member" - } - - for i := 0; i < value.Len(); i++ { - slicePrefix := prefix - if slicePrefix == "" { - slicePrefix = strconv.Itoa(i + 1) - } else { - slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) - } - if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { - return err - } - } - return nil -} - -func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { - v.Set(prefix, "") - return nil - } - - // check for unflattened list member - if !q.isEC2 && tag.Get("flattened") == "" { - prefix += ".entry" - } - - // sort keys for improved serialization consistency. - // this is not strictly necessary for protocol support. 
- mapKeyValues := value.MapKeys() - mapKeys := map[string]reflect.Value{} - mapKeyNames := make([]string, len(mapKeyValues)) - for i, mapKey := range mapKeyValues { - name := mapKey.String() - mapKeys[name] = mapKey - mapKeyNames[i] = name - } - sort.Strings(mapKeyNames) - - for i, mapKeyName := range mapKeyNames { - mapKey := mapKeys[mapKeyName] - mapValue := value.MapIndex(mapKey) - - kname := tag.Get("locationNameKey") - if kname == "" { - kname = "key" - } - vname := tag.Get("locationNameValue") - if vname == "" { - vname = "value" - } - - // serialize key - var keyName string - if prefix == "" { - keyName = strconv.Itoa(i+1) + "." + kname - } else { - keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname - } - - if err := q.parseValue(v, mapKey, keyName, ""); err != nil { - return err - } - - // serialize value - var valueName string - if prefix == "" { - valueName = strconv.Itoa(i+1) + "." + vname - } else { - valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname - } - - if err := q.parseValue(v, mapValue, valueName, ""); err != nil { - return err - } - } - - return nil -} - -func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { - switch value := r.Interface().(type) { - case string: - v.Set(name, value) - case []byte: - if !r.IsNil() { - v.Set(name, base64.StdEncoding.EncodeToString(value)) - } - case bool: - v.Set(name, strconv.FormatBool(value)) - case int64: - v.Set(name, strconv.FormatInt(value, 10)) - case int: - v.Set(name, strconv.Itoa(value)) - case float64: - v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) - case float32: - v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) - case time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - v.Set(name, value.UTC().Format(ISO8601UTC)) - default: - return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) - } - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go deleted file mode 100644 index a3ea4095..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go +++ /dev/null @@ -1,35 +0,0 @@ -package query - -//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go - -import ( - "encoding/xml" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" -) - -// UnmarshalHandler is a named request handler for unmarshaling query protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} - -// Unmarshal unmarshals a response for an AWS Query service. 
-func Unmarshal(r *request.Request) { - defer r.HTTPResponse.Body.Close() - if r.DataFilled() { - decoder := xml.NewDecoder(r.HTTPResponse.Body) - err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") - if err != nil { - r.Error = awserr.New("SerializationError", "failed decoding Query response", err) - return - } - } -} - -// UnmarshalMeta unmarshals header response values for an AWS Query service. -func UnmarshalMeta(r *request.Request) { - r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go deleted file mode 100644 index f2142961..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go +++ /dev/null @@ -1,66 +0,0 @@ -package query - -import ( - "encoding/xml" - "io/ioutil" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -type xmlErrorResponse struct { - XMLName xml.Name `xml:"ErrorResponse"` - Code string `xml:"Error>Code"` - Message string `xml:"Error>Message"` - RequestID string `xml:"RequestId"` -} - -type xmlServiceUnavailableResponse struct { - XMLName xml.Name `xml:"ServiceUnavailableException"` -} - -// UnmarshalErrorHandler is a name request handler to unmarshal request errors -var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} - -// UnmarshalError unmarshals an error response for an AWS Query service. -func UnmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - - bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err) - return - } - - // First check for specific error - resp := xmlErrorResponse{} - decodeErr := xml.Unmarshal(bodyBytes, &resp) - if decodeErr == nil { - reqID := resp.RequestID - if reqID == "" { - reqID = r.RequestID - } - r.Error = awserr.NewRequestFailure( - awserr.New(resp.Code, resp.Message, nil), - r.HTTPResponse.StatusCode, - reqID, - ) - return - } - - // Check for unhandled error - servUnavailResp := xmlServiceUnavailableResponse{} - unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp) - if unavailErr == nil { - r.Error = awserr.NewRequestFailure( - awserr.New("ServiceUnavailableException", "service is unavailable", nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - - // Failed to retrieve any error message from the response body - r.Error = awserr.New("SerializationError", - "failed to decode query XML error response", decodeErr) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go deleted file mode 100644 index 5f412516..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ /dev/null @@ -1,256 +0,0 @@ -// Package rest provides RESTful serialization of AWS requests and responses. 
-package rest - -import ( - "bytes" - "encoding/base64" - "fmt" - "io" - "net/http" - "net/url" - "path" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// RFC822 returns an RFC822 formatted timestamp for AWS protocols -const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" - -// Whether the byte value can be sent without escaping in AWS URLs -var noEscape [256]bool - -var errValueNotSet = fmt.Errorf("value not set") - -func init() { - for i := 0; i < len(noEscape); i++ { - // AWS expects every character except these to be escaped - noEscape[i] = (i >= 'A' && i <= 'Z') || - (i >= 'a' && i <= 'z') || - (i >= '0' && i <= '9') || - i == '-' || - i == '.' || - i == '_' || - i == '~' - } -} - -// BuildHandler is a named request handler for building rest protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} - -// Build builds the REST component of a service request. -func Build(r *request.Request) { - if r.ParamsFilled() { - v := reflect.ValueOf(r.Params).Elem() - buildLocationElements(r, v) - buildBody(r, v) - } -} - -func buildLocationElements(r *request.Request, v reflect.Value) { - query := r.HTTPRequest.URL.Query() - - for i := 0; i < v.NumField(); i++ { - m := v.Field(i) - if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { - continue - } - - if m.IsValid() { - field := v.Type().Field(i) - name := field.Tag.Get("locationName") - if name == "" { - name = field.Name - } - if m.Kind() == reflect.Ptr { - m = m.Elem() - } - if !m.IsValid() { - continue - } - - var err error - switch field.Tag.Get("location") { - case "headers": // header maps - err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName")) - case "header": - err = buildHeader(&r.HTTPRequest.Header, m, name) - case "uri": - err = buildURI(r.HTTPRequest.URL, m, name) - case "querystring": - err = buildQueryString(query, m, name) - } - r.Error = err - } - if r.Error != nil { - return - } - } - - r.HTTPRequest.URL.RawQuery = query.Encode() - updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path) -} - -func buildBody(r *request.Request, v reflect.Value) { - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - pfield, _ := v.Type().FieldByName(payloadName) - if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { - payload := reflect.Indirect(v.FieldByName(payloadName)) - if payload.IsValid() && payload.Interface() != nil { - switch reader := payload.Interface().(type) { - case io.ReadSeeker: - r.SetReaderBody(reader) - case []byte: - r.SetBufferBody(reader) - case string: - r.SetStringBody(reader) - default: - r.Error = awserr.New("SerializationError", - "failed to encode REST request", - fmt.Errorf("unknown payload type %s", payload.Type())) - } - } - } - } - } -} - -func buildHeader(header *http.Header, v reflect.Value, name string) error { - str, err := convertType(v) - if err == errValueNotSet { - return nil - } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) - } - - header.Add(name, str) - - return nil -} - -func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error { - for _, key := range v.MapKeys() { - str, err := convertType(v.MapIndex(key)) - if err == errValueNotSet { - continue - } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) - - } - - header.Add(prefix+key.String(), 
str) - } - return nil -} - -func buildURI(u *url.URL, v reflect.Value, name string) error { - value, err := convertType(v) - if err == errValueNotSet { - return nil - } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) - } - - uri := u.Path - uri = strings.Replace(uri, "{"+name+"}", EscapePath(value, true), -1) - uri = strings.Replace(uri, "{"+name+"+}", EscapePath(value, false), -1) - u.Path = uri - - return nil -} - -func buildQueryString(query url.Values, v reflect.Value, name string) error { - switch value := v.Interface().(type) { - case []*string: - for _, item := range value { - query.Add(name, *item) - } - case map[string]*string: - for key, item := range value { - query.Add(key, *item) - } - case map[string][]*string: - for key, items := range value { - for _, item := range items { - query.Add(key, *item) - } - } - default: - str, err := convertType(v) - if err == errValueNotSet { - return nil - } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) - } - query.Set(name, str) - } - - return nil -} - -func updatePath(url *url.URL, urlPath string) { - scheme, query := url.Scheme, url.RawQuery - - hasSlash := strings.HasSuffix(urlPath, "/") - - // clean up path - urlPath = path.Clean(urlPath) - if hasSlash && !strings.HasSuffix(urlPath, "/") { - urlPath += "/" - } - - // get formatted URL minus scheme so we can build this into Opaque - url.Scheme, url.Path, url.RawQuery = "", "", "" - s := url.String() - url.Scheme = scheme - url.RawQuery = query - - // build opaque URI - url.Opaque = s + urlPath -} - -// EscapePath escapes part of a URL path in Amazon style -func EscapePath(path string, encodeSep bool) string { - var buf bytes.Buffer - for i := 0; i < len(path); i++ { - c := path[i] - if noEscape[c] || (c == '/' && !encodeSep) { - buf.WriteByte(c) - } else { - fmt.Fprintf(&buf, "%%%02X", c) - } - } - return buf.String() -} - -func convertType(v reflect.Value) (string, error) { - v = reflect.Indirect(v) - if !v.IsValid() { - return "", errValueNotSet - } - - var str string - switch value := v.Interface().(type) { - case string: - str = value - case []byte: - str = base64.StdEncoding.EncodeToString(value) - case bool: - str = strconv.FormatBool(value) - case int64: - str = strconv.FormatInt(value, 10) - case float64: - str = strconv.FormatFloat(value, 'f', -1, 64) - case time.Time: - str = value.UTC().Format(RFC822) - default: - err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) - return "", err - } - return str, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go deleted file mode 100644 index 4366de2e..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go +++ /dev/null @@ -1,45 +0,0 @@ -package rest - -import "reflect" - -// PayloadMember returns the payload field member of i if there is one, or nil. 
-func PayloadMember(i interface{}) interface{} { - if i == nil { - return nil - } - - v := reflect.ValueOf(i).Elem() - if !v.IsValid() { - return nil - } - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - field, _ := v.Type().FieldByName(payloadName) - if field.Tag.Get("type") != "structure" { - return nil - } - - payload := v.FieldByName(payloadName) - if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { - return payload.Interface() - } - } - } - return nil -} - -// PayloadType returns the type of a payload field member of i if there is one, or "". -func PayloadType(i interface{}) string { - v := reflect.Indirect(reflect.ValueOf(i)) - if !v.IsValid() { - return "" - } - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - if member, ok := v.Type().FieldByName(payloadName); ok { - return member.Tag.Get("type") - } - } - } - return "" -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go deleted file mode 100644 index 2cba1d9a..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ /dev/null @@ -1,198 +0,0 @@ -package rest - -import ( - "encoding/base64" - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} - -// Unmarshal unmarshals the REST component of a response in a REST service. 
-func Unmarshal(r *request.Request) { - if r.DataFilled() { - v := reflect.Indirect(reflect.ValueOf(r.Data)) - unmarshalBody(r, v) - } -} - -// UnmarshalMeta unmarshals the REST metadata of a response in a REST service -func UnmarshalMeta(r *request.Request) { - r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") - if r.RequestID == "" { - // Alternative version of request id in the header - r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") - } - if r.DataFilled() { - v := reflect.Indirect(reflect.ValueOf(r.Data)) - unmarshalLocationElements(r, v) - } -} - -func unmarshalBody(r *request.Request, v reflect.Value) { - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - pfield, _ := v.Type().FieldByName(payloadName) - if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { - payload := v.FieldByName(payloadName) - if payload.IsValid() { - switch payload.Interface().(type) { - case []byte: - defer r.HTTPResponse.Body.Close() - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - } else { - payload.Set(reflect.ValueOf(b)) - } - case *string: - defer r.HTTPResponse.Body.Close() - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - } else { - str := string(b) - payload.Set(reflect.ValueOf(&str)) - } - default: - switch payload.Type().String() { - case "io.ReadSeeker": - payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body))) - case "aws.ReadSeekCloser", "io.ReadCloser": - payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) - default: - io.Copy(ioutil.Discard, r.HTTPResponse.Body) - defer r.HTTPResponse.Body.Close() - r.Error = awserr.New("SerializationError", - "failed to decode REST response", - fmt.Errorf("unknown payload type %s", payload.Type())) - } - } - } - } - } - } -} - -func unmarshalLocationElements(r *request.Request, v reflect.Value) { - for i := 0; i < v.NumField(); i++ { - m, field := v.Field(i), v.Type().Field(i) - if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { - continue - } - - if m.IsValid() { - name := field.Tag.Get("locationName") - if name == "" { - name = field.Name - } - - switch field.Tag.Get("location") { - case "statusCode": - unmarshalStatusCode(m, r.HTTPResponse.StatusCode) - case "header": - err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name)) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - break - } - case "headers": - prefix := field.Tag.Get("locationName") - err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - break - } - } - } - if r.Error != nil { - return - } - } -} - -func unmarshalStatusCode(v reflect.Value, statusCode int) { - if !v.IsValid() { - return - } - - switch v.Interface().(type) { - case *int64: - s := int64(statusCode) - v.Set(reflect.ValueOf(&s)) - } -} - -func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { - switch r.Interface().(type) { - case map[string]*string: // we only support string map value types - out := map[string]*string{} - for k, v := range headers { - k = http.CanonicalHeaderKey(k) - if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { - out[k[len(prefix):]] = &v[0] - } - } - r.Set(reflect.ValueOf(out)) - } 
- return nil -} - -func unmarshalHeader(v reflect.Value, header string) error { - if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { - return nil - } - - switch v.Interface().(type) { - case *string: - v.Set(reflect.ValueOf(&header)) - case []byte: - b, err := base64.StdEncoding.DecodeString(header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&b)) - case *bool: - b, err := strconv.ParseBool(header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&b)) - case *int64: - i, err := strconv.ParseInt(header, 10, 64) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&i)) - case *float64: - f, err := strconv.ParseFloat(header, 64) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&f)) - case *time.Time: - t, err := time.Parse(RFC822, header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&t)) - default: - err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) - return err - } - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go deleted file mode 100644 index c74088bf..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go +++ /dev/null @@ -1,69 +0,0 @@ -// Package restxml provides RESTful XML serialisation of AWS -// requests and responses. -package restxml - -//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go -//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go - -import ( - "bytes" - "encoding/xml" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/query" - "github.com/aws/aws-sdk-go/private/protocol/rest" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" -) - -// BuildHandler is a named request handler for building restxml protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build} - -// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta} - -// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors -var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError} - -// Build builds a request payload for the REST XML protocol. -func Build(r *request.Request) { - rest.Build(r) - - if t := rest.PayloadType(r.Params); t == "structure" || t == "" { - var buf bytes.Buffer - err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err) - return - } - r.SetBufferBody(buf.Bytes()) - } -} - -// Unmarshal unmarshals a payload response for the REST XML protocol. 
-func Unmarshal(r *request.Request) { - if t := rest.PayloadType(r.Data); t == "structure" || t == "" { - defer r.HTTPResponse.Body.Close() - decoder := xml.NewDecoder(r.HTTPResponse.Body) - err := xmlutil.UnmarshalXML(r.Data, decoder, "") - if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err) - return - } - } else { - rest.Unmarshal(r) - } -} - -// UnmarshalMeta unmarshals response headers for the REST XML protocol. -func UnmarshalMeta(r *request.Request) { - rest.UnmarshalMeta(r) -} - -// UnmarshalError unmarshals a response error for the REST XML protocol. -func UnmarshalError(r *request.Request) { - query.UnmarshalError(r) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go deleted file mode 100644 index da1a6811..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go +++ /dev/null @@ -1,21 +0,0 @@ -package protocol - -import ( - "io" - "io/ioutil" - - "github.com/aws/aws-sdk-go/aws/request" -) - -// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body -var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} - -// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. -func UnmarshalDiscardBody(r *request.Request) { - if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { - return - } - - io.Copy(ioutil.Discard, r.HTTPResponse.Body) - r.HTTPResponse.Body.Close() -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go deleted file mode 100644 index ceb4132c..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ /dev/null @@ -1,293 +0,0 @@ -// Package xmlutil provides XML serialisation of AWS requests and responses. -package xmlutil - -import ( - "encoding/base64" - "encoding/xml" - "fmt" - "reflect" - "sort" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/private/protocol" -) - -// BuildXML will serialize params into an xml.Encoder. -// Error will be returned if the serialization of any of the params or nested values fails. -func BuildXML(params interface{}, e *xml.Encoder) error { - b := xmlBuilder{encoder: e, namespaces: map[string]string{}} - root := NewXMLElement(xml.Name{}) - if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { - return err - } - for _, c := range root.Children { - for _, v := range c { - return StructToXML(e, v, false) - } - } - return nil -} - -// Returns the reflection element of a value, if it is a pointer. -func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} - -// A xmlBuilder serializes values from Go code to XML -type xmlBuilder struct { - encoder *xml.Encoder - namespaces map[string]string -} - -// buildValue generic XMLNode builder for any type. Will build value for their specific type -// struct, list, map, scalar. -// -// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If -// type is not provided reflect will be used to determine the value's type. 
-func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - value = elemOf(value) - if !value.IsValid() { // no need to handle zero values - return nil - } else if tag.Get("location") != "" { // don't handle non-body location values - return nil - } - - t := tag.Get("type") - if t == "" { - switch value.Kind() { - case reflect.Struct: - t = "structure" - case reflect.Slice: - t = "list" - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - if field, ok := value.Type().FieldByName("_"); ok { - tag = tag + reflect.StructTag(" ") + field.Tag - } - return b.buildStruct(value, current, tag) - case "list": - return b.buildList(value, current, tag) - case "map": - return b.buildMap(value, current, tag) - default: - return b.buildScalar(value, current, tag) - } -} - -// buildStruct adds a struct and its fields to the current XMLNode. All fields any any nested -// types are converted to XMLNodes also. -func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if !value.IsValid() { - return nil - } - - fieldAdded := false - - // unwrap payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := value.Type().FieldByName(payload) - tag = field.Tag - value = elemOf(value.FieldByName(payload)) - - if !value.IsValid() { - return nil - } - } - - child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) - - // there is an xmlNamespace associated with this struct - if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { - ns := xml.Attr{ - Name: xml.Name{Local: "xmlns"}, - Value: uri, - } - if prefix != "" { - b.namespaces[prefix] = uri // register the namespace - ns.Name.Local = "xmlns:" + prefix - } - - child.Attr = append(child.Attr, ns) - } - - t := value.Type() - for i := 0; i < value.NumField(); i++ { - member := elemOf(value.Field(i)) - field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - - mTag := field.Tag - if mTag.Get("location") != "" { // skip non-body members - continue - } - - if protocol.CanSetIdempotencyToken(value.Field(i), field) { - token := protocol.GetIdempotencyToken() - member = reflect.ValueOf(token) - } - - memberName := mTag.Get("locationName") - if memberName == "" { - memberName = field.Name - mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) - } - if err := b.buildValue(member, child, mTag); err != nil { - return err - } - - fieldAdded = true - } - - if fieldAdded { // only append this child if we have one ore more valid members - current.AddChild(child) - } - - return nil -} - -// buildList adds the value's list items to the current XMLNode as children nodes. All -// nested values in the list are converted to XMLNodes also. 
-func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if value.IsNil() { // don't build omitted lists - return nil - } - - // check for unflattened list member - flattened := tag.Get("flattened") != "" - - xname := xml.Name{Local: tag.Get("locationName")} - if flattened { - for i := 0; i < value.Len(); i++ { - child := NewXMLElement(xname) - current.AddChild(child) - if err := b.buildValue(value.Index(i), child, ""); err != nil { - return err - } - } - } else { - list := NewXMLElement(xname) - current.AddChild(list) - - for i := 0; i < value.Len(); i++ { - iname := tag.Get("locationNameList") - if iname == "" { - iname = "member" - } - - child := NewXMLElement(xml.Name{Local: iname}) - list.AddChild(child) - if err := b.buildValue(value.Index(i), child, ""); err != nil { - return err - } - } - } - - return nil -} - -// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All -// nested values in the map are converted to XMLNodes also. -// -// Error will be returned if it is unable to build the map's values into XMLNodes -func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - if value.IsNil() { // don't build omitted maps - return nil - } - - maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) - current.AddChild(maproot) - current = maproot - - kname, vname := "key", "value" - if n := tag.Get("locationNameKey"); n != "" { - kname = n - } - if n := tag.Get("locationNameValue"); n != "" { - vname = n - } - - // sorting is not required for compliance, but it makes testing easier - keys := make([]string, value.Len()) - for i, k := range value.MapKeys() { - keys[i] = k.String() - } - sort.Strings(keys) - - for _, k := range keys { - v := value.MapIndex(reflect.ValueOf(k)) - - mapcur := current - if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps - child := NewXMLElement(xml.Name{Local: "entry"}) - mapcur.AddChild(child) - mapcur = child - } - - kchild := NewXMLElement(xml.Name{Local: kname}) - kchild.Text = k - vchild := NewXMLElement(xml.Name{Local: vname}) - mapcur.AddChild(kchild) - mapcur.AddChild(vchild) - - if err := b.buildValue(v, vchild, ""); err != nil { - return err - } - } - - return nil -} - -// buildScalar will convert the value into a string and append it as a attribute or child -// of the current XMLNode. -// -// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. -// -// Error will be returned if the value type is unsupported. 
-func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { - var str string - switch converted := value.Interface().(type) { - case string: - str = converted - case []byte: - if !value.IsNil() { - str = base64.StdEncoding.EncodeToString(converted) - } - case bool: - str = strconv.FormatBool(converted) - case int64: - str = strconv.FormatInt(converted, 10) - case int: - str = strconv.Itoa(converted) - case float64: - str = strconv.FormatFloat(converted, 'f', -1, 64) - case float32: - str = strconv.FormatFloat(float64(converted), 'f', -1, 32) - case time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - str = converted.UTC().Format(ISO8601UTC) - default: - return fmt.Errorf("unsupported value for param %s: %v (%s)", - tag.Get("locationName"), value.Interface(), value.Type().Name()) - } - - xname := xml.Name{Local: tag.Get("locationName")} - if tag.Get("xmlAttribute") != "" { // put into current node's attribute list - attr := xml.Attr{Name: xname, Value: str} - current.Attr = append(current.Attr, attr) - } else { // regular text node - current.AddChild(&XMLNode{Name: xname, Text: str}) - } - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go deleted file mode 100644 index 49f291a8..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ /dev/null @@ -1,260 +0,0 @@ -package xmlutil - -import ( - "encoding/base64" - "encoding/xml" - "fmt" - "io" - "reflect" - "strconv" - "strings" - "time" -) - -// UnmarshalXML deserializes an xml.Decoder into the container v. V -// needs to match the shape of the XML expected to be decoded. -// If the shape doesn't match unmarshaling will fail. -func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { - n, _ := XMLToStruct(d, nil) - if n.Children != nil { - for _, root := range n.Children { - for _, c := range root { - if wrappedChild, ok := c.Children[wrapper]; ok { - c = wrappedChild[0] // pull out wrapped element - } - - err := parse(reflect.ValueOf(v), c, "") - if err != nil { - if err == io.EOF { - return nil - } - return err - } - } - } - return nil - } - return nil -} - -// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect -// will be used to determine the type from r. -func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - rtype := r.Type() - if rtype.Kind() == reflect.Ptr { - rtype = rtype.Elem() // check kind of actual element type - } - - t := tag.Get("type") - if t == "" { - switch rtype.Kind() { - case reflect.Struct: - t = "structure" - case reflect.Slice: - t = "list" - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - if field, ok := rtype.FieldByName("_"); ok { - tag = field.Tag - } - return parseStruct(r, node, tag) - case "list": - return parseList(r, node, tag) - case "map": - return parseMap(r, node, tag) - default: - return parseScalar(r, node, tag) - } -} - -// parseStruct deserializes a structure and its fields from an XMLNode. Any nested -// types in the structure will also be deserialized. 
-func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - t := r.Type() - if r.Kind() == reflect.Ptr { - if r.IsNil() { // create the structure if it's nil - s := reflect.New(r.Type().Elem()) - r.Set(s) - r = s - } - - r = r.Elem() - t = t.Elem() - } - - // unwrap any payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := t.FieldByName(payload) - return parseStruct(r.FieldByName(payload), node, field.Tag) - } - - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - if c := field.Name[0:1]; strings.ToLower(c) == c { - continue // ignore unexported fields - } - - // figure out what this field is called - name := field.Name - if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { - name = field.Tag.Get("locationNameList") - } else if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - - // try to find the field by name in elements - elems := node.Children[name] - - if elems == nil { // try to find the field in attributes - for _, a := range node.Attr { - if name == a.Name.Local { - // turn this into a text node for de-serializing - elems = []*XMLNode{{Text: a.Value}} - } - } - } - - member := r.FieldByName(field.Name) - for _, elem := range elems { - err := parse(member, elem, field.Tag) - if err != nil { - return err - } - } - } - return nil -} - -// parseList deserializes a list of values from an XML node. Each list entry -// will also be deserialized. -func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - t := r.Type() - - if tag.Get("flattened") == "" { // look at all item entries - mname := "member" - if name := tag.Get("locationNameList"); name != "" { - mname = name - } - - if Children, ok := node.Children[mname]; ok { - if r.IsNil() { - r.Set(reflect.MakeSlice(t, len(Children), len(Children))) - } - - for i, c := range Children { - err := parse(r.Index(i), c, "") - if err != nil { - return err - } - } - } - } else { // flattened list means this is a single element - if r.IsNil() { - r.Set(reflect.MakeSlice(t, 0, 0)) - } - - childR := reflect.Zero(t.Elem()) - r.Set(reflect.Append(r, childR)) - err := parse(r.Index(r.Len()-1), node, "") - if err != nil { - return err - } - } - - return nil -} - -// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode -// will also be deserialized as map entries. -func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - if r.IsNil() { - r.Set(reflect.MakeMap(r.Type())) - } - - if tag.Get("flattened") == "" { // look at all child entries - for _, entry := range node.Children["entry"] { - parseMapEntry(r, entry, tag) - } - } else { // this element is itself an entry - parseMapEntry(r, node, tag) - } - - return nil -} - -// parseMapEntry deserializes a map entry from a XML node. -func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - kname, vname := "key", "value" - if n := tag.Get("locationNameKey"); n != "" { - kname = n - } - if n := tag.Get("locationNameValue"); n != "" { - vname = n - } - - keys, ok := node.Children[kname] - values := node.Children[vname] - if ok { - for i, key := range keys { - keyR := reflect.ValueOf(key.Text) - value := values[i] - valueR := reflect.New(r.Type().Elem()).Elem() - - parse(valueR, value, "") - r.SetMapIndex(keyR, valueR) - } - } - return nil -} - -// parseScaller deserializes an XMLNode value into a concrete type based on the -// interface type of r. 
-// -// Error is returned if the deserialization fails due to invalid type conversion, -// or unsupported interface type. -func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { - switch r.Interface().(type) { - case *string: - r.Set(reflect.ValueOf(&node.Text)) - return nil - case []byte: - b, err := base64.StdEncoding.DecodeString(node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(b)) - case *bool: - v, err := strconv.ParseBool(node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&v)) - case *int64: - v, err := strconv.ParseInt(node.Text, 10, 64) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&v)) - case *float64: - v, err := strconv.ParseFloat(node.Text, 64) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&v)) - case *time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - t, err := time.Parse(ISO8601UTC, node.Text) - if err != nil { - return err - } - r.Set(reflect.ValueOf(&t)) - default: - return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) - } - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go deleted file mode 100644 index 72c198a9..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go +++ /dev/null @@ -1,105 +0,0 @@ -package xmlutil - -import ( - "encoding/xml" - "io" - "sort" -) - -// A XMLNode contains the values to be encoded or decoded. -type XMLNode struct { - Name xml.Name `json:",omitempty"` - Children map[string][]*XMLNode `json:",omitempty"` - Text string `json:",omitempty"` - Attr []xml.Attr `json:",omitempty"` -} - -// NewXMLElement returns a pointer to a new XMLNode initialized to default values. -func NewXMLElement(name xml.Name) *XMLNode { - return &XMLNode{ - Name: name, - Children: map[string][]*XMLNode{}, - Attr: []xml.Attr{}, - } -} - -// AddChild adds child to the XMLNode. -func (n *XMLNode) AddChild(child *XMLNode) { - if _, ok := n.Children[child.Name.Local]; !ok { - n.Children[child.Name.Local] = []*XMLNode{} - } - n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) -} - -// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. -func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { - out := &XMLNode{} - for { - tok, err := d.Token() - if tok == nil || err == io.EOF { - break - } - if err != nil { - return out, err - } - - switch typed := tok.(type) { - case xml.CharData: - out.Text = string(typed.Copy()) - case xml.StartElement: - el := typed.Copy() - out.Attr = el.Attr - if out.Children == nil { - out.Children = map[string][]*XMLNode{} - } - - name := typed.Name.Local - slice := out.Children[name] - if slice == nil { - slice = []*XMLNode{} - } - node, e := XMLToStruct(d, &el) - if e != nil { - return out, e - } - node.Name = typed.Name - slice = append(slice, node) - out.Children[name] = slice - case xml.EndElement: - if s != nil && s.Name.Local == typed.Name.Local { // matching end token - return out, nil - } - } - } - return out, nil -} - -// StructToXML writes an XMLNode to a xml.Encoder as tokens. 
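The XMLNode tree that XMLToStruct produces can also be inspected directly; Children maps local element names to the nodes carrying them. A small sketch (import path assumed, document hypothetical):

package main

import (
	"encoding/xml"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

func main() {
	d := xml.NewDecoder(strings.NewReader(`<A><B>hello</B><B>world</B></A>`))
	root, err := xmlutil.XMLToStruct(d, nil)
	if err != nil {
		panic(err)
	}
	// The root node holds one child slice per element name.
	for _, b := range root.Children["A"][0].Children["B"] {
		fmt.Println(b.Text) // hello, then world
	}
}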
-func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { - e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) - - if node.Text != "" { - e.EncodeToken(xml.CharData([]byte(node.Text))) - } else if sorted { - sortedNames := []string{} - for k := range node.Children { - sortedNames = append(sortedNames, k) - } - sort.Strings(sortedNames) - - for _, k := range sortedNames { - for _, v := range node.Children[k] { - StructToXML(e, v, sorted) - } - } - } else { - for _, c := range node.Children { - for _, v := range c { - StructToXML(e, v, sorted) - } - } - } - - e.EncodeToken(xml.EndElement{Name: node.Name}) - return e.Flush() -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go deleted file mode 100644 index b51e9449..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go +++ /dev/null @@ -1,134 +0,0 @@ -package waiter - -import ( - "fmt" - "reflect" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" -) - -// A Config provides a collection of configuration values to setup a generated -// waiter code with. -type Config struct { - Name string - Delay int - MaxAttempts int - Operation string - Acceptors []WaitAcceptor -} - -// A WaitAcceptor provides the information needed to wait for an API operation -// to complete. -type WaitAcceptor struct { - Expected interface{} - Matcher string - State string - Argument string -} - -// A Waiter provides waiting for an operation to complete. -type Waiter struct { - Config - Client interface{} - Input interface{} -} - -// Wait waits for an operation to complete, expire max attempts, or fail. Error -// is returned if the operation fails. -func (w *Waiter) Wait() error { - client := reflect.ValueOf(w.Client) - in := reflect.ValueOf(w.Input) - method := client.MethodByName(w.Config.Operation + "Request") - - for i := 0; i < w.MaxAttempts; i++ { - res := method.Call([]reflect.Value{in}) - req := res[0].Interface().(*request.Request) - req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Waiter")) - - err := req.Send() - for _, a := range w.Acceptors { - result := false - var vals []interface{} - switch a.Matcher { - case "pathAll", "path": - // Require all matches to be equal for result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - if len(vals) == 0 { - break - } - result = true - for _, val := range vals { - if !awsutil.DeepEqual(val, a.Expected) { - result = false - break - } - } - case "pathAny": - // Only a single match needs to equal for the result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - for _, val := range vals { - if awsutil.DeepEqual(val, a.Expected) { - result = true - break - } - } - case "status": - s := a.Expected.(int) - result = s == req.HTTPResponse.StatusCode - case "error": - if aerr, ok := err.(awserr.Error); ok { - result = aerr.Code() == a.Expected.(string) - } - case "pathList": - // ignored matcher - default: - logf(client, "WARNING: Waiter for %s encountered unexpected matcher: %s", - w.Config.Operation, a.Matcher) - } - - if !result { - // If there was no matching result found there is nothing more to do - // for this response, retry the request. 
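The waiter loop here resolves the operation by reflection (Operation + "Request") and evaluates the acceptor list after every attempt. A hedged sketch of wiring one up by hand against S3; the generated SDK waiters normally build this configuration for you, and the acceptor set below is illustrative only:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/private/waiter"
	"github.com/aws/aws-sdk-go/service/s3"
)

// waitForBucket polls HeadBucket up to 20 times, 5 seconds apart, and
// succeeds once the bucket answers with HTTP 200.
func waitForBucket(svc *s3.S3, bucket string) error {
	w := waiter.Waiter{
		Client: svc,
		Input:  &s3.HeadBucketInput{Bucket: aws.String(bucket)},
		Config: waiter.Config{
			Operation:   "HeadBucket",
			Delay:       5,
			MaxAttempts: 20,
			Acceptors: []waiter.WaitAcceptor{
				{State: "success", Matcher: "status", Expected: 200},
			},
		},
	}
	return w.Wait()
}

func main() {
	svc := s3.New(session.New())
	if err := waitForBucket(svc, "my-bucket"); err != nil {
		panic(err)
	}
}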
-				continue
-			}
-
-			switch a.State {
-			case "success":
-				// waiter completed
-				return nil
-			case "failure":
-				// Waiter failure state triggered
-				return awserr.New("ResourceNotReady",
-					"failed waiting for successful resource state", err)
-			case "retry":
-				// clear the error and retry the operation
-				err = nil
-			default:
-				logf(client, "WARNING: Waiter for %s encountered unexpected state: %s",
-					w.Config.Operation, a.State)
-			}
-		}
-		if err != nil {
-			return err
-		}
-
-		time.Sleep(time.Second * time.Duration(w.Delay))
-	}
-
-	return awserr.New("ResourceNotReady",
-		fmt.Sprintf("exceeded %d wait attempts", w.MaxAttempts), nil)
-}
-
-func logf(client reflect.Value, msg string, args ...interface{}) {
-	cfgVal := client.FieldByName("Config")
-	if !cfgVal.IsValid() {
-		return
-	}
-	if cfg, ok := cfgVal.Interface().(*aws.Config); ok && cfg.Logger != nil {
-		cfg.Logger.Log(fmt.Sprintf(msg, args...))
-	}
-}
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy.go
deleted file mode 100644
index 75263b1b..00000000
--- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy.go
+++ /dev/null
@@ -1,226 +0,0 @@
-package sign
-
-import (
-	"bytes"
-	"crypto"
-	"crypto/rand"
-	"crypto/rsa"
-	"crypto/sha1"
-	"encoding/base64"
-	"encoding/json"
-	"fmt"
-	"io"
-	"net/url"
-	"strings"
-	"time"
-	"unicode"
-)
-
-// An AWSEpochTime wraps a time value providing JSON serialization needed for
-// AWS Policy epoch time fields.
-type AWSEpochTime struct {
-	time.Time
-}
-
-// NewAWSEpochTime returns a new AWSEpochTime pointer wrapping the Go time provided.
-func NewAWSEpochTime(t time.Time) *AWSEpochTime {
-	return &AWSEpochTime{t}
-}
-
-// MarshalJSON serializes the epoch time as AWS Policy epoch time.
-func (t AWSEpochTime) MarshalJSON() ([]byte, error) {
-	return []byte(fmt.Sprintf(`{"AWS:EpochTime":%d}`, t.UTC().Unix())), nil
-}
-
-// An IPAddress wraps a source IP address, providing JSON serialization information
-type IPAddress struct {
-	SourceIP string `json:"AWS:SourceIp"`
-}
-
-// A Condition defines the restrictions for how a signed URL can be used.
-type Condition struct {
-	// Optional IP address mask the signed URL must be requested from.
-	IPAddress *IPAddress `json:"IpAddress,omitempty"`
-
-	// Optional date that the signed URL cannot be used until. It is invalid
-	// to make requests with the signed URL prior to this date.
-	DateGreaterThan *AWSEpochTime `json:",omitempty"`
-
-	// Required date that the signed URL will expire. A DateLessThan is required
-	// to sign CloudFront URLs.
-	DateLessThan *AWSEpochTime `json:",omitempty"`
-}
-
-// A Statement is a collection of conditions for resources
-type Statement struct {
-	// The Web or RTMP resource the URL will be signed for
-	Resource string
-
-	// The set of conditions for this resource
-	Condition Condition
-}
-
-// A Policy defines the resources that a signed URL will be signed for.
-//
-// See the following page for more information on how policies are constructed.
-// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html#private-content-custom-policy-statement
-type Policy struct {
-	// List of resource and condition statements.
-	// Signed URLs should only provide a single statement.
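MarshalJSON above is what turns each condition date into the {"AWS:EpochTime": n} object CloudFront expects. A tiny sketch, assuming the non-vendored import path:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/service/cloudfront/sign"
)

func main() {
	t := sign.NewAWSEpochTime(time.Unix(1735689600, 0))
	b, err := json.Marshal(t)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"AWS:EpochTime":1735689600}
}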
- Statements []Statement `json:"Statement"` -} - -// Override for testing to mock out usage of crypto/rand.Reader -var randReader = rand.Reader - -// Sign will sign a policy using an RSA private key. It will return a base 64 -// encoded signature and policy if no error is encountered. -// -// The signature and policy should be added to the signed URL following the -// guidelines in: -// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-signed-urls.html -func (p *Policy) Sign(privKey *rsa.PrivateKey) (b64Signature, b64Policy []byte, err error) { - if err = p.Validate(); err != nil { - return nil, nil, err - } - - // Build and escape the policy - b64Policy, jsonPolicy, err := encodePolicy(p) - if err != nil { - return nil, nil, err - } - awsEscapeEncoded(b64Policy) - - // Build and escape the signature - b64Signature, err = signEncodedPolicy(randReader, jsonPolicy, privKey) - if err != nil { - return nil, nil, err - } - awsEscapeEncoded(b64Signature) - - return b64Signature, b64Policy, nil -} - -// Validate verifies that the policy is valid and usable, and returns an -// error if there is a problem. -func (p *Policy) Validate() error { - if len(p.Statements) == 0 { - return fmt.Errorf("at least one policy statement is required") - } - for i, s := range p.Statements { - if s.Resource == "" { - return fmt.Errorf("statement at index %d does not have a resource", i) - } - if !isASCII(s.Resource) { - return fmt.Errorf("unable to sign resource, [%s]. "+ - "Resources must only contain ascii characters. "+ - "Hostnames with unicode should be encoded as Punycode, (e.g. golang.org/x/net/idna), "+ - "and URL unicode path/query characters should be escaped.", s.Resource) - } - } - - return nil -} - -// CreateResource constructs, validates, and returns a resource URL string. An -// error will be returned if unable to create the resource string. -func CreateResource(scheme, u string) (string, error) { - scheme = strings.ToLower(scheme) - - if scheme == "http" || scheme == "https" || scheme == "http*" || scheme == "*" { - return u, nil - } - - if scheme == "rtmp" { - parsed, err := url.Parse(u) - if err != nil { - return "", fmt.Errorf("unable to parse rtmp URL, err: %s", err) - } - - rtmpURL := strings.TrimLeft(parsed.Path, "/") - if parsed.RawQuery != "" { - rtmpURL = fmt.Sprintf("%s?%s", rtmpURL, parsed.RawQuery) - } - - return rtmpURL, nil - } - - return "", fmt.Errorf("invalid URL scheme must be http, https, or rtmp. Provided: %s", scheme) -} - -// NewCannedPolicy returns a new Canned Policy constructed using the resource -// and expires time. This can be used to generate the basic model for a Policy -// that can be then augmented with additional conditions. -// -// See the following page for more information on how policies are constructed. -// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html#private-content-custom-policy-statement -func NewCannedPolicy(resource string, expires time.Time) *Policy { - return &Policy{ - Statements: []Statement{ - { - Resource: resource, - Condition: Condition{ - DateLessThan: NewAWSEpochTime(expires), - }, - }, - }, - } -} - -// encodePolicy encodes the Policy as JSON and also base 64 encodes it. 
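Sign and NewCannedPolicy above compose into the common case: one statement, one expiry. A sketch with a throwaway key; real code would load the CloudFront key pair from PEM instead:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/service/cloudfront/sign"
)

func main() {
	// Demonstration key only; production code would use LoadPEMPrivKeyFile.
	privKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	p := sign.NewCannedPolicy("https://example.com/video.mp4", time.Now().Add(1*time.Hour))
	b64Sig, b64Policy, err := p.Sign(privKey)
	if err != nil {
		panic(err)
	}
	fmt.Printf("Policy=%s\nSignature=%s\n", b64Policy, b64Sig)
}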
-func encodePolicy(p *Policy) (b64Policy, jsonPolicy []byte, err error) { - jsonPolicy, err = json.Marshal(p) - if err != nil { - return nil, nil, fmt.Errorf("failed to encode policy, %s", err.Error()) - } - - // Remove leading and trailing white space, JSON encoding will note include - // whitespace within the encoding. - jsonPolicy = bytes.TrimSpace(jsonPolicy) - - b64Policy = make([]byte, base64.StdEncoding.EncodedLen(len(jsonPolicy))) - base64.StdEncoding.Encode(b64Policy, jsonPolicy) - return b64Policy, jsonPolicy, nil -} - -// signEncodedPolicy will sign and base 64 encode the JSON encoded policy. -func signEncodedPolicy(randReader io.Reader, jsonPolicy []byte, privKey *rsa.PrivateKey) ([]byte, error) { - hash := sha1.New() - if _, err := bytes.NewReader(jsonPolicy).WriteTo(hash); err != nil { - return nil, fmt.Errorf("failed to calculate signing hash, %s", err.Error()) - } - - sig, err := rsa.SignPKCS1v15(randReader, privKey, crypto.SHA1, hash.Sum(nil)) - if err != nil { - return nil, fmt.Errorf("failed to sign policy, %s", err.Error()) - } - - b64Sig := make([]byte, base64.StdEncoding.EncodedLen(len(sig))) - base64.StdEncoding.Encode(b64Sig, sig) - return b64Sig, nil -} - -// special characters to be replaced with awsEscapeEncoded -var invalidEncodedChar = map[byte]byte{ - '+': '-', - '=': '_', - '/': '~', -} - -// awsEscapeEncoded will replace base64 encoding's special characters to be URL safe. -func awsEscapeEncoded(b []byte) { - for i, v := range b { - if r, ok := invalidEncodedChar[v]; ok { - b[i] = r - } - } -} - -func isASCII(u string) bool { - for _, c := range u { - if c > unicode.MaxASCII { - return false - } - } - return true -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey.go deleted file mode 100644 index ffb3c3a7..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey.go +++ /dev/null @@ -1,68 +0,0 @@ -package sign - -import ( - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "fmt" - "io" - "io/ioutil" - "os" -) - -// LoadPEMPrivKeyFile reads a PEM encoded RSA private key from the file name. -// A new RSA private key will be returned if no error. -func LoadPEMPrivKeyFile(name string) (*rsa.PrivateKey, error) { - file, err := os.Open(name) - if err != nil { - return nil, err - } - defer file.Close() - - return LoadPEMPrivKey(file) -} - -// LoadPEMPrivKey reads a PEM encoded RSA private key from the io.Reader. -// A new RSA private key will be returned if no error. -func LoadPEMPrivKey(reader io.Reader) (*rsa.PrivateKey, error) { - block, err := loadPem(reader) - if err != nil { - return nil, err - } - - return x509.ParsePKCS1PrivateKey(block.Bytes) -} - -// LoadEncryptedPEMPrivKey decrypts the PEM encoded private key using the -// password provided returning a RSA private key. If the PEM data is invalid, -// or unable to decrypt an error will be returned. 
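The substitution table and awsEscapeEncoded above make the base64 output URL- and cookie-safe. A standalone sketch of the same rewrite (the helper name here is mine; the package's version is unexported):

package main

import "fmt"

// awsEscape mirrors sign.awsEscapeEncoded: rewrite base64's '+', '=' and
// '/' in place so the encoded policy and signature survive inside a URL
// or cookie value.
func awsEscape(b []byte) {
	repl := map[byte]byte{'+': '-', '=': '_', '/': '~'}
	for i, v := range b {
		if r, ok := repl[v]; ok {
			b[i] = r
		}
	}
}

func main() {
	b := []byte("AbC+dE/fG=")
	awsEscape(b)
	fmt.Println(string(b)) // AbC-dE~fG_
}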
-func LoadEncryptedPEMPrivKey(reader io.Reader, password []byte) (*rsa.PrivateKey, error) {
-	block, err := loadPem(reader)
-	if err != nil {
-		return nil, err
-	}
-
-	decryptedBlock, err := x509.DecryptPEMBlock(block, password)
-	if err != nil {
-		return nil, err
-	}
-
-	return x509.ParsePKCS1PrivateKey(decryptedBlock)
-}
-
-func loadPem(reader io.Reader) (*pem.Block, error) {
-	b, err := ioutil.ReadAll(reader)
-	if err != nil {
-		return nil, err
-	}
-
-	block, _ := pem.Decode(b)
-	if block == nil {
-		// pem.Decode will set block to nil if there is no PEM data in the input
-		// the second parameter will contain the provided bytes that failed
-		// to be decoded.
-		return nil, fmt.Errorf("no valid PEM data provided")
-	}
-
-	return block, nil
-}
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/randomreader.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/randomreader.go
deleted file mode 100644
index 7138e22f..00000000
--- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/randomreader.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package sign
-
-import (
-	"bytes"
-	"encoding/binary"
-	"math/rand"
-)
-
-// A randomReader wraps a math/rand.Rand within a reader so that it can be used
-// as a predictable testing replacement for crypto/rand.Reader
-type randomReader struct {
-	b *bytes.Buffer
-	r *rand.Rand
-}
-
-// newRandomReader returns a new instance of the random reader
-func newRandomReader(r *rand.Rand) *randomReader {
-	return &randomReader{b: &bytes.Buffer{}, r: r}
-}
-
-// Read will read random bytes up to the length of b.
-func (m *randomReader) Read(b []byte) (int, error) {
-	for i := 0; i < len(b); {
-		binary.Write(m.b, binary.LittleEndian, m.r.Int63())
-		n, _ := m.b.Read(b[i:])
-		i += n
-	}
-
-	return len(b), nil
-}
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie.go
deleted file mode 100644
index 9b2deadf..00000000
--- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie.go
+++ /dev/null
@@ -1,241 +0,0 @@
-package sign
-
-import (
-	"crypto/rsa"
-	"fmt"
-	"net/http"
-	"strings"
-	"time"
-)
-
-const (
-	// CookiePolicyName name of the policy cookie
-	CookiePolicyName = "CloudFront-Policy"
-	// CookieSignatureName name of the signature cookie
-	CookieSignatureName = "CloudFront-Signature"
-	// CookieKeyIDName name of the signing Key ID cookie
-	CookieKeyIDName = "CloudFront-Key-Pair-Id"
-)
-
-// A CookieOptions holds optional additional options that can be applied to the
-// signed cookies.
-type CookieOptions struct {
-	Path   string
-	Domain string
-	Secure bool
-}
-
-// apply integrates the provided options into the base cookie options. A new
-// copy will be returned; the base CookieOptions will not be modified.
-func (o CookieOptions) apply(opts ...func(*CookieOptions)) CookieOptions {
-	if len(opts) == 0 {
-		return o
-	}
-
-	for _, opt := range opts {
-		opt(&o)
-	}
-
-	return o
-}
-
-// A CookieSigner provides signing utilities to sign Cookies for Amazon CloudFront
-// resources. Using a private key and Credential Key Pair key ID the CookieSigner
-// only needs to be created once per Credential Key Pair key ID and private key.
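The loaders above expect PKCS#1 RSA keys, optionally PEM-encrypted. A minimal sketch, assuming the non-vendored import path and a hypothetical key file:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/cloudfront/sign"
)

func main() {
	// Path is hypothetical; the file must hold a PKCS#1 RSA private key.
	key, err := sign.LoadPEMPrivKeyFile("cloudfront-keypair.pem")
	if err != nil {
		panic(err)
	}
	fmt.Println("loaded key:", key.N.BitLen(), "bits")
}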
-// -// More information about signed Cookies and their structure can be found at: -// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-setting-signed-cookie-custom-policy.html -// -// To sign a Cookie, create a CookieSigner with your private key and credential -// pair key ID. Once you have a CookieSigner instance you can call Sign or -// SignWithPolicy to sign the URLs. -// -// The signer is safe to use concurrently, but the optional cookies options -// are not safe to modify concurrently. -type CookieSigner struct { - keyID string - privKey *rsa.PrivateKey - - Opts CookieOptions -} - -// NewCookieSigner constructs and returns a new CookieSigner to be used to for -// signing Amazon CloudFront URL resources with. -func NewCookieSigner(keyID string, privKey *rsa.PrivateKey, opts ...func(*CookieOptions)) *CookieSigner { - signer := &CookieSigner{ - keyID: keyID, - privKey: privKey, - Opts: CookieOptions{}.apply(opts...), - } - - return signer -} - -// Sign returns the cookies needed to allow user agents to make arbetrary -// requests to cloudfront for the resource(s) defined by the policy. -// -// Sign will create a CloudFront policy with only a resource and condition of -// DateLessThan equal to the expires time provided. -// -// The returned slice cookies should all be added to the Client's cookies or -// server's response. -// -// Example: -// s := NewCookieSigner(keyID, privKey) -// -// // Get Signed cookies for a resource that will expire in 1 hour -// cookies, err := s.Sign("*", time.Now().Add(1 * time.Hour)) -// if err != nil { -// fmt.Println("failed to create signed cookies", err) -// return -// } -// -// // Or get Signed cookies for a resource that will expire in 1 hour -// // and set path and domain of cookies -// cookies, err := s.Sign("*", time.Now().Add(1 * time.Hour), func(o *sign.CookieOptions) { -// o.Path = "/" -// o.Domain = ".example.com" -// }) -// if err != nil { -// fmt.Println("failed to create signed cookies", err) -// return -// } -// -// // Server Response via http.ResponseWriter -// for _, c := range cookies { -// http.SetCookie(w, c) -// } -// -// // Client request via the cookie jar -// if client.CookieJar != nil { -// for _, c := range cookies { -// client.Cookie(w, c) -// } -// } -func (s CookieSigner) Sign(u string, expires time.Time, opts ...func(*CookieOptions)) ([]*http.Cookie, error) { - scheme, err := cookieURLScheme(u) - if err != nil { - return nil, err - } - - resource, err := CreateResource(scheme, u) - if err != nil { - return nil, err - } - - p := NewCannedPolicy(resource, expires) - return createCookies(p, s.keyID, s.privKey, s.Opts.apply(opts...)) -} - -// Returns and validates the URL's scheme. -// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-setting-signed-cookie-custom-policy.html#private-content-custom-policy-statement-cookies -func cookieURLScheme(u string) (string, error) { - parts := strings.SplitN(u, "://", 2) - if len(parts) != 2 { - return "", fmt.Errorf("invalid cookie URL, missing scheme") - } - - scheme := strings.ToLower(parts[0]) - if scheme != "http" && scheme != "https" && scheme != "http*" { - return "", fmt.Errorf("invalid cookie URL scheme. Expect http, https, or http*. Go, %s", scheme) - } - - return scheme, nil -} - -// SignWithPolicy returns the cookies needed to allow user agents to make -// arbetrairy requets to cloudfront for the resource(s) defined by the policy. 
-// -// The returned slice cookies should all be added to the Client's cookies or -// server's response. -// -// Example: -// s := NewCookieSigner(keyID, privKey) -// -// policy := &sign.Policy{ -// Statements: []sign.Statement{ -// { -// // Read the provided documentation on how to set this -// // correctly, you'll probably want to use wildcards. -// Resource: RawCloudFrontURL, -// Condition: sign.Condition{ -// // Optional IP source address range -// IPAddress: &sign.IPAddress{SourceIP: "192.0.2.0/24"}, -// // Optional date URL is not valid until -// DateGreaterThan: &sign.AWSEpochTime{time.Now().Add(30 * time.Minute)}, -// // Required date the URL will expire after -// DateLessThan: &sign.AWSEpochTime{time.Now().Add(1 * time.Hour)}, -// }, -// }, -// }, -// } -// -// // Get Signed cookies for a resource that will expire in 1 hour -// cookies, err := s.SignWithPolicy(policy) -// if err != nil { -// fmt.Println("failed to create signed cookies", err) -// return -// } -// -// // Or get Signed cookies for a resource that will expire in 1 hour -// // and set path and domain of cookies -// cookies, err := s.Sign(policy, func(o *sign.CookieOptions) { -// o.Path = "/" -// o.Domain = ".example.com" -// }) -// if err != nil { -// fmt.Println("failed to create signed cookies", err) -// return -// } -// -// // Server Response via http.ResponseWriter -// for _, c := range cookies { -// http.SetCookie(w, c) -// } -// -// // Client request via the cookie jar -// if client.CookieJar != nil { -// for _, c := range cookies { -// client.Cookie(w, c) -// } -// } -func (s CookieSigner) SignWithPolicy(p *Policy, opts ...func(*CookieOptions)) ([]*http.Cookie, error) { - return createCookies(p, s.keyID, s.privKey, s.Opts.apply(opts...)) -} - -// Prepares the cookies to be attached to the header. An (optional) options -// struct is provided in case people don't want to manually edit their cookies. -func createCookies(p *Policy, keyID string, privKey *rsa.PrivateKey, opt CookieOptions) ([]*http.Cookie, error) { - b64Sig, b64Policy, err := p.Sign(privKey) - if err != nil { - return nil, err - } - - // Creates proper cookies - cPolicy := &http.Cookie{ - Name: CookiePolicyName, - Value: string(b64Policy), - HttpOnly: true, - } - cSignature := &http.Cookie{ - Name: CookieSignatureName, - Value: string(b64Sig), - HttpOnly: true, - } - cKey := &http.Cookie{ - Name: CookieKeyIDName, - Value: keyID, - HttpOnly: true, - } - - cookies := []*http.Cookie{cPolicy, cSignature, cKey} - - // Applie the cookie options - for _, c := range cookies { - c.Path = opt.Path - c.Domain = opt.Domain - c.Secure = opt.Secure - } - - return cookies, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go deleted file mode 100644 index ba56b4a7..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go +++ /dev/null @@ -1,205 +0,0 @@ -// Package sign provides utilities to generate signed URLs for Amazon CloudFront. -// -// More information about signed URLs and their structure can be found at: -// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-canned-policy.html -// -// To sign a URL create a URLSigner with your private key and credential pair key ID. -// Once you have a URLSigner instance you can call Sign or SignWithPolicy to -// sign the URLs. 
-//
-// Example:
-//
-//    // Sign URL to be valid for 1 hour from now.
-//    signer := sign.NewURLSigner(keyID, privKey)
-//    signedURL, err := signer.Sign(rawURL, time.Now().Add(1*time.Hour))
-//    if err != nil {
-//        log.Fatalf("Failed to sign url, err: %s\n", err.Error())
-//    }
-//
-package sign
-
-import (
-	"crypto/rsa"
-	"fmt"
-	"net/url"
-	"strings"
-	"time"
-)
-
-// A URLSigner provides URL signing utilities to sign URLs for Amazon CloudFront
-// resources. Using a private key and Credential Key Pair key ID the URLSigner
-// only needs to be created once per Credential Key Pair key ID and private key.
-//
-// The signer is safe to use concurrently.
-type URLSigner struct {
-	keyID   string
-	privKey *rsa.PrivateKey
-}
-
-// NewURLSigner constructs and returns a new URLSigner to be used for signing
-// Amazon CloudFront URL resources with.
-func NewURLSigner(keyID string, privKey *rsa.PrivateKey) *URLSigner {
-	return &URLSigner{
-		keyID:   keyID,
-		privKey: privKey,
-	}
-}
-
-// Sign will sign a single URL to expire at the time of expires, signed using the
-// Amazon CloudFront default Canned Policy. The URL will be signed with the
-// private key and Credential Key Pair Key ID previously provided to URLSigner.
-//
-// This is the default method of signing Amazon CloudFront URLs. If extra policy
-// conditions are needed other than URL expiry, use SignWithPolicy instead.
-//
-// Example:
-//
-//    // Sign URL to be valid for 1 hour from now.
-//    signer := sign.NewURLSigner(keyID, privKey)
-//    signedURL, err := signer.Sign(rawURL, time.Now().Add(1*time.Hour))
-//    if err != nil {
-//        log.Fatalf("Failed to sign url, err: %s\n", err.Error())
-//    }
-//
-func (s URLSigner) Sign(url string, expires time.Time) (string, error) {
-	scheme, cleanedURL, err := cleanURLScheme(url)
-	if err != nil {
-		return "", err
-	}
-
-	resource, err := CreateResource(scheme, url)
-	if err != nil {
-		return "", err
-	}
-
-	return signURL(scheme, cleanedURL, s.keyID, NewCannedPolicy(resource, expires), false, s.privKey)
-}
-
-// SignWithPolicy will sign a URL with the Policy provided. The URL will be
-// signed with the private key and Credential Key Pair Key ID previously provided to URLSigner.
-//
-// Use this signing method if you are looking to sign a URL with more than just
-// the URL's expiry time, or reusing Policies between multiple URL signings.
-// If only the expiry time is needed you can use Sign and provide just the
-// URL's expiry time. At least one policy statement is required for a signed URL.
-//
-// Note: It is not safe to use Policies between multiple signers concurrently
-//
-// Example:
-//
-//    // Sign URL to be valid for 30 minutes from now, expires one hour from now, and
-//    // restricted to the 192.0.2.0/24 IP address range.
-// policy := &sign.Policy{ -// Statements: []sign.Statement{ -// { -// Resource: rawURL, -// Condition: sign.Condition{ -// // Optional IP source address range -// IPAddress: &sign.IPAddress{SourceIP: "192.0.2.0/24"}, -// // Optional date URL is not valid until -// DateGreaterThan: &sign.AWSEpochTime{time.Now().Add(30 * time.Minute)}, -// // Required date the URL will expire after -// DateLessThan: &sign.AWSEpochTime{time.Now().Add(1 * time.Hour)}, -// }, -// }, -// }, -// } -// -// signer := sign.NewURLSigner(keyID, privKey) -// signedURL, err := signer.SignWithPolicy(rawURL, policy) -// if err != nil { -// log.Fatalf("Failed to sign url, err: %s\n", err.Error()) -// } -// -func (s URLSigner) SignWithPolicy(url string, p *Policy) (string, error) { - scheme, cleanedURL, err := cleanURLScheme(url) - if err != nil { - return "", err - } - - return signURL(scheme, cleanedURL, s.keyID, p, true, s.privKey) -} - -func signURL(scheme, url, keyID string, p *Policy, customPolicy bool, privKey *rsa.PrivateKey) (string, error) { - // Validation URL elements - if err := validateURL(url); err != nil { - return "", err - } - - b64Signature, b64Policy, err := p.Sign(privKey) - if err != nil { - return "", err - } - - // build and return signed URL - builtURL := buildSignedURL(url, keyID, p, customPolicy, b64Policy, b64Signature) - if scheme == "rtmp" { - return buildRTMPURL(builtURL) - } - - return builtURL, nil -} - -func buildSignedURL(baseURL, keyID string, p *Policy, customPolicy bool, b64Policy, b64Signature []byte) string { - pred := "?" - if strings.Contains(baseURL, "?") { - pred = "&" - } - signedURL := baseURL + pred - - if customPolicy { - signedURL += "Policy=" + string(b64Policy) - } else { - signedURL += fmt.Sprintf("Expires=%d", p.Statements[0].Condition.DateLessThan.UTC().Unix()) - } - signedURL += fmt.Sprintf("&Signature=%s&Key-Pair-Id=%s", string(b64Signature), keyID) - - return signedURL -} - -func buildRTMPURL(u string) (string, error) { - parsed, err := url.Parse(u) - if err != nil { - return "", fmt.Errorf("unable to parse rtmp signed URL, err: %s", err) - } - - rtmpURL := strings.TrimLeft(parsed.Path, "/") - if parsed.RawQuery != "" { - rtmpURL = fmt.Sprintf("%s?%s", rtmpURL, parsed.RawQuery) - } - - return rtmpURL, nil -} - -func cleanURLScheme(u string) (scheme, cleanedURL string, err error) { - parts := strings.SplitN(u, "://", 2) - if len(parts) != 2 { - return "", "", fmt.Errorf("invalid URL, missing scheme and domain/path") - } - scheme = strings.Replace(parts[0], "*", "", 1) - cleanedURL = fmt.Sprintf("%s://%s", scheme, parts[1]) - - return strings.ToLower(scheme), cleanedURL, nil -} - -var illegalQueryParms = []string{"Expires", "Policy", "Signature", "Key-Pair-Id"} - -func validateURL(u string) error { - parsed, err := url.Parse(u) - if err != nil { - return fmt.Errorf("unable to parse URL, err: %s", err.Error()) - } - - if parsed.Scheme == "" { - return fmt.Errorf("URL missing valid scheme, %s", u) - } - - q := parsed.Query() - for _, p := range illegalQueryParms { - if _, ok := q[p]; ok { - return fmt.Errorf("%s cannot be a query parameter for a signed URL", p) - } - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/api.go deleted file mode 100644 index 5132954f..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ /dev/null @@ -1,9527 +0,0 @@ -// THIS FILE IS 
AUTOMATICALLY GENERATED. DO NOT EDIT. - -// Package s3 provides a client for Amazon Simple Storage Service. -package s3 - -import ( - "fmt" - "io" - "time" - - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/restxml" -) - -const opAbortMultipartUpload = "AbortMultipartUpload" - -// AbortMultipartUploadRequest generates a "aws/request.Request" representing the -// client's request for the AbortMultipartUpload operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AbortMultipartUpload method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the AbortMultipartUploadRequest method. -// req, resp := client.AbortMultipartUploadRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { - op := &request.Operation{ - Name: opAbortMultipartUpload, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &AbortMultipartUploadInput{} - } - - req = c.newRequest(op, input, output) - output = &AbortMultipartUploadOutput{} - req.Data = output - return -} - -// Aborts a multipart upload. -// -// To verify that all parts have been removed, so you don't get charged for -// the part storage, you should call the List Parts operation and ensure the -// parts list is empty. -func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { - req, out := c.AbortMultipartUploadRequest(input) - err := req.Send() - return out, err -} - -const opCompleteMultipartUpload = "CompleteMultipartUpload" - -// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the -// client's request for the CompleteMultipartUpload operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CompleteMultipartUpload method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the CompleteMultipartUploadRequest method. 
-// req, resp := client.CompleteMultipartUploadRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { - op := &request.Operation{ - Name: opCompleteMultipartUpload, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &CompleteMultipartUploadInput{} - } - - req = c.newRequest(op, input, output) - output = &CompleteMultipartUploadOutput{} - req.Data = output - return -} - -// Completes a multipart upload by assembling previously uploaded parts. -func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { - req, out := c.CompleteMultipartUploadRequest(input) - err := req.Send() - return out, err -} - -const opCopyObject = "CopyObject" - -// CopyObjectRequest generates a "aws/request.Request" representing the -// client's request for the CopyObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CopyObject method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the CopyObjectRequest method. -// req, resp := client.CopyObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { - op := &request.Operation{ - Name: opCopyObject, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &CopyObjectInput{} - } - - req = c.newRequest(op, input, output) - output = &CopyObjectOutput{} - req.Data = output - return -} - -// Creates a copy of an object that is already stored in Amazon S3. -func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { - req, out := c.CopyObjectRequest(input) - err := req.Send() - return out, err -} - -const opCreateBucket = "CreateBucket" - -// CreateBucketRequest generates a "aws/request.Request" representing the -// client's request for the CreateBucket operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateBucket method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the CreateBucketRequest method. 
-// req, resp := client.CreateBucketRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { - op := &request.Operation{ - Name: opCreateBucket, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}", - } - - if input == nil { - input = &CreateBucketInput{} - } - - req = c.newRequest(op, input, output) - output = &CreateBucketOutput{} - req.Data = output - return -} - -// Creates a new bucket. -func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { - req, out := c.CreateBucketRequest(input) - err := req.Send() - return out, err -} - -const opCreateMultipartUpload = "CreateMultipartUpload" - -// CreateMultipartUploadRequest generates a "aws/request.Request" representing the -// client's request for the CreateMultipartUpload operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateMultipartUpload method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the CreateMultipartUploadRequest method. -// req, resp := client.CreateMultipartUploadRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { - op := &request.Operation{ - Name: opCreateMultipartUpload, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}?uploads", - } - - if input == nil { - input = &CreateMultipartUploadInput{} - } - - req = c.newRequest(op, input, output) - output = &CreateMultipartUploadOutput{} - req.Data = output - return -} - -// Initiates a multipart upload and returns an upload ID. -// -// Note: After you initiate multipart upload and upload one or more parts, you -// must either complete or abort multipart upload in order to stop getting charged -// for storage of the uploaded parts. Only after you either complete or abort -// multipart upload, Amazon S3 frees up the parts storage and stops charging -// you for the parts storage. -func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { - req, out := c.CreateMultipartUploadRequest(input) - err := req.Send() - return out, err -} - -const opDeleteBucket = "DeleteBucket" - -// DeleteBucketRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucket operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucket method directly -// instead. 
-// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the DeleteBucketRequest method. -// req, resp := client.DeleteBucketRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { - op := &request.Operation{ - Name: opDeleteBucket, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}", - } - - if input == nil { - input = &DeleteBucketInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteBucketOutput{} - req.Data = output - return -} - -// Deletes the bucket. All objects (including all object versions and Delete -// Markers) in the bucket must be deleted before the bucket itself can be deleted. -func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { - req, out := c.DeleteBucketRequest(input) - err := req.Send() - return out, err -} - -const opDeleteBucketCors = "DeleteBucketCors" - -// DeleteBucketCorsRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketCors operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketCors method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the DeleteBucketCorsRequest method. -// req, resp := client.DeleteBucketCorsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { - op := &request.Operation{ - Name: opDeleteBucketCors, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?cors", - } - - if input == nil { - input = &DeleteBucketCorsInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteBucketCorsOutput{} - req.Data = output - return -} - -// Deletes the cors configuration information set for the bucket. -func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { - req, out := c.DeleteBucketCorsRequest(input) - err := req.Send() - return out, err -} - -const opDeleteBucketLifecycle = "DeleteBucketLifecycle" - -// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketLifecycle operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
-// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketLifecycle method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the DeleteBucketLifecycleRequest method. -// req, resp := client.DeleteBucketLifecycleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { - op := &request.Operation{ - Name: opDeleteBucketLifecycle, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?lifecycle", - } - - if input == nil { - input = &DeleteBucketLifecycleInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteBucketLifecycleOutput{} - req.Data = output - return -} - -// Deletes the lifecycle configuration from the bucket. -func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { - req, out := c.DeleteBucketLifecycleRequest(input) - err := req.Send() - return out, err -} - -const opDeleteBucketPolicy = "DeleteBucketPolicy" - -// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketPolicy method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the DeleteBucketPolicyRequest method. -// req, resp := client.DeleteBucketPolicyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { - op := &request.Operation{ - Name: opDeleteBucketPolicy, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?policy", - } - - if input == nil { - input = &DeleteBucketPolicyInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteBucketPolicyOutput{} - req.Data = output - return -} - -// Deletes the policy from the bucket. 
-func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { - req, out := c.DeleteBucketPolicyRequest(input) - err := req.Send() - return out, err -} - -const opDeleteBucketReplication = "DeleteBucketReplication" - -// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketReplication operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketReplication method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the DeleteBucketReplicationRequest method. -// req, resp := client.DeleteBucketReplicationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { - op := &request.Operation{ - Name: opDeleteBucketReplication, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?replication", - } - - if input == nil { - input = &DeleteBucketReplicationInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteBucketReplicationOutput{} - req.Data = output - return -} - -// Deletes the replication configuration from the bucket. -func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { - req, out := c.DeleteBucketReplicationRequest(input) - err := req.Send() - return out, err -} - -const opDeleteBucketTagging = "DeleteBucketTagging" - -// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketTagging method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the DeleteBucketTaggingRequest method. 
-// req, resp := client.DeleteBucketTaggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { - op := &request.Operation{ - Name: opDeleteBucketTagging, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?tagging", - } - - if input == nil { - input = &DeleteBucketTaggingInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteBucketTaggingOutput{} - req.Data = output - return -} - -// Deletes the tags from the bucket. -func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { - req, out := c.DeleteBucketTaggingRequest(input) - err := req.Send() - return out, err -} - -const opDeleteBucketWebsite = "DeleteBucketWebsite" - -// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketWebsite operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketWebsite method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the DeleteBucketWebsiteRequest method. -// req, resp := client.DeleteBucketWebsiteRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { - op := &request.Operation{ - Name: opDeleteBucketWebsite, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?website", - } - - if input == nil { - input = &DeleteBucketWebsiteInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteBucketWebsiteOutput{} - req.Data = output - return -} - -// This operation removes the website configuration from the bucket. -func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { - req, out := c.DeleteBucketWebsiteRequest(input) - err := req.Send() - return out, err -} - -const opDeleteObject = "DeleteObject" - -// DeleteObjectRequest generates a "aws/request.Request" representing the -// client's request for the DeleteObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteObject method directly -// instead. 
-// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the DeleteObjectRequest method. -// req, resp := client.DeleteObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { - op := &request.Operation{ - Name: opDeleteObject, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &DeleteObjectInput{} - } - - req = c.newRequest(op, input, output) - output = &DeleteObjectOutput{} - req.Data = output - return -} - -// Removes the null version (if there is one) of an object and inserts a delete -// marker, which becomes the latest version of the object. If there isn't a -// null version, Amazon S3 does not remove any objects. -func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { - req, out := c.DeleteObjectRequest(input) - err := req.Send() - return out, err -} - -const opDeleteObjects = "DeleteObjects" - -// DeleteObjectsRequest generates a "aws/request.Request" representing the -// client's request for the DeleteObjects operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteObjects method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the DeleteObjectsRequest method. -// req, resp := client.DeleteObjectsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { - op := &request.Operation{ - Name: opDeleteObjects, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}?delete", - } - - if input == nil { - input = &DeleteObjectsInput{} - } - - req = c.newRequest(op, input, output) - output = &DeleteObjectsOutput{} - req.Data = output - return -} - -// This operation enables you to delete multiple objects from a bucket using -// a single HTTP request. You may specify up to 1000 keys. -func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { - req, out := c.DeleteObjectsRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" - -// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketAccelerateConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketAccelerateConfiguration method directly -// instead. 
-// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketAccelerateConfigurationRequest method. -// req, resp := client.GetBucketAccelerateConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { - op := &request.Operation{ - Name: opGetBucketAccelerateConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?accelerate", - } - - if input == nil { - input = &GetBucketAccelerateConfigurationInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketAccelerateConfigurationOutput{} - req.Data = output - return -} - -// Returns the accelerate configuration of a bucket. -func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { - req, out := c.GetBucketAccelerateConfigurationRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketAcl = "GetBucketAcl" - -// GetBucketAclRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketAcl method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketAclRequest method. -// req, resp := client.GetBucketAclRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { - op := &request.Operation{ - Name: opGetBucketAcl, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?acl", - } - - if input == nil { - input = &GetBucketAclInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketAclOutput{} - req.Data = output - return -} - -// Gets the access control policy for the bucket. -func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { - req, out := c.GetBucketAclRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketCors = "GetBucketCors" - -// GetBucketCorsRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketCors operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketCors method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
-// -// // Example sending a request using the GetBucketCorsRequest method. -// req, resp := client.GetBucketCorsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { - op := &request.Operation{ - Name: opGetBucketCors, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?cors", - } - - if input == nil { - input = &GetBucketCorsInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketCorsOutput{} - req.Data = output - return -} - -// Returns the cors configuration for the bucket. -func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { - req, out := c.GetBucketCorsRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketLifecycle = "GetBucketLifecycle" - -// GetBucketLifecycleRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketLifecycle operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLifecycle method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketLifecycleRequest method. -// req, resp := client.GetBucketLifecycleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { - if c.Client.Config.Logger != nil { - c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") - } - op := &request.Operation{ - Name: opGetBucketLifecycle, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?lifecycle", - } - - if input == nil { - input = &GetBucketLifecycleInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketLifecycleOutput{} - req.Data = output - return -} - -// Deprecated, see the GetBucketLifecycleConfiguration operation. -func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { - req, out := c.GetBucketLifecycleRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" - -// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketLifecycleConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLifecycleConfiguration method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
-// -// // Example sending a request using the GetBucketLifecycleConfigurationRequest method. -// req, resp := client.GetBucketLifecycleConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { - op := &request.Operation{ - Name: opGetBucketLifecycleConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?lifecycle", - } - - if input == nil { - input = &GetBucketLifecycleConfigurationInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketLifecycleConfigurationOutput{} - req.Data = output - return -} - -// Returns the lifecycle configuration information set on the bucket. -func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { - req, out := c.GetBucketLifecycleConfigurationRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketLocation = "GetBucketLocation" - -// GetBucketLocationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketLocation operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLocation method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketLocationRequest method. -// req, resp := client.GetBucketLocationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { - op := &request.Operation{ - Name: opGetBucketLocation, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?location", - } - - if input == nil { - input = &GetBucketLocationInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketLocationOutput{} - req.Data = output - return -} - -// Returns the region the bucket resides in. -func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { - req, out := c.GetBucketLocationRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketLogging = "GetBucketLogging" - -// GetBucketLoggingRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketLogging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLogging method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
-// -// // Example sending a request using the GetBucketLoggingRequest method. -// req, resp := client.GetBucketLoggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { - op := &request.Operation{ - Name: opGetBucketLogging, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?logging", - } - - if input == nil { - input = &GetBucketLoggingInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketLoggingOutput{} - req.Data = output - return -} - -// Returns the logging status of a bucket and the permissions users have to -// view and modify that status. To use GET, you must be the bucket owner. -func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { - req, out := c.GetBucketLoggingRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketNotification = "GetBucketNotification" - -// GetBucketNotificationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketNotification operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketNotification method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketNotificationRequest method. -// req, resp := client.GetBucketNotificationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { - if c.Client.Config.Logger != nil { - c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") - } - op := &request.Operation{ - Name: opGetBucketNotification, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?notification", - } - - if input == nil { - input = &GetBucketNotificationConfigurationRequest{} - } - - req = c.newRequest(op, input, output) - output = &NotificationConfigurationDeprecated{} - req.Data = output - return -} - -// Deprecated, see the GetBucketNotificationConfiguration operation. -func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { - req, out := c.GetBucketNotificationRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" - -// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketNotificationConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
-// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketNotificationConfiguration method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketNotificationConfigurationRequest method. -// req, resp := client.GetBucketNotificationConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { - op := &request.Operation{ - Name: opGetBucketNotificationConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?notification", - } - - if input == nil { - input = &GetBucketNotificationConfigurationRequest{} - } - - req = c.newRequest(op, input, output) - output = &NotificationConfiguration{} - req.Data = output - return -} - -// Returns the notification configuration of a bucket. -func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { - req, out := c.GetBucketNotificationConfigurationRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketPolicy = "GetBucketPolicy" - -// GetBucketPolicyRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketPolicy method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketPolicyRequest method. -// req, resp := client.GetBucketPolicyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { - op := &request.Operation{ - Name: opGetBucketPolicy, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?policy", - } - - if input == nil { - input = &GetBucketPolicyInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketPolicyOutput{} - req.Data = output - return -} - -// Returns the policy of a specified bucket. -func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { - req, out := c.GetBucketPolicyRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketReplication = "GetBucketReplication" - -// GetBucketReplicationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketReplication operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
-// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketReplication method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketReplicationRequest method. -// req, resp := client.GetBucketReplicationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { - op := &request.Operation{ - Name: opGetBucketReplication, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?replication", - } - - if input == nil { - input = &GetBucketReplicationInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketReplicationOutput{} - req.Data = output - return -} - -// Deprecated, see the GetBucketReplicationConfiguration operation. -func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { - req, out := c.GetBucketReplicationRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketRequestPayment = "GetBucketRequestPayment" - -// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketRequestPayment operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketRequestPayment method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketRequestPaymentRequest method. -// req, resp := client.GetBucketRequestPaymentRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { - op := &request.Operation{ - Name: opGetBucketRequestPayment, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?requestPayment", - } - - if input == nil { - input = &GetBucketRequestPaymentInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketRequestPaymentOutput{} - req.Data = output - return -} - -// Returns the request payment configuration of a bucket. -func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { - req, out := c.GetBucketRequestPaymentRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketTagging = "GetBucketTagging" - -// GetBucketTaggingRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
-// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketTagging method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketTaggingRequest method. -// req, resp := client.GetBucketTaggingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { - op := &request.Operation{ - Name: opGetBucketTagging, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?tagging", - } - - if input == nil { - input = &GetBucketTaggingInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketTaggingOutput{} - req.Data = output - return -} - -// Returns the tag set associated with the bucket. -func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { - req, out := c.GetBucketTaggingRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketVersioning = "GetBucketVersioning" - -// GetBucketVersioningRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketVersioning operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketVersioning method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketVersioningRequest method. -// req, resp := client.GetBucketVersioningRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { - op := &request.Operation{ - Name: opGetBucketVersioning, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?versioning", - } - - if input == nil { - input = &GetBucketVersioningInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketVersioningOutput{} - req.Data = output - return -} - -// Returns the versioning state of a bucket. -func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { - req, out := c.GetBucketVersioningRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketWebsite = "GetBucketWebsite" - -// GetBucketWebsiteRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketWebsite operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
-// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketWebsite method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetBucketWebsiteRequest method. -// req, resp := client.GetBucketWebsiteRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { - op := &request.Operation{ - Name: opGetBucketWebsite, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?website", - } - - if input == nil { - input = &GetBucketWebsiteInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketWebsiteOutput{} - req.Data = output - return -} - -// Returns the website configuration for a bucket. -func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { - req, out := c.GetBucketWebsiteRequest(input) - err := req.Send() - return out, err -} - -const opGetObject = "GetObject" - -// GetObjectRequest generates a "aws/request.Request" representing the -// client's request for the GetObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObject method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetObjectRequest method. -// req, resp := client.GetObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { - op := &request.Operation{ - Name: opGetObject, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &GetObjectInput{} - } - - req = c.newRequest(op, input, output) - output = &GetObjectOutput{} - req.Data = output - return -} - -// Retrieves objects from Amazon S3. -func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { - req, out := c.GetObjectRequest(input) - err := req.Send() - return out, err -} - -const opGetObjectAcl = "GetObjectAcl" - -// GetObjectAclRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObjectAcl method directly -// instead. 
-// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetObjectAclRequest method. -// req, resp := client.GetObjectAclRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { - op := &request.Operation{ - Name: opGetObjectAcl, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?acl", - } - - if input == nil { - input = &GetObjectAclInput{} - } - - req = c.newRequest(op, input, output) - output = &GetObjectAclOutput{} - req.Data = output - return -} - -// Returns the access control list (ACL) of an object. -func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { - req, out := c.GetObjectAclRequest(input) - err := req.Send() - return out, err -} - -const opGetObjectTorrent = "GetObjectTorrent" - -// GetObjectTorrentRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectTorrent operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObjectTorrent method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the GetObjectTorrentRequest method. -// req, resp := client.GetObjectTorrentRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { - op := &request.Operation{ - Name: opGetObjectTorrent, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?torrent", - } - - if input == nil { - input = &GetObjectTorrentInput{} - } - - req = c.newRequest(op, input, output) - output = &GetObjectTorrentOutput{} - req.Data = output - return -} - -// Return torrent files from a bucket. -func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { - req, out := c.GetObjectTorrentRequest(input) - err := req.Send() - return out, err -} - -const opHeadBucket = "HeadBucket" - -// HeadBucketRequest generates a "aws/request.Request" representing the -// client's request for the HeadBucket operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the HeadBucket method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the HeadBucketRequest method. 
-// req, resp := client.HeadBucketRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { - op := &request.Operation{ - Name: opHeadBucket, - HTTPMethod: "HEAD", - HTTPPath: "/{Bucket}", - } - - if input == nil { - input = &HeadBucketInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &HeadBucketOutput{} - req.Data = output - return -} - -// This operation is useful to determine if a bucket exists and you have permission -// to access it. -func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { - req, out := c.HeadBucketRequest(input) - err := req.Send() - return out, err -} - -const opHeadObject = "HeadObject" - -// HeadObjectRequest generates a "aws/request.Request" representing the -// client's request for the HeadObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the HeadObject method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the HeadObjectRequest method. -// req, resp := client.HeadObjectRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { - op := &request.Operation{ - Name: opHeadObject, - HTTPMethod: "HEAD", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &HeadObjectInput{} - } - - req = c.newRequest(op, input, output) - output = &HeadObjectOutput{} - req.Data = output - return -} - -// The HEAD operation retrieves metadata from an object without returning the -// object itself. This operation is useful if you're only interested in an object's -// metadata. To use HEAD, you must have READ access to the object. -func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { - req, out := c.HeadObjectRequest(input) - err := req.Send() - return out, err -} - -const opListBuckets = "ListBuckets" - -// ListBucketsRequest generates a "aws/request.Request" representing the -// client's request for the ListBuckets operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListBuckets method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the ListBucketsRequest method. 
-// req, resp := client.ListBucketsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { - op := &request.Operation{ - Name: opListBuckets, - HTTPMethod: "GET", - HTTPPath: "/", - } - - if input == nil { - input = &ListBucketsInput{} - } - - req = c.newRequest(op, input, output) - output = &ListBucketsOutput{} - req.Data = output - return -} - -// Returns a list of all buckets owned by the authenticated sender of the request. -func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { - req, out := c.ListBucketsRequest(input) - err := req.Send() - return out, err -} - -const opListMultipartUploads = "ListMultipartUploads" - -// ListMultipartUploadsRequest generates a "aws/request.Request" representing the -// client's request for the ListMultipartUploads operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListMultipartUploads method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the ListMultipartUploadsRequest method. -// req, resp := client.ListMultipartUploadsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { - op := &request.Operation{ - Name: opListMultipartUploads, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?uploads", - Paginator: &request.Paginator{ - InputTokens: []string{"KeyMarker", "UploadIdMarker"}, - OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"}, - LimitToken: "MaxUploads", - TruncationToken: "IsTruncated", - }, - } - - if input == nil { - input = &ListMultipartUploadsInput{} - } - - req = c.newRequest(op, input, output) - output = &ListMultipartUploadsOutput{} - req.Data = output - return -} - -// This operation lists in-progress multipart uploads. -func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { - req, out := c.ListMultipartUploadsRequest(input) - err := req.Send() - return out, err -} - -// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListMultipartUploads method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListMultipartUploads operation. 
-// pageNum := 0 -// err := client.ListMultipartUploadsPages(params, -// func(page *ListMultipartUploadsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(p *ListMultipartUploadsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListMultipartUploadsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListMultipartUploadsOutput), lastPage) - }) -} - -const opListObjectVersions = "ListObjectVersions" - -// ListObjectVersionsRequest generates a "aws/request.Request" representing the -// client's request for the ListObjectVersions operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListObjectVersions method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the ListObjectVersionsRequest method. -// req, resp := client.ListObjectVersionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { - op := &request.Operation{ - Name: opListObjectVersions, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?versions", - Paginator: &request.Paginator{ - InputTokens: []string{"KeyMarker", "VersionIdMarker"}, - OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, - LimitToken: "MaxKeys", - TruncationToken: "IsTruncated", - }, - } - - if input == nil { - input = &ListObjectVersionsInput{} - } - - req = c.newRequest(op, input, output) - output = &ListObjectVersionsOutput{} - req.Data = output - return -} - -// Returns metadata about all of the versions of objects in a bucket. -func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { - req, out := c.ListObjectVersionsRequest(input) - err := req.Send() - return out, err -} - -// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListObjectVersions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListObjectVersions operation. 
-// pageNum := 0 -// err := client.ListObjectVersionsPages(params, -// func(page *ListObjectVersionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(p *ListObjectVersionsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListObjectVersionsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListObjectVersionsOutput), lastPage) - }) -} - -const opListObjects = "ListObjects" - -// ListObjectsRequest generates a "aws/request.Request" representing the -// client's request for the ListObjects operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListObjects method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the ListObjectsRequest method. -// req, resp := client.ListObjectsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { - op := &request.Operation{ - Name: opListObjects, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"NextMarker || Contents[-1].Key"}, - LimitToken: "MaxKeys", - TruncationToken: "IsTruncated", - }, - } - - if input == nil { - input = &ListObjectsInput{} - } - - req = c.newRequest(op, input, output) - output = &ListObjectsOutput{} - req.Data = output - return -} - -// Returns some or all (up to 1000) of the objects in a bucket. You can use -// the request parameters as selection criteria to return a subset of the objects -// in a bucket. -func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { - req, out := c.ListObjectsRequest(input) - err := req.Send() - return out, err -} - -// ListObjectsPages iterates over the pages of a ListObjects operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListObjects method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListObjects operation. 
-// pageNum := 0 -// err := client.ListObjectsPages(params, -// func(page *ListObjectsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(p *ListObjectsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListObjectsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListObjectsOutput), lastPage) - }) -} - -const opListObjectsV2 = "ListObjectsV2" - -// ListObjectsV2Request generates a "aws/request.Request" representing the -// client's request for the ListObjectsV2 operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListObjectsV2 method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the ListObjectsV2Request method. -// req, resp := client.ListObjectsV2Request(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { - op := &request.Operation{ - Name: opListObjectsV2, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?list-type=2", - Paginator: &request.Paginator{ - InputTokens: []string{"ContinuationToken"}, - OutputTokens: []string{"NextContinuationToken"}, - LimitToken: "MaxKeys", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListObjectsV2Input{} - } - - req = c.newRequest(op, input, output) - output = &ListObjectsV2Output{} - req.Data = output - return -} - -// Returns some or all (up to 1000) of the objects in a bucket. You can use -// the request parameters as selection criteria to return a subset of the objects -// in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend -// you use this revised API for new application development. -func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { - req, out := c.ListObjectsV2Request(input) - err := req.Send() - return out, err -} - -// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListObjectsV2 method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListObjectsV2 operation. 
-// pageNum := 0 -// err := client.ListObjectsV2Pages(params, -// func(page *ListObjectsV2Output, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(p *ListObjectsV2Output, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListObjectsV2Request(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListObjectsV2Output), lastPage) - }) -} - -const opListParts = "ListParts" - -// ListPartsRequest generates a "aws/request.Request" representing the -// client's request for the ListParts operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListParts method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the ListPartsRequest method. -// req, resp := client.ListPartsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { - op := &request.Operation{ - Name: opListParts, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}", - Paginator: &request.Paginator{ - InputTokens: []string{"PartNumberMarker"}, - OutputTokens: []string{"NextPartNumberMarker"}, - LimitToken: "MaxParts", - TruncationToken: "IsTruncated", - }, - } - - if input == nil { - input = &ListPartsInput{} - } - - req = c.newRequest(op, input, output) - output = &ListPartsOutput{} - req.Data = output - return -} - -// Lists the parts that have been uploaded for a specific multipart upload. -func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { - req, out := c.ListPartsRequest(input) - err := req.Send() - return out, err -} - -// ListPartsPages iterates over the pages of a ListParts operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListParts method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListParts operation. 
-// pageNum := 0 -// err := client.ListPartsPages(params, -// func(page *ListPartsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *S3) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListPartsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListPartsOutput), lastPage) - }) -} - -const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" - -// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketAccelerateConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketAccelerateConfiguration method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the PutBucketAccelerateConfigurationRequest method. -// req, resp := client.PutBucketAccelerateConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { - op := &request.Operation{ - Name: opPutBucketAccelerateConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?accelerate", - } - - if input == nil { - input = &PutBucketAccelerateConfigurationInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &PutBucketAccelerateConfigurationOutput{} - req.Data = output - return -} - -// Sets the accelerate configuration of an existing bucket. -func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { - req, out := c.PutBucketAccelerateConfigurationRequest(input) - err := req.Send() - return out, err -} - -const opPutBucketAcl = "PutBucketAcl" - -// PutBucketAclRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. -// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketAcl method directly -// instead. -// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. -// -// // Example sending a request using the PutBucketAclRequest method. 
-//    req, resp := client.PutBucketAclRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) {
-	op := &request.Operation{
-		Name:       opPutBucketAcl,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}?acl",
-	}
-
-	if input == nil {
-		input = &PutBucketAclInput{}
-	}
-
-	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
-	output = &PutBucketAclOutput{}
-	req.Data = output
-	return
-}
-
-// Sets the permissions on a bucket using access control lists (ACL).
-func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) {
-	req, out := c.PutBucketAclRequest(input)
-	err := req.Send()
-	return out, err
-}
-
-const opPutBucketCors = "PutBucketCors"
-
-// PutBucketCorsRequest generates a "aws/request.Request" representing the
-// client's request for the PutBucketCors operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketCors method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-//    // Example sending a request using the PutBucketCorsRequest method.
-//    req, resp := client.PutBucketCorsRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) {
-	op := &request.Operation{
-		Name:       opPutBucketCors,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}?cors",
-	}
-
-	if input == nil {
-		input = &PutBucketCorsInput{}
-	}
-
-	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
-	output = &PutBucketCorsOutput{}
-	req.Data = output
-	return
-}
-
-// Sets the cors configuration for a bucket.
-func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) {
-	req, out := c.PutBucketCorsRequest(input)
-	err := req.Send()
-	return out, err
-}
-
-const opPutBucketLifecycle = "PutBucketLifecycle"
-
-// PutBucketLifecycleRequest generates a "aws/request.Request" representing the
-// client's request for the PutBucketLifecycle operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketLifecycle method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-//    // Example sending a request using the PutBucketLifecycleRequest method.
-//    req, resp := client.PutBucketLifecycleRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) {
-	if c.Client.Config.Logger != nil {
-		c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated")
-	}
-	op := &request.Operation{
-		Name:       opPutBucketLifecycle,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}?lifecycle",
-	}
-
-	if input == nil {
-		input = &PutBucketLifecycleInput{}
-	}
-
-	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
-	output = &PutBucketLifecycleOutput{}
-	req.Data = output
-	return
-}
-
-// Deprecated, see the PutBucketLifecycleConfiguration operation.
-func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) {
-	req, out := c.PutBucketLifecycleRequest(input)
-	err := req.Send()
-	return out, err
-}
-
-const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration"
-
-// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the
-// client's request for the PutBucketLifecycleConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketLifecycleConfiguration method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-//    // Example sending a request using the PutBucketLifecycleConfigurationRequest method.
-//    req, resp := client.PutBucketLifecycleConfigurationRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) {
-	op := &request.Operation{
-		Name:       opPutBucketLifecycleConfiguration,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}?lifecycle",
-	}
-
-	if input == nil {
-		input = &PutBucketLifecycleConfigurationInput{}
-	}
-
-	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
-	output = &PutBucketLifecycleConfigurationOutput{}
-	req.Data = output
-	return
-}
-
-// Sets lifecycle configuration for your bucket. If a lifecycle configuration
-// exists, it replaces it.
-func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) {
-	req, out := c.PutBucketLifecycleConfigurationRequest(input)
-	err := req.Send()
-	return out, err
-}
-
-const opPutBucketLogging = "PutBucketLogging"
-
-// PutBucketLoggingRequest generates a "aws/request.Request" representing the
-// client's request for the PutBucketLogging operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketLogging method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-//    // Example sending a request using the PutBucketLoggingRequest method.
-//    req, resp := client.PutBucketLoggingRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) {
-	op := &request.Operation{
-		Name:       opPutBucketLogging,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}?logging",
-	}
-
-	if input == nil {
-		input = &PutBucketLoggingInput{}
-	}
-
-	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
-	output = &PutBucketLoggingOutput{}
-	req.Data = output
-	return
-}
-
-// Sets the logging parameters for a bucket and specifies permissions for who
-// can view and modify the logging parameters. To set the logging status of
-// a bucket, you must be the bucket owner.
-func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) {
-	req, out := c.PutBucketLoggingRequest(input)
-	err := req.Send()
-	return out, err
-}
-
-const opPutBucketNotification = "PutBucketNotification"
-
-// PutBucketNotificationRequest generates a "aws/request.Request" representing the
-// client's request for the PutBucketNotification operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketNotification method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-//    // Example sending a request using the PutBucketNotificationRequest method.
-//    req, resp := client.PutBucketNotificationRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) {
-	if c.Client.Config.Logger != nil {
-		c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated")
-	}
-	op := &request.Operation{
-		Name:       opPutBucketNotification,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}?notification",
-	}
-
-	if input == nil {
-		input = &PutBucketNotificationInput{}
-	}
-
-	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
-	output = &PutBucketNotificationOutput{}
-	req.Data = output
-	return
-}
-
-// Deprecated, see the PutBucketNotificationConfiguration operation.
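All of the generated operations deleted above share the two-step pattern their doc comments describe: build a *request.Request, optionally attach handlers, then call Send. A minimal sketch of that pattern, assuming an already-configured *s3.S3 client named svc and a hypothetical bucket name (neither is part of this diff):

    // Sketch only: svc is assumed to be a configured *s3.S3 client; the
    // aws, request, and s3 packages from aws-sdk-go are imported.
    req, out := svc.PutBucketLoggingRequest(&s3.PutBucketLoggingInput{
        Bucket:              aws.String("example-bucket"), // hypothetical name
        BucketLoggingStatus: &s3.BucketLoggingStatus{},
    })
    // Custom handler runs just before the HTTP request is sent.
    req.Handlers.Send.PushFront(func(r *request.Request) {
        fmt.Println("sending", r.Operation.Name)
    })
    if err := req.Send(); err == nil {
        fmt.Println(out) // out is filled once Send returns without error
    }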
-func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) {
-	req, out := c.PutBucketNotificationRequest(input)
-	err := req.Send()
-	return out, err
-}
-
-const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration"
-
-// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the
-// client's request for the PutBucketNotificationConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketNotificationConfiguration method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-//    // Example sending a request using the PutBucketNotificationConfigurationRequest method.
-//    req, resp := client.PutBucketNotificationConfigurationRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) {
-	op := &request.Operation{
-		Name:       opPutBucketNotificationConfiguration,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}?notification",
-	}
-
-	if input == nil {
-		input = &PutBucketNotificationConfigurationInput{}
-	}
-
-	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
-	output = &PutBucketNotificationConfigurationOutput{}
-	req.Data = output
-	return
-}
-
-// Enables notifications of specified events for a bucket.
-func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) {
-	req, out := c.PutBucketNotificationConfigurationRequest(input)
-	err := req.Send()
-	return out, err
-}
-
-const opPutBucketPolicy = "PutBucketPolicy"
-
-// PutBucketPolicyRequest generates a "aws/request.Request" representing the
-// client's request for the PutBucketPolicy operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketPolicy method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-//    // Example sending a request using the PutBucketPolicyRequest method.
-//    req, resp := client.PutBucketPolicyRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) {
-	op := &request.Operation{
-		Name:       opPutBucketPolicy,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}?policy",
-	}
-
-	if input == nil {
-		input = &PutBucketPolicyInput{}
-	}
-
-	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
-	output = &PutBucketPolicyOutput{}
-	req.Data = output
-	return
-}
-
-// Replaces a policy on a bucket. If the bucket already has a policy, the one
-// in this request completely replaces it.
-func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) {
-	req, out := c.PutBucketPolicyRequest(input)
-	err := req.Send()
-	return out, err
-}
-
-const opPutBucketReplication = "PutBucketReplication"
-
-// PutBucketReplicationRequest generates a "aws/request.Request" representing the
-// client's request for the PutBucketReplication operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketReplication method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-//    // Example sending a request using the PutBucketReplicationRequest method.
-//    req, resp := client.PutBucketReplicationRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) {
-	op := &request.Operation{
-		Name:       opPutBucketReplication,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}?replication",
-	}
-
-	if input == nil {
-		input = &PutBucketReplicationInput{}
-	}
-
-	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
-	output = &PutBucketReplicationOutput{}
-	req.Data = output
-	return
-}
-
-// Creates a new replication configuration (or replaces an existing one, if
-// present).
-func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) {
-	req, out := c.PutBucketReplicationRequest(input)
-	err := req.Send()
-	return out, err
-}
-
-const opPutBucketRequestPayment = "PutBucketRequestPayment"
-
-// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the
-// client's request for the PutBucketRequestPayment operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketRequestPayment method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-//    // Example sending a request using the PutBucketRequestPaymentRequest method.
-//    req, resp := client.PutBucketRequestPaymentRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) {
-	op := &request.Operation{
-		Name:       opPutBucketRequestPayment,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}?requestPayment",
-	}
-
-	if input == nil {
-		input = &PutBucketRequestPaymentInput{}
-	}
-
-	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
-	output = &PutBucketRequestPaymentOutput{}
-	req.Data = output
-	return
-}
-
-// Sets the request payment configuration for a bucket. By default, the bucket
-// owner pays for downloads from the bucket. This configuration parameter enables
-// the bucket owner (only) to specify that the person requesting the download
-// will be charged for the download. Documentation on requester pays buckets
-// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html
-func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) {
-	req, out := c.PutBucketRequestPaymentRequest(input)
-	err := req.Send()
-	return out, err
-}
-
-const opPutBucketTagging = "PutBucketTagging"
-
-// PutBucketTaggingRequest generates a "aws/request.Request" representing the
-// client's request for the PutBucketTagging operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketTagging method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-//    // Example sending a request using the PutBucketTaggingRequest method.
-//    req, resp := client.PutBucketTaggingRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) {
-	op := &request.Operation{
-		Name:       opPutBucketTagging,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}?tagging",
-	}
-
-	if input == nil {
-		input = &PutBucketTaggingInput{}
-	}
-
-	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
-	output = &PutBucketTaggingOutput{}
-	req.Data = output
-	return
-}
-
-// Sets the tags for a bucket.
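When the request object is not needed, the wrapper methods call Send internally. A hedged sketch of the direct form for PutBucketTagging, again assuming a configured svc client and an illustrative bucket name and tag set:

    // Sketch only: bucket name and tag values are illustrative.
    _, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
        Bucket: aws.String("example-bucket"),
        Tagging: &s3.Tagging{
            TagSet: []*s3.Tag{
                {Key: aws.String("owner"), Value: aws.String("kube-monkey")},
            },
        },
    })
    if err != nil {
        glog.Errorf("PutBucketTagging failed: %v", err)
    }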
-func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) {
-	req, out := c.PutBucketTaggingRequest(input)
-	err := req.Send()
-	return out, err
-}
-
-const opPutBucketVersioning = "PutBucketVersioning"
-
-// PutBucketVersioningRequest generates a "aws/request.Request" representing the
-// client's request for the PutBucketVersioning operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketVersioning method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-//    // Example sending a request using the PutBucketVersioningRequest method.
-//    req, resp := client.PutBucketVersioningRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) {
-	op := &request.Operation{
-		Name:       opPutBucketVersioning,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}?versioning",
-	}
-
-	if input == nil {
-		input = &PutBucketVersioningInput{}
-	}
-
-	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
-	output = &PutBucketVersioningOutput{}
-	req.Data = output
-	return
-}
-
-// Sets the versioning state of an existing bucket. To set the versioning state,
-// you must be the bucket owner.
-func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) {
-	req, out := c.PutBucketVersioningRequest(input)
-	err := req.Send()
-	return out, err
-}
-
-const opPutBucketWebsite = "PutBucketWebsite"
-
-// PutBucketWebsiteRequest generates a "aws/request.Request" representing the
-// client's request for the PutBucketWebsite operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketWebsite method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-//    // Example sending a request using the PutBucketWebsiteRequest method.
-//    req, resp := client.PutBucketWebsiteRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) {
-	op := &request.Operation{
-		Name:       opPutBucketWebsite,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}?website",
-	}
-
-	if input == nil {
-		input = &PutBucketWebsiteInput{}
-	}
-
-	req = c.newRequest(op, input, output)
-	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
-	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
-	output = &PutBucketWebsiteOutput{}
-	req.Data = output
-	return
-}
-
-// Sets the website configuration for a bucket.
-func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) {
-	req, out := c.PutBucketWebsiteRequest(input)
-	err := req.Send()
-	return out, err
-}
-
-const opPutObject = "PutObject"
-
-// PutObjectRequest generates a "aws/request.Request" representing the
-// client's request for the PutObject operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutObject method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-//    // Example sending a request using the PutObjectRequest method.
-//    req, resp := client.PutObjectRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) {
-	op := &request.Operation{
-		Name:       opPutObject,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}/{Key+}",
-	}
-
-	if input == nil {
-		input = &PutObjectInput{}
-	}
-
-	req = c.newRequest(op, input, output)
-	output = &PutObjectOutput{}
-	req.Data = output
-	return
-}
-
-// Adds an object to a bucket.
-func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) {
-	req, out := c.PutObjectRequest(input)
-	err := req.Send()
-	return out, err
-}
-
-const opPutObjectAcl = "PutObjectAcl"
-
-// PutObjectAclRequest generates a "aws/request.Request" representing the
-// client's request for the PutObjectAcl operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutObjectAcl method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-//    // Example sending a request using the PutObjectAclRequest method.
-//    req, resp := client.PutObjectAclRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) {
-	op := &request.Operation{
-		Name:       opPutObjectAcl,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}/{Key+}?acl",
-	}
-
-	if input == nil {
-		input = &PutObjectAclInput{}
-	}
-
-	req = c.newRequest(op, input, output)
-	output = &PutObjectAclOutput{}
-	req.Data = output
-	return
-}
-
-// Uses the acl subresource to set the access control list (ACL) permissions
-// for an object that already exists in a bucket.
-func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) {
-	req, out := c.PutObjectAclRequest(input)
-	err := req.Send()
-	return out, err
-}
-
-const opRestoreObject = "RestoreObject"
-
-// RestoreObjectRequest generates a "aws/request.Request" representing the
-// client's request for the RestoreObject operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the RestoreObject method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-//    // Example sending a request using the RestoreObjectRequest method.
-//    req, resp := client.RestoreObjectRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) {
-	op := &request.Operation{
-		Name:       opRestoreObject,
-		HTTPMethod: "POST",
-		HTTPPath:   "/{Bucket}/{Key+}?restore",
-	}
-
-	if input == nil {
-		input = &RestoreObjectInput{}
-	}
-
-	req = c.newRequest(op, input, output)
-	output = &RestoreObjectOutput{}
-	req.Data = output
-	return
-}
-
-// Restores an archived copy of an object back into Amazon S3.
-func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
-	req, out := c.RestoreObjectRequest(input)
-	err := req.Send()
-	return out, err
-}
-
-const opUploadPart = "UploadPart"
-
-// UploadPartRequest generates a "aws/request.Request" representing the
-// client's request for the UploadPart operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the UploadPart method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-//    // Example sending a request using the UploadPartRequest method.
-//    req, resp := client.UploadPartRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) {
-	op := &request.Operation{
-		Name:       opUploadPart,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}/{Key+}",
-	}
-
-	if input == nil {
-		input = &UploadPartInput{}
-	}
-
-	req = c.newRequest(op, input, output)
-	output = &UploadPartOutput{}
-	req.Data = output
-	return
-}
-
-// Uploads a part in a multipart upload.
-//
-// Note: After you initiate multipart upload and upload one or more parts, you
-// must either complete or abort multipart upload in order to stop getting charged
-// for storage of the uploaded parts. Only after you either complete or abort
-// multipart upload, Amazon S3 frees up the parts storage and stops charging
-// you for the parts storage.
-func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) {
-	req, out := c.UploadPartRequest(input)
-	err := req.Send()
-	return out, err
-}
-
-const opUploadPartCopy = "UploadPartCopy"
-
-// UploadPartCopyRequest generates a "aws/request.Request" representing the
-// client's request for the UploadPartCopy operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
-//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the UploadPartCopy method directly
-// instead.
-//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
-//
-//    // Example sending a request using the UploadPartCopyRequest method.
-//    req, resp := client.UploadPartCopyRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) {
-	op := &request.Operation{
-		Name:       opUploadPartCopy,
-		HTTPMethod: "PUT",
-		HTTPPath:   "/{Bucket}/{Key+}",
-	}
-
-	if input == nil {
-		input = &UploadPartCopyInput{}
-	}
-
-	req = c.newRequest(op, input, output)
-	output = &UploadPartCopyOutput{}
-	req.Data = output
-	return
-}
-
-// Uploads a part by copying data from an existing object as data source.
-func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) {
-	req, out := c.UploadPartCopyRequest(input)
-	err := req.Send()
-	return out, err
-}
-
-// Specifies the days since the initiation of an Incomplete Multipart Upload
-// that Lifecycle will wait before permanently removing all parts of the upload.
-type AbortIncompleteMultipartUpload struct {
-	_ struct{} `type:"structure"`
-
-	// Indicates the number of days that must pass since initiation for Lifecycle
-	// to abort an Incomplete Multipart Upload.
-	DaysAfterInitiation *int64 `type:"integer"`
-}
-
-// String returns the string representation
-func (s AbortIncompleteMultipartUpload) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s AbortIncompleteMultipartUpload) GoString() string {
-	return s.String()
-}
-
-type AbortMultipartUploadInput struct {
-	_ struct{} `type:"structure"`
-
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
-	// Documentation on downloading objects from requester pays buckets can be found
-	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-
-	UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s AbortMultipartUploadInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s AbortMultipartUploadInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *AbortMultipartUploadInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Key == nil {
-		invalidParams.Add(request.NewErrParamRequired("Key"))
-	}
-	if s.Key != nil && len(*s.Key) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
-	}
-	if s.UploadId == nil {
-		invalidParams.Add(request.NewErrParamRequired("UploadId"))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-type AbortMultipartUploadOutput struct {
-	_ struct{} `type:"structure"`
-
-	// If present, indicates that the requester was successfully charged for the
-	// request.
-	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
-}
-
-// String returns the string representation
-func (s AbortMultipartUploadOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s AbortMultipartUploadOutput) GoString() string {
-	return s.String()
-}
-
-type AccelerateConfiguration struct {
-	_ struct{} `type:"structure"`
-
-	// The accelerate configuration of the bucket.
-	Status *string `type:"string" enum:"BucketAccelerateStatus"`
-}
-
-// String returns the string representation
-func (s AccelerateConfiguration) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s AccelerateConfiguration) GoString() string {
-	return s.String()
-}
-
-type AccessControlPolicy struct {
-	_ struct{} `type:"structure"`
-
-	// A list of grants.
-	Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
-
-	Owner *Owner `type:"structure"`
-}
-
-// String returns the string representation
-func (s AccessControlPolicy) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s AccessControlPolicy) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *AccessControlPolicy) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"}
-	if s.Grants != nil {
-		for i, v := range s.Grants {
-			if v == nil {
-				continue
-			}
-			if err := v.Validate(); err != nil {
-				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams))
-			}
-		}
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-type Bucket struct {
-	_ struct{} `type:"structure"`
-
-	// Date the bucket was created.
-	CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
-
-	// The name of the bucket.
-	Name *string `type:"string"`
-}
-
-// String returns the string representation
-func (s Bucket) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s Bucket) GoString() string {
-	return s.String()
-}
-
-type BucketLifecycleConfiguration struct {
-	_ struct{} `type:"structure"`
-
-	Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
-}
-
-// String returns the string representation
-func (s BucketLifecycleConfiguration) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s BucketLifecycleConfiguration) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *BucketLifecycleConfiguration) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"}
-	if s.Rules == nil {
-		invalidParams.Add(request.NewErrParamRequired("Rules"))
-	}
-	if s.Rules != nil {
-		for i, v := range s.Rules {
-			if v == nil {
-				continue
-			}
-			if err := v.Validate(); err != nil {
-				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
-			}
-		}
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-type BucketLoggingStatus struct {
-	_ struct{} `type:"structure"`
-
-	LoggingEnabled *LoggingEnabled `type:"structure"`
-}
-
-// String returns the string representation
-func (s BucketLoggingStatus) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s BucketLoggingStatus) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
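Every input type being deleted here carries a generated Validate method like the ones above, so required-parameter checks run client-side before any network call. A small sketch of how a caller might rely on that, using the BucketLifecycleConfiguration shown above:

    // Sketch only: Rules is deliberately left nil to show the error path.
    cfg := &s3.BucketLifecycleConfiguration{}
    if err := cfg.Validate(); err != nil {
        // err is a request.ErrInvalidParams naming the missing
        // required field ("Rules").
        fmt.Println(err)
    }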
-func (s *BucketLoggingStatus) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"}
-	if s.LoggingEnabled != nil {
-		if err := s.LoggingEnabled.Validate(); err != nil {
-			invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams))
-		}
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-type CORSConfiguration struct {
-	_ struct{} `type:"structure"`
-
-	CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"`
-}
-
-// String returns the string representation
-func (s CORSConfiguration) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CORSConfiguration) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CORSConfiguration) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"}
-	if s.CORSRules == nil {
-		invalidParams.Add(request.NewErrParamRequired("CORSRules"))
-	}
-	if s.CORSRules != nil {
-		for i, v := range s.CORSRules {
-			if v == nil {
-				continue
-			}
-			if err := v.Validate(); err != nil {
-				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams))
-			}
-		}
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-type CORSRule struct {
-	_ struct{} `type:"structure"`
-
-	// Specifies which headers are allowed in a pre-flight OPTIONS request.
-	AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"`
-
-	// Identifies HTTP methods that the domain/origin specified in the rule is allowed
-	// to execute.
-	AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"`
-
-	// One or more origins you want customers to be able to access the bucket from.
-	AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"`
-
-	// One or more headers in the response that you want customers to be able to
-	// access from their applications (for example, from a JavaScript XMLHttpRequest
-	// object).
-	ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"`
-
-	// The time in seconds that your browser is to cache the preflight response
-	// for the specified resource.
-	MaxAgeSeconds *int64 `type:"integer"`
-}
-
-// String returns the string representation
-func (s CORSRule) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CORSRule) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CORSRule) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "CORSRule"}
-	if s.AllowedMethods == nil {
-		invalidParams.Add(request.NewErrParamRequired("AllowedMethods"))
-	}
-	if s.AllowedOrigins == nil {
-		invalidParams.Add(request.NewErrParamRequired("AllowedOrigins"))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-type CloudFunctionConfiguration struct {
-	_ struct{} `type:"structure"`
-
-	CloudFunction *string `type:"string"`
-
-	// Bucket event for which to send notifications.
-	Event *string `deprecated:"true" type:"string" enum:"Event"`
-
-	Events []*string `locationName:"Event" type:"list" flattened:"true"`
-
-	// Optional unique identifier for configurations in a notification configuration.
-	// If you don't provide one, Amazon S3 will assign an ID.
-	Id *string `type:"string"`
-
-	InvocationRole *string `type:"string"`
-}
-
-// String returns the string representation
-func (s CloudFunctionConfiguration) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CloudFunctionConfiguration) GoString() string {
-	return s.String()
-}
-
-type CommonPrefix struct {
-	_ struct{} `type:"structure"`
-
-	Prefix *string `type:"string"`
-}
-
-// String returns the string representation
-func (s CommonPrefix) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CommonPrefix) GoString() string {
-	return s.String()
-}
-
-type CompleteMultipartUploadInput struct {
-	_ struct{} `type:"structure" payload:"MultipartUpload"`
-
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
-	MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"`
-
-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
-	// Documentation on downloading objects from requester pays buckets can be found
-	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-
-	UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s CompleteMultipartUploadInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CompleteMultipartUploadInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CompleteMultipartUploadInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Key == nil {
-		invalidParams.Add(request.NewErrParamRequired("Key"))
-	}
-	if s.Key != nil && len(*s.Key) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
-	}
-	if s.UploadId == nil {
-		invalidParams.Add(request.NewErrParamRequired("UploadId"))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-type CompleteMultipartUploadOutput struct {
-	_ struct{} `type:"structure"`
-
-	Bucket *string `type:"string"`
-
-	// Entity tag of the object.
-	ETag *string `type:"string"`
-
-	// If the object expiration is configured, this will contain the expiration
-	// date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
-	Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
-
-	Key *string `min:"1" type:"string"`
-
-	Location *string `type:"string"`
-
-	// If present, indicates that the requester was successfully charged for the
-	// request.
-	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
-
-	// If present, specifies the ID of the AWS Key Management Service (KMS) master
-	// encryption key that was used for the object.
-	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
-
-	// The Server-side encryption algorithm used when storing this object in S3
-	// (e.g., AES256, aws:kms).
-	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
-
-	// Version of the object.
-	VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
-}
-
-// String returns the string representation
-func (s CompleteMultipartUploadOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CompleteMultipartUploadOutput) GoString() string {
-	return s.String()
-}
-
-type CompletedMultipartUpload struct {
-	_ struct{} `type:"structure"`
-
-	Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"`
-}
-
-// String returns the string representation
-func (s CompletedMultipartUpload) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CompletedMultipartUpload) GoString() string {
-	return s.String()
-}
-
-type CompletedPart struct {
-	_ struct{} `type:"structure"`
-
-	// Entity tag returned when the part was uploaded.
-	ETag *string `type:"string"`
-
-	// Part number that identifies the part. This is a positive integer between
-	// 1 and 10,000.
-	PartNumber *int64 `type:"integer"`
-}
-
-// String returns the string representation
-func (s CompletedPart) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CompletedPart) GoString() string {
-	return s.String()
-}
-
-type Condition struct {
-	_ struct{} `type:"structure"`
-
-	// The HTTP error code when the redirect is applied. In the event of an error,
-	// if the error code equals this value, then the specified redirect is applied.
-	// Required when parent element Condition is specified and sibling KeyPrefixEquals
-	// is not specified. If both are specified, then both must be true for the redirect
-	// to be applied.
-	HttpErrorCodeReturnedEquals *string `type:"string"`
-
-	// The object key name prefix when the redirect is applied. For example, to
-	// redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html.
-	// To redirect requests for all pages with the prefix docs/, the key prefix will
-	// be /docs, which identifies all objects in the docs/ folder. Required when
-	// the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
-	// is not specified. If both conditions are specified, both must be true for
-	// the redirect to be applied.
-	KeyPrefixEquals *string `type:"string"`
-}
-
-// String returns the string representation
-func (s Condition) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s Condition) GoString() string {
-	return s.String()
-}
-
-type CopyObjectInput struct {
-	_ struct{} `type:"structure"`
-
-	// The canned ACL to apply to the object.
-	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
-
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// Specifies caching behavior along the request/reply chain.
-	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
-
-	// Specifies presentational information for the object.
-	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
-
-	// Specifies what content encodings have been applied to the object and thus
-	// what decoding mechanisms must be applied to obtain the media-type referenced
-	// by the Content-Type header field.
-	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
-
-	// The language the content is in.
-	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
-
-	// A standard MIME type describing the format of the object data.
-	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
-
-	// The name of the source bucket and key name of the source object, separated
-	// by a slash (/). Must be URL-encoded.
-	CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
-
-	// Copies the object if its entity tag (ETag) matches the specified tag.
-	CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
-
-	// Copies the object if it has been modified since the specified time.
-	CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
-
-	// Copies the object if its entity tag (ETag) is different than the specified
-	// ETag.
-	CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
-
-	// Copies the object if it hasn't been modified since the specified time.
-	CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
-
-	// Specifies the algorithm to use when decrypting the source object (e.g., AES256).
-	CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
-
-	// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
-	// the source object. The encryption key provided in this header must be one
-	// that was used when the source object was created.
-	CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"`
-
-	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
-	// Amazon S3 uses this header for a message integrity check to ensure the encryption
-	// key was transmitted without error.
-	CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
-
-	// The date and time at which the object is no longer cacheable.
-	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
-
-	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
-	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
-
-	// Allows grantee to read the object data and its metadata.
-	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
-
-	// Allows grantee to read the object ACL.
-	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
-
-	// Allows grantee to write the ACL for the applicable object.
-	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
-
-	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
-	// A map of metadata to store with the object in S3.
-	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
-
-	// Specifies whether the metadata is copied from the source object or replaced
-	// with metadata provided in the request.
-	MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"`
-
-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
-	// Documentation on downloading objects from requester pays buckets can be found
-	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-
-	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
-	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
-	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
-	// data. This value is used to store the object and then it is discarded; Amazon
-	// does not store the encryption key. The key must be appropriate for use with
-	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
-	// header.
-	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
-
-	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
-	// Amazon S3 uses this header for a message integrity check to ensure the encryption
-	// key was transmitted without error.
-	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-
-	// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
-	// requests for an object protected by AWS KMS will fail if not made via SSL
-	// or using SigV4. Documentation on configuring any of the officially supported
-	// AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
-	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
-
-	// The Server-side encryption algorithm used when storing this object in S3
-	// (e.g., AES256, aws:kms).
-	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
-
-	// The type of storage to use for the object. Defaults to 'STANDARD'.
-	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
-
-	// If the bucket is configured as a website, redirects requests for this object
-	// to another object in the same bucket or to an external URL. Amazon S3 stores
-	// the value of this header in the object metadata.
-	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
-}
-
-// String returns the string representation
-func (s CopyObjectInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CopyObjectInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CopyObjectInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.CopySource == nil {
-		invalidParams.Add(request.NewErrParamRequired("CopySource"))
-	}
-	if s.Key == nil {
-		invalidParams.Add(request.NewErrParamRequired("Key"))
-	}
-	if s.Key != nil && len(*s.Key) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-type CopyObjectOutput struct {
-	_ struct{} `type:"structure" payload:"CopyObjectResult"`
-
-	CopyObjectResult *CopyObjectResult `type:"structure"`
-
-	CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
-
-	// If the object expiration is configured, the response includes this header.
-	Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
-
-	// If present, indicates that the requester was successfully charged for the
-	// request.
-	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
-
-	// If server-side encryption with a customer-provided encryption key was requested,
-	// the response will include this header confirming the encryption algorithm
-	// used.
-	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
-	// If server-side encryption with a customer-provided encryption key was requested,
-	// the response will include this header to provide round trip message integrity
-	// verification of the customer-provided encryption key.
-	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-
-	// If present, specifies the ID of the AWS Key Management Service (KMS) master
-	// encryption key that was used for the object.
-	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
-
-	// The Server-side encryption algorithm used when storing this object in S3
-	// (e.g., AES256, aws:kms).
-	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
-
-	// Version ID of the newly created copy.
-	VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
-}
-
-// String returns the string representation
-func (s CopyObjectOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CopyObjectOutput) GoString() string {
-	return s.String()
-}
-
-type CopyObjectResult struct {
-	_ struct{} `type:"structure"`
-
-	ETag *string `type:"string"`
-
-	LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
-}
-
-// String returns the string representation
-func (s CopyObjectResult) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CopyObjectResult) GoString() string {
-	return s.String()
-}
-
-type CopyPartResult struct {
-	_ struct{} `type:"structure"`
-
-	// Entity tag of the object.
-	ETag *string `type:"string"`
-
-	// Date and time at which the object was uploaded.
-	LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
-}
-
-// String returns the string representation
-func (s CopyPartResult) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CopyPartResult) GoString() string {
-	return s.String()
-}
-
-type CreateBucketConfiguration struct {
-	_ struct{} `type:"structure"`
-
-	// Specifies the region where the bucket will be created. If you don't specify
-	// a region, the bucket will be created in US Standard.
-	LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
-}
-
-// String returns the string representation
-func (s CreateBucketConfiguration) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CreateBucketConfiguration) GoString() string {
-	return s.String()
-}
-
-type CreateBucketInput struct {
-	_ struct{} `type:"structure" payload:"CreateBucketConfiguration"`
-
-	// The canned ACL to apply to the bucket.
-	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
-
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"`
-
-	// Allows grantee the read, write, read ACP, and write ACP permissions on the
-	// bucket.
-	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
-
-	// Allows grantee to list the objects in the bucket.
-	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
-
-	// Allows grantee to read the bucket ACL.
-	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
-
-	// Allows grantee to create, overwrite, and delete any object in the bucket.
-	GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
-
-	// Allows grantee to write the ACL for the applicable bucket.
-	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
-}
-
-// String returns the string representation
-func (s CreateBucketInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CreateBucketInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
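CreateBucketInput and CreateBucketConfiguration combine as sketched below, using the SDK's generated CreateBucket wrapper (not shown in this hunk); the bucket name and region are hypothetical, and omitting LocationConstraint creates the bucket in US Standard as the field doc above notes:

    // Sketch only: bucket name and region are hypothetical.
    out, err := svc.CreateBucket(&s3.CreateBucketInput{
        Bucket: aws.String("example-bucket"),
        CreateBucketConfiguration: &s3.CreateBucketConfiguration{
            LocationConstraint: aws.String("eu-west-1"),
        },
    })
    if err == nil {
        fmt.Println(aws.StringValue(out.Location)) // URL of the new bucket
    }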
-func (s *CreateBucketInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type CreateBucketOutput struct {
-    _ struct{} `type:"structure"`
-
-    Location *string `location:"header" locationName:"Location" type:"string"`
-}
-
-// String returns the string representation
-func (s CreateBucketOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CreateBucketOutput) GoString() string {
-    return s.String()
-}
-
-type CreateMultipartUploadInput struct {
-    _ struct{} `type:"structure"`
-
-    // The canned ACL to apply to the object.
-    ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-    // Specifies caching behavior along the request/reply chain.
-    CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
-
-    // Specifies presentational information for the object.
-    ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
-
-    // Specifies what content encodings have been applied to the object and thus
-    // what decoding mechanisms must be applied to obtain the media-type referenced
-    // by the Content-Type header field.
-    ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
-
-    // The language the content is in.
-    ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
-
-    // A standard MIME type describing the format of the object data.
-    ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
-
-    // The date and time at which the object is no longer cacheable.
-    Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
-
-    // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
-    GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
-
-    // Allows grantee to read the object data and its metadata.
-    GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
-
-    // Allows grantee to read the object ACL.
-    GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
-
-    // Allows grantee to write the ACL for the applicable object.
-    GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
-
-    Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
-    // A map of metadata to store with the object in S3.
-    Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
-
-    // Confirms that the requester knows that she or he will be charged for the
-    // request. Bucket owners need not specify this parameter in their requests.
-    // Documentation on downloading objects from requester pays buckets can be found
-    // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-    RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-
-    // Specifies the algorithm to use to when encrypting the object (e.g., AES256).
-    SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
-    // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
-    // data. This value is used to store the object and then it is discarded; Amazon
-    // does not store the encryption key. The key must be appropriate for use with
-    // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
-    // header.
-    SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
-
-    // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
-    // Amazon S3 uses this header for a message integrity check to ensure the encryption
-    // key was transmitted without error.
-    SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-
-    // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
-    // requests for an object protected by AWS KMS will fail if not made via SSL
-    // or using SigV4. Documentation on configuring any of the officially supported
-    // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
-    SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
-
-    // The Server-side encryption algorithm used when storing this object in S3
-    // (e.g., AES256, aws:kms).
-    ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
-
-    // The type of storage to use for the object. Defaults to 'STANDARD'.
-    StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
-
-    // If the bucket is configured as a website, redirects requests for this object
-    // to another object in the same bucket or to an external URL. Amazon S3 stores
-    // the value of this header in the object metadata.
-    WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
-}
-
-// String returns the string representation
-func (s CreateMultipartUploadInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CreateMultipartUploadInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CreateMultipartUploadInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-    if s.Key == nil {
-        invalidParams.Add(request.NewErrParamRequired("Key"))
-    }
-    if s.Key != nil && len(*s.Key) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("Key", 1))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type CreateMultipartUploadOutput struct {
-    _ struct{} `type:"structure"`
-
-    // Date when multipart upload will become eligible for abort operation by lifecycle.
-    AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"`
-
-    // Id of the lifecycle rule that makes a multipart upload eligible for abort
-    // operation.
-    AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
-
-    // Name of the bucket to which the multipart upload was initiated.
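CreateMultipartUploadOutput's remaining fields continue below. For orientation, a sketch of the initiate, upload-part, complete flow these shapes support; the bucket, key, region, and session-based client construction are assumptions, and real parts other than the last must be at least 5 MB:

package main

import (
    "bytes"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    // Credentials and region are assumed to come from the environment.
    svc := s3.New(session.Must(session.NewSession(&aws.Config{
        Region: aws.String("us-east-1"), // assumed region
    })))
    bucket, key := aws.String("example-bucket"), aws.String("example-key")

    // 1. Initiate the upload; the returned UploadId ties the parts together.
    create, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
        Bucket:      bucket,
        Key:         key,
        ContentType: aws.String("application/octet-stream"),
    })
    if err != nil {
        log.Fatal(err)
    }

    // 2. Upload a single part.
    part, err := svc.UploadPart(&s3.UploadPartInput{
        Bucket:     bucket,
        Key:        key,
        UploadId:   create.UploadId,
        PartNumber: aws.Int64(1),
        Body:       bytes.NewReader([]byte("hello")),
    })
    if err != nil {
        log.Fatal(err)
    }

    // 3. Complete, echoing back each part's ETag and number.
    _, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
        Bucket:   bucket,
        Key:      key,
        UploadId: create.UploadId,
        MultipartUpload: &s3.CompletedMultipartUpload{
            Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
        },
    })
    if err != nil {
        log.Fatal(err)
    }
}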
-    Bucket *string `locationName:"Bucket" type:"string"`
-
-    // Object key for which the multipart upload was initiated.
-    Key *string `min:"1" type:"string"`
-
-    // If present, indicates that the requester was successfully charged for the
-    // request.
-    RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
-
-    // If server-side encryption with a customer-provided encryption key was requested,
-    // the response will include this header confirming the encryption algorithm
-    // used.
-    SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
-    // If server-side encryption with a customer-provided encryption key was requested,
-    // the response will include this header to provide round trip message integrity
-    // verification of the customer-provided encryption key.
-    SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-
-    // If present, specifies the ID of the AWS Key Management Service (KMS) master
-    // encryption key that was used for the object.
-    SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
-
-    // The Server-side encryption algorithm used when storing this object in S3
-    // (e.g., AES256, aws:kms).
-    ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
-
-    // ID for the initiated multipart upload.
-    UploadId *string `type:"string"`
-}
-
-// String returns the string representation
-func (s CreateMultipartUploadOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s CreateMultipartUploadOutput) GoString() string {
-    return s.String()
-}
-
-type Delete struct {
-    _ struct{} `type:"structure"`
-
-    Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"`
-
-    // Element to enable quiet mode for the request. When you add this element,
-    // you must set its value to true.
-    Quiet *bool `type:"boolean"`
-}
-
-// String returns the string representation
-func (s Delete) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s Delete) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *Delete) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "Delete"}
-    if s.Objects == nil {
-        invalidParams.Add(request.NewErrParamRequired("Objects"))
-    }
-    if s.Objects != nil {
-        for i, v := range s.Objects {
-            if v == nil {
-                continue
-            }
-            if err := v.Validate(); err != nil {
-                invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams))
-            }
-        }
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type DeleteBucketCorsInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s DeleteBucketCorsInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteBucketCorsInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteBucketCorsInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type DeleteBucketCorsOutput struct {
-    _ struct{} `type:"structure"`
-}
-
-// String returns the string representation
-func (s DeleteBucketCorsOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteBucketCorsOutput) GoString() string {
-    return s.String()
-}
-
-type DeleteBucketInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s DeleteBucketInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteBucketInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteBucketInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type DeleteBucketLifecycleInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s DeleteBucketLifecycleInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteBucketLifecycleInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteBucketLifecycleInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type DeleteBucketLifecycleOutput struct {
-    _ struct{} `type:"structure"`
-}
-
-// String returns the string representation
-func (s DeleteBucketLifecycleOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteBucketLifecycleOutput) GoString() string {
-    return s.String()
-}
-
-type DeleteBucketOutput struct {
-    _ struct{} `type:"structure"`
-}
-
-// String returns the string representation
-func (s DeleteBucketOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteBucketOutput) GoString() string {
-    return s.String()
-}
-
-type DeleteBucketPolicyInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s DeleteBucketPolicyInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteBucketPolicyInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteBucketPolicyInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type DeleteBucketPolicyOutput struct {
-    _ struct{} `type:"structure"`
-}
-
-// String returns the string representation
-func (s DeleteBucketPolicyOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteBucketPolicyOutput) GoString() string {
-    return s.String()
-}
-
-type DeleteBucketReplicationInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s DeleteBucketReplicationInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteBucketReplicationInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteBucketReplicationInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type DeleteBucketReplicationOutput struct {
-    _ struct{} `type:"structure"`
-}
-
-// String returns the string representation
-func (s DeleteBucketReplicationOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteBucketReplicationOutput) GoString() string {
-    return s.String()
-}
-
-type DeleteBucketTaggingInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s DeleteBucketTaggingInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteBucketTaggingInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteBucketTaggingInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type DeleteBucketTaggingOutput struct {
-    _ struct{} `type:"structure"`
-}
-
-// String returns the string representation
-func (s DeleteBucketTaggingOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteBucketTaggingOutput) GoString() string {
-    return s.String()
-}
-
-type DeleteBucketWebsiteInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s DeleteBucketWebsiteInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteBucketWebsiteInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
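The DeleteBucket*Input/Output shapes above are generated and differ only in operation name. A sketch of how a caller tears down bucket configuration and then the (empty) bucket itself, under the same client-construction assumptions as the earlier sketch:

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    // Credentials and region are assumed to come from the environment.
    svc := s3.New(session.Must(session.NewSession()))
    bucket := aws.String("example-bucket") // hypothetical

    // Each configuration delete takes the same one-field input shape.
    if _, err := svc.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{Bucket: bucket}); err != nil {
        log.Println("delete policy:", err)
    }
    if _, err := svc.DeleteBucketTagging(&s3.DeleteBucketTaggingInput{Bucket: bucket}); err != nil {
        log.Println("delete tagging:", err)
    }
    // The bucket itself can only be deleted once it is empty.
    if _, err := svc.DeleteBucket(&s3.DeleteBucketInput{Bucket: bucket}); err != nil {
        log.Println("delete bucket:", err)
    }
}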
-func (s *DeleteBucketWebsiteInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type DeleteBucketWebsiteOutput struct {
-    _ struct{} `type:"structure"`
-}
-
-// String returns the string representation
-func (s DeleteBucketWebsiteOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteBucketWebsiteOutput) GoString() string {
-    return s.String()
-}
-
-type DeleteMarkerEntry struct {
-    _ struct{} `type:"structure"`
-
-    // Specifies whether the object is (true) or is not (false) the latest version
-    // of an object.
-    IsLatest *bool `type:"boolean"`
-
-    // The object key.
-    Key *string `min:"1" type:"string"`
-
-    // Date and time the object was last modified.
-    LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
-
-    Owner *Owner `type:"structure"`
-
-    // Version ID of an object.
-    VersionId *string `type:"string"`
-}
-
-// String returns the string representation
-func (s DeleteMarkerEntry) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteMarkerEntry) GoString() string {
-    return s.String()
-}
-
-type DeleteObjectInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-    Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
-    // The concatenation of the authentication device's serial number, a space,
-    // and the value that is displayed on your authentication device.
-    MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
-
-    // Confirms that the requester knows that she or he will be charged for the
-    // request. Bucket owners need not specify this parameter in their requests.
-    // Documentation on downloading objects from requester pays buckets can be found
-    // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-    RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-
-    // VersionId used to reference a specific version of the object.
-    VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
-}
-
-// String returns the string representation
-func (s DeleteObjectInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteObjectInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteObjectInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-    if s.Key == nil {
-        invalidParams.Add(request.NewErrParamRequired("Key"))
-    }
-    if s.Key != nil && len(*s.Key) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("Key", 1))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type DeleteObjectOutput struct {
-    _ struct{} `type:"structure"`
-
-    // Specifies whether the versioned object that was permanently deleted was (true)
-    // or was not (false) a delete marker.
-    DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
-
-    // If present, indicates that the requester was successfully charged for the
-    // request.
-    RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
-
-    // Returns the version ID of the delete marker created as a result of the DELETE
-    // operation.
-    VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
-}
-
-// String returns the string representation
-func (s DeleteObjectOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteObjectOutput) GoString() string {
-    return s.String()
-}
-
-type DeleteObjectsInput struct {
-    _ struct{} `type:"structure" payload:"Delete"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-    Delete *Delete `locationName:"Delete" type:"structure" required:"true"`
-
-    // The concatenation of the authentication device's serial number, a space,
-    // and the value that is displayed on your authentication device.
-    MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
-
-    // Confirms that the requester knows that she or he will be charged for the
-    // request. Bucket owners need not specify this parameter in their requests.
-    // Documentation on downloading objects from requester pays buckets can be found
-    // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-    RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-}
-
-// String returns the string representation
-func (s DeleteObjectsInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteObjectsInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteObjectsInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-    if s.Delete == nil {
-        invalidParams.Add(request.NewErrParamRequired("Delete"))
-    }
-    if s.Delete != nil {
-        if err := s.Delete.Validate(); err != nil {
-            invalidParams.AddNested("Delete", err.(request.ErrInvalidParams))
-        }
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type DeleteObjectsOutput struct {
-    _ struct{} `type:"structure"`
-
-    Deleted []*DeletedObject `type:"list" flattened:"true"`
-
-    Errors []*Error `locationName:"Error" type:"list" flattened:"true"`
-
-    // If present, indicates that the requester was successfully charged for the
-    // request.
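DeleteObjectsOutput's remaining field follows below. A sketch of a batch delete against these shapes, with hypothetical bucket and keys; setting Quiet would suppress the per-key success entries:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    // Credentials and region are assumed to come from the environment.
    svc := s3.New(session.Must(session.NewSession()))

    out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
        Bucket: aws.String("example-bucket"), // hypothetical
        Delete: &s3.Delete{
            Objects: []*s3.ObjectIdentifier{
                {Key: aws.String("logs/2016-01-01.gz")},
                {Key: aws.String("logs/2016-01-02.gz")},
            },
            Quiet: aws.Bool(false),
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    // Partial failure is reported per key, not as a request-level error.
    for _, d := range out.Deleted {
        fmt.Println("deleted:", aws.StringValue(d.Key))
    }
    for _, e := range out.Errors {
        fmt.Println("failed:", aws.StringValue(e.Key), aws.StringValue(e.Code))
    }
}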
-    RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
-}
-
-// String returns the string representation
-func (s DeleteObjectsOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeleteObjectsOutput) GoString() string {
-    return s.String()
-}
-
-type DeletedObject struct {
-    _ struct{} `type:"structure"`
-
-    DeleteMarker *bool `type:"boolean"`
-
-    DeleteMarkerVersionId *string `type:"string"`
-
-    Key *string `min:"1" type:"string"`
-
-    VersionId *string `type:"string"`
-}
-
-// String returns the string representation
-func (s DeletedObject) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DeletedObject) GoString() string {
-    return s.String()
-}
-
-type Destination struct {
-    _ struct{} `type:"structure"`
-
-    // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store
-    // replicas of the object identified by the rule.
-    Bucket *string `type:"string" required:"true"`
-
-    // The class of storage used to store the object.
-    StorageClass *string `type:"string" enum:"StorageClass"`
-}
-
-// String returns the string representation
-func (s Destination) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s Destination) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *Destination) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "Destination"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type Error struct {
-    _ struct{} `type:"structure"`
-
-    Code *string `type:"string"`
-
-    Key *string `min:"1" type:"string"`
-
-    Message *string `type:"string"`
-
-    VersionId *string `type:"string"`
-}
-
-// String returns the string representation
-func (s Error) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s Error) GoString() string {
-    return s.String()
-}
-
-type ErrorDocument struct {
-    _ struct{} `type:"structure"`
-
-    // The object key name to use when a 4XX class error occurs.
-    Key *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s ErrorDocument) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s ErrorDocument) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ErrorDocument) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"}
-    if s.Key == nil {
-        invalidParams.Add(request.NewErrParamRequired("Key"))
-    }
-    if s.Key != nil && len(*s.Key) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("Key", 1))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-// Container for key value pair that defines the criteria for the filter rule.
-type FilterRule struct {
-    _ struct{} `type:"structure"`
-
-    // Object key name prefix or suffix identifying one or more objects to which
-    // the filtering rule applies. Maximum prefix length can be up to 1,024 characters.
-    // Overlapping prefixes and suffixes are not supported. For more information,
-    // go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
-    // in the Amazon Simple Storage Service Developer Guide.
-    Name *string `type:"string" enum:"FilterRuleName"`
-
-    Value *string `type:"string"`
-}
-
-// String returns the string representation
-func (s FilterRule) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s FilterRule) GoString() string {
-    return s.String()
-}
-
-type GetBucketAccelerateConfigurationInput struct {
-    _ struct{} `type:"structure"`
-
-    // Name of the bucket for which the accelerate configuration is retrieved.
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s GetBucketAccelerateConfigurationInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketAccelerateConfigurationInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketAccelerateConfigurationInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type GetBucketAccelerateConfigurationOutput struct {
-    _ struct{} `type:"structure"`
-
-    // The accelerate configuration of the bucket.
-    Status *string `type:"string" enum:"BucketAccelerateStatus"`
-}
-
-// String returns the string representation
-func (s GetBucketAccelerateConfigurationOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketAccelerateConfigurationOutput) GoString() string {
-    return s.String()
-}
-
-type GetBucketAclInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s GetBucketAclInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketAclInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketAclInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type GetBucketAclOutput struct {
-    _ struct{} `type:"structure"`
-
-    // A list of grants.
-    Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
-
-    Owner *Owner `type:"structure"`
-}
-
-// String returns the string representation
-func (s GetBucketAclOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketAclOutput) GoString() string {
-    return s.String()
-}
-
-type GetBucketCorsInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s GetBucketCorsInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketCorsInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketCorsInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type GetBucketCorsOutput struct {
-    _ struct{} `type:"structure"`
-
-    CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"`
-}
-
-// String returns the string representation
-func (s GetBucketCorsOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketCorsOutput) GoString() string {
-    return s.String()
-}
-
-type GetBucketLifecycleConfigurationInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s GetBucketLifecycleConfigurationInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketLifecycleConfigurationInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketLifecycleConfigurationInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type GetBucketLifecycleConfigurationOutput struct {
-    _ struct{} `type:"structure"`
-
-    Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"`
-}
-
-// String returns the string representation
-func (s GetBucketLifecycleConfigurationOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketLifecycleConfigurationOutput) GoString() string {
-    return s.String()
-}
-
-type GetBucketLifecycleInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s GetBucketLifecycleInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketLifecycleInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
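A sketch of reading a bucket ACL through the GetBucketAclInput/Output shapes above and walking the Grants list (hypothetical bucket; client construction as in the earlier sketches):

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    // Credentials and region are assumed to come from the environment.
    svc := s3.New(session.Must(session.NewSession()))

    out, err := svc.GetBucketAcl(&s3.GetBucketAclInput{
        Bucket: aws.String("example-bucket"), // hypothetical
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("owner:", aws.StringValue(out.Owner.DisplayName))
    for _, g := range out.Grants {
        // Grantee.Type distinguishes canonical users, groups, and email grants.
        fmt.Printf("%s -> %s\n", aws.StringValue(g.Grantee.Type), aws.StringValue(g.Permission))
    }
}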
-func (s *GetBucketLifecycleInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type GetBucketLifecycleOutput struct {
-    _ struct{} `type:"structure"`
-
-    Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"`
-}
-
-// String returns the string representation
-func (s GetBucketLifecycleOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketLifecycleOutput) GoString() string {
-    return s.String()
-}
-
-type GetBucketLocationInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s GetBucketLocationInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketLocationInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketLocationInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type GetBucketLocationOutput struct {
-    _ struct{} `type:"structure"`
-
-    LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
-}
-
-// String returns the string representation
-func (s GetBucketLocationOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketLocationOutput) GoString() string {
-    return s.String()
-}
-
-type GetBucketLoggingInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s GetBucketLoggingInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketLoggingInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketLoggingInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type GetBucketLoggingOutput struct {
-    _ struct{} `type:"structure"`
-
-    LoggingEnabled *LoggingEnabled `type:"structure"`
-}
-
-// String returns the string representation
-func (s GetBucketLoggingOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketLoggingOutput) GoString() string {
-    return s.String()
-}
-
-type GetBucketNotificationConfigurationRequest struct {
-    _ struct{} `type:"structure"`
-
-    // Name of the bucket to get the notification configuration for.
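GetBucketNotificationConfigurationRequest's remaining field continues below. Since a nil LocationConstraint in GetBucketLocationOutput denotes US Standard, resolving a bucket's region takes one extra step; a sketch under the same assumptions as above:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    // Credentials and region are assumed to come from the environment.
    svc := s3.New(session.Must(session.NewSession()))

    out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
        Bucket: aws.String("example-bucket"), // hypothetical
    })
    if err != nil {
        log.Fatal(err)
    }
    // An empty/nil LocationConstraint means US Standard (us-east-1).
    region := aws.StringValue(out.LocationConstraint)
    if region == "" {
        region = "us-east-1"
    }
    fmt.Println("bucket region:", region)
}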
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s GetBucketNotificationConfigurationRequest) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketNotificationConfigurationRequest) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketNotificationConfigurationRequest) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type GetBucketPolicyInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s GetBucketPolicyInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketPolicyInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketPolicyInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type GetBucketPolicyOutput struct {
-    _ struct{} `type:"structure" payload:"Policy"`
-
-    // The bucket policy as a JSON document.
-    Policy *string `type:"string"`
-}
-
-// String returns the string representation
-func (s GetBucketPolicyOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketPolicyOutput) GoString() string {
-    return s.String()
-}
-
-type GetBucketReplicationInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s GetBucketReplicationInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketReplicationInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketReplicationInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type GetBucketReplicationOutput struct {
-    _ struct{} `type:"structure" payload:"ReplicationConfiguration"`
-
-    // Container for replication rules. You can add as many as 1,000 rules. Total
-    // replication configuration size can be up to 2 MB.
-    ReplicationConfiguration *ReplicationConfiguration `type:"structure"`
-}
-
-// String returns the string representation
-func (s GetBucketReplicationOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketReplicationOutput) GoString() string {
-    return s.String()
-}
-
-type GetBucketRequestPaymentInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s GetBucketRequestPaymentInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketRequestPaymentInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketRequestPaymentInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type GetBucketRequestPaymentOutput struct {
-    _ struct{} `type:"structure"`
-
-    // Specifies who pays for the download and request fees.
-    Payer *string `type:"string" enum:"Payer"`
-}
-
-// String returns the string representation
-func (s GetBucketRequestPaymentOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketRequestPaymentOutput) GoString() string {
-    return s.String()
-}
-
-type GetBucketTaggingInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s GetBucketTaggingInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketTaggingInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketTaggingInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type GetBucketTaggingOutput struct {
-    _ struct{} `type:"structure"`
-
-    TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
-}
-
-// String returns the string representation
-func (s GetBucketTaggingOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketTaggingOutput) GoString() string {
-    return s.String()
-}
-
-type GetBucketVersioningInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s GetBucketVersioningInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketVersioningInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
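A sketch of listing a bucket's TagSet via the GetBucketTagging shapes above (hypothetical bucket; S3 reports an error such as NoSuchTagSet when the bucket has no tags, though the exact code may vary by revision):

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    // Credentials and region are assumed to come from the environment.
    svc := s3.New(session.Must(session.NewSession()))

    out, err := svc.GetBucketTagging(&s3.GetBucketTaggingInput{
        Bucket: aws.String("example-bucket"), // hypothetical
    })
    if err != nil {
        log.Fatal(err) // includes the "no tag set" case
    }
    for _, t := range out.TagSet {
        fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
    }
}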
-func (s *GetBucketVersioningInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type GetBucketVersioningOutput struct {
-    _ struct{} `type:"structure"`
-
-    // Specifies whether MFA delete is enabled in the bucket versioning configuration.
-    // This element is only returned if the bucket has been configured with MFA
-    // delete. If the bucket has never been so configured, this element is not returned.
-    MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"`
-
-    // The versioning state of the bucket.
-    Status *string `type:"string" enum:"BucketVersioningStatus"`
-}
-
-// String returns the string representation
-func (s GetBucketVersioningOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketVersioningOutput) GoString() string {
-    return s.String()
-}
-
-type GetBucketWebsiteInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s GetBucketWebsiteInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketWebsiteInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketWebsiteInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type GetBucketWebsiteOutput struct {
-    _ struct{} `type:"structure"`
-
-    ErrorDocument *ErrorDocument `type:"structure"`
-
-    IndexDocument *IndexDocument `type:"structure"`
-
-    RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
-
-    RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
-}
-
-// String returns the string representation
-func (s GetBucketWebsiteOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetBucketWebsiteOutput) GoString() string {
-    return s.String()
-}
-
-type GetObjectAclInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-    Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
-    // Confirms that the requester knows that she or he will be charged for the
-    // request. Bucket owners need not specify this parameter in their requests.
-    // Documentation on downloading objects from requester pays buckets can be found
-    // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-    RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-
-    // VersionId used to reference a specific version of the object.
-    VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
-}
-
-// String returns the string representation
-func (s GetObjectAclInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetObjectAclInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetObjectAclInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-    if s.Key == nil {
-        invalidParams.Add(request.NewErrParamRequired("Key"))
-    }
-    if s.Key != nil && len(*s.Key) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("Key", 1))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type GetObjectAclOutput struct {
-    _ struct{} `type:"structure"`
-
-    // A list of grants.
-    Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
-
-    Owner *Owner `type:"structure"`
-
-    // If present, indicates that the requester was successfully charged for the
-    // request.
-    RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
-}
-
-// String returns the string representation
-func (s GetObjectAclOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetObjectAclOutput) GoString() string {
-    return s.String()
-}
-
-type GetObjectInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-    // Return the object only if its entity tag (ETag) is the same as the one specified,
-    // otherwise return a 412 (precondition failed).
-    IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
-
-    // Return the object only if it has been modified since the specified time,
-    // otherwise return a 304 (not modified).
-    IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`
-
-    // Return the object only if its entity tag (ETag) is different from the one
-    // specified, otherwise return a 304 (not modified).
-    IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
-
-    // Return the object only if it has not been modified since the specified time,
-    // otherwise return a 412 (precondition failed).
-    IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
-
-    Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
-    // Downloads the specified range bytes of an object. For more information about
-    // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
-    Range *string `location:"header" locationName:"Range" type:"string"`
-
-    // Confirms that the requester knows that she or he will be charged for the
-    // request. Bucket owners need not specify this parameter in their requests.
-    // Documentation on downloading objects from requester pays buckets can be found
-    // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-    RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-
-    // Sets the Cache-Control header of the response.
-    ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"`
-
-    // Sets the Content-Disposition header of the response
-    ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"`
-
-    // Sets the Content-Encoding header of the response.
-    ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"`
-
-    // Sets the Content-Language header of the response.
-    ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"`
-
-    // Sets the Content-Type header of the response.
-    ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
-
-    // Sets the Expires header of the response.
-    ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"`
-
-    // Specifies the algorithm to use to when encrypting the object (e.g., AES256).
-    SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
-    // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
-    // data. This value is used to store the object and then it is discarded; Amazon
-    // does not store the encryption key. The key must be appropriate for use with
-    // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
-    // header.
-    SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
-
-    // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
-    // Amazon S3 uses this header for a message integrity check to ensure the encryption
-    // key was transmitted without error.
-    SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-
-    // VersionId used to reference a specific version of the object.
-    VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
-}
-
-// String returns the string representation
-func (s GetObjectInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetObjectInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetObjectInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-    if s.Key == nil {
-        invalidParams.Add(request.NewErrParamRequired("Key"))
-    }
-    if s.Key != nil && len(*s.Key) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("Key", 1))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type GetObjectOutput struct {
-    _ struct{} `type:"structure" payload:"Body"`
-
-    AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
-
-    // Object data.
-    Body io.ReadCloser `type:"blob"`
-
-    // Specifies caching behavior along the request/reply chain.
-    CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
-
-    // Specifies presentational information for the object.
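GetObjectOutput's remaining fields continue below. A sketch of a ranged GET that also overrides a response header via the Response* querystring members, then streams Body (hypothetical names, same client assumptions as above):

package main

import (
    "fmt"
    "io/ioutil"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    // Credentials and region are assumed to come from the environment.
    svc := s3.New(session.Must(session.NewSession()))

    out, err := svc.GetObject(&s3.GetObjectInput{
        Bucket: aws.String("example-bucket"), // hypothetical
        Key:    aws.String("example-key"),
        // Fetch only the first KiB; ContentRange in the reply echoes the slice.
        Range: aws.String("bytes=0-1023"),
        // Response-* members override reply headers without touching the object.
        ResponseContentType: aws.String("application/octet-stream"),
    })
    if err != nil {
        log.Fatal(err)
    }
    defer out.Body.Close() // Body is an io.ReadCloser streaming the payload

    data, err := ioutil.ReadAll(out.Body)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(len(data), "bytes,", aws.StringValue(out.ContentRange))
}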
-    ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
-
-    // Specifies what content encodings have been applied to the object and thus
-    // what decoding mechanisms must be applied to obtain the media-type referenced
-    // by the Content-Type header field.
-    ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
-
-    // The language the content is in.
-    ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
-
-    // Size of the body in bytes.
-    ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
-
-    // The portion of the object returned in the response.
-    ContentRange *string `location:"header" locationName:"Content-Range" type:"string"`
-
-    // A standard MIME type describing the format of the object data.
-    ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
-
-    // Specifies whether the object retrieved was (true) or was not (false) a Delete
-    // Marker. If false, this response header does not appear in the response.
-    DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
-
-    // An ETag is an opaque identifier assigned by a web server to a specific version
-    // of a resource found at a URL
-    ETag *string `location:"header" locationName:"ETag" type:"string"`
-
-    // If the object expiration is configured (see PUT Bucket lifecycle), the response
-    // includes this header. It includes the expiry-date and rule-id key value pairs
-    // providing object expiration information. The value of the rule-id is URL
-    // encoded.
-    Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
-
-    // The date and time at which the object is no longer cacheable.
-    Expires *string `location:"header" locationName:"Expires" type:"string"`
-
-    // Last modified date of the object
-    LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"`
-
-    // A map of metadata to store with the object in S3.
-    Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
-
-    // This is set to the number of metadata entries not returned in x-amz-meta
-    // headers. This can happen if you create metadata using an API like SOAP that
-    // supports more flexible metadata than the REST API. For example, using SOAP,
-    // you can create metadata whose values are not legal HTTP headers.
-    MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
-
-    ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
-
-    // If present, indicates that the requester was successfully charged for the
-    // request.
-    RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
-
-    // Provides information about object restoration operation and expiration time
-    // of the restored object copy.
-    Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
-
-    // If server-side encryption with a customer-provided encryption key was requested,
-    // the response will include this header confirming the encryption algorithm
-    // used.
-    SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
-    // If server-side encryption with a customer-provided encryption key was requested,
-    // the response will include this header to provide round trip message integrity
-    // verification of the customer-provided encryption key.
-    SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-
-    // If present, specifies the ID of the AWS Key Management Service (KMS) master
-    // encryption key that was used for the object.
-    SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
-
-    // The Server-side encryption algorithm used when storing this object in S3
-    // (e.g., AES256, aws:kms).
-    ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
-
-    StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
-
-    // Version of the object.
-    VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
-
-    // If the bucket is configured as a website, redirects requests for this object
-    // to another object in the same bucket or to an external URL. Amazon S3 stores
-    // the value of this header in the object metadata.
-    WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
-}
-
-// String returns the string representation
-func (s GetObjectOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetObjectOutput) GoString() string {
-    return s.String()
-}
-
-type GetObjectTorrentInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-    Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
-    // Confirms that the requester knows that she or he will be charged for the
-    // request. Bucket owners need not specify this parameter in their requests.
-    // Documentation on downloading objects from requester pays buckets can be found
-    // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-    RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-}
-
-// String returns the string representation
-func (s GetObjectTorrentInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetObjectTorrentInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetObjectTorrentInput) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"}
-    if s.Bucket == nil {
-        invalidParams.Add(request.NewErrParamRequired("Bucket"))
-    }
-    if s.Key == nil {
-        invalidParams.Add(request.NewErrParamRequired("Key"))
-    }
-    if s.Key != nil && len(*s.Key) < 1 {
-        invalidParams.Add(request.NewErrParamMinLen("Key", 1))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type GetObjectTorrentOutput struct {
-    _ struct{} `type:"structure" payload:"Body"`
-
-    Body io.ReadCloser `type:"blob"`
-
-    // If present, indicates that the requester was successfully charged for the
-    // request.
-    RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
-}
-
-// String returns the string representation
-func (s GetObjectTorrentOutput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s GetObjectTorrentOutput) GoString() string {
-    return s.String()
-}
-
-type Grant struct {
-    _ struct{} `type:"structure"`
-
-    Grantee *Grantee `type:"structure"`
-
-    // Specifies the permission given to the grantee.
-    Permission *string `type:"string" enum:"Permission"`
-}
-
-// String returns the string representation
-func (s Grant) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s Grant) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *Grant) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "Grant"}
-    if s.Grantee != nil {
-        if err := s.Grantee.Validate(); err != nil {
-            invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams))
-        }
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type Grantee struct {
-    _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
-
-    // Screen name of the grantee.
-    DisplayName *string `type:"string"`
-
-    // Email address of the grantee.
-    EmailAddress *string `type:"string"`
-
-    // The canonical user ID of the grantee.
-    ID *string `type:"string"`
-
-    // Type of grantee
-    Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"`
-
-    // URI of the grantee group.
-    URI *string `type:"string"`
-}
-
-// String returns the string representation
-func (s Grantee) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s Grantee) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *Grantee) Validate() error {
-    invalidParams := request.ErrInvalidParams{Context: "Grantee"}
-    if s.Type == nil {
-        invalidParams.Add(request.NewErrParamRequired("Type"))
-    }
-
-    if invalidParams.Len() > 0 {
-        return invalidParams
-    }
-    return nil
-}
-
-type HeadBucketInput struct {
-    _ struct{} `type:"structure"`
-
-    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s HeadBucketInput) String() string {
-    return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s HeadBucketInput) GoString() string {
-    return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
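Because HEAD responses carry no XML error body, callers typically probe buckets by HTTP status code. A sketch using HeadBucket with the awserr.RequestFailure interface (assumed available in the vendored aws-sdk-go revision; bucket name hypothetical):

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    // Credentials and region are assumed to come from the environment.
    svc := s3.New(session.Must(session.NewSession()))

    _, err := svc.HeadBucket(&s3.HeadBucketInput{
        Bucket: aws.String("example-bucket"), // hypothetical
    })
    if err == nil {
        fmt.Println("bucket exists and is reachable")
        return
    }
    // With no response body to parse, the HTTP status is the signal.
    if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 404 {
        fmt.Println("bucket does not exist")
        return
    }
    log.Fatal(err)
}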
-func (s *HeadBucketInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type HeadBucketOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s HeadBucketOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HeadBucketOutput) GoString() string { - return s.String() -} - -type HeadObjectInput struct { - _ struct{} `type:"structure"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Return the object only if its entity tag (ETag) is the same as the one specified, - // otherwise return a 412 (precondition failed). - IfMatch *string `location:"header" locationName:"If-Match" type:"string"` - - // Return the object only if it has been modified since the specified time, - // otherwise return a 304 (not modified). - IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` - - // Return the object only if its entity tag (ETag) is different from the one - // specified, otherwise return a 304 (not modified). - IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` - - // Return the object only if it has not been modified since the specified time, - // otherwise return a 412 (precondition failed). - IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` - - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Downloads the specified range bytes of an object. For more information about - // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. - Range *string `location:"header" locationName:"Range" type:"string"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. 
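HeadBucketInput carries only the bucket name, which makes HeadBucket a cheap existence and permission probe. A sketch, assuming default credentials and a hypothetical bucket name:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// HeadBucket returns an error (typically a 404 or 403) when the bucket is
	// missing or inaccessible; an empty HeadBucketOutput means it exists.
	_, err := svc.HeadBucket(&s3.HeadBucketInput{
		Bucket: aws.String("my-example-bucket"), // hypothetical name
	})
	if err != nil {
		log.Fatalf("bucket not reachable: %v", err)
	}
	fmt.Println("bucket exists and is accessible")
}
```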
- SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // VersionId used to reference a specific version of the object. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s HeadObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HeadObjectInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *HeadObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type HeadObjectOutput struct { - _ struct{} `type:"structure"` - - AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // Size of the body in bytes. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // Specifies whether the object retrieved was (true) or was not (false) a Delete - // Marker. If false, this response header does not appear in the response. - DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` - - // An ETag is an opaque identifier assigned by a web server to a specific version - // of a resource found at a URL - ETag *string `location:"header" locationName:"ETag" type:"string"` - - // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key value pairs - // providing object expiration information. The value of the rule-id is URL - // encoded. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - // The date and time at which the object is no longer cacheable. - Expires *string `location:"header" locationName:"Expires" type:"string"` - - // Last modified date of the object - LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` - - // A map of metadata to store with the object in S3. - Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - - // This is set to the number of metadata entries not returned in x-amz-meta - // headers. 
This can happen if you create metadata using an API like SOAP that - // supports more flexible metadata than the REST API. For example, using SOAP, - // you can create metadata whose values are not legal HTTP headers. - MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` - - ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // Provides information about object restoration operation and expiration time - // of the restored object copy. - Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - - // Version of the object. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` - - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` -} - -// String returns the string representation -func (s HeadObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HeadObjectOutput) GoString() string { - return s.String() -} - -type IndexDocument struct { - _ struct{} `type:"structure"` - - // A suffix that is appended to a request that is for a directory on the website - // endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ - // the data that is returned will be for the object with the key name images/index.html) - // The suffix must not be empty and must not include a slash character. 
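HeadObjectInput mirrors GetObjectInput's conditional and SSE-C headers but transfers no body, so it suits metadata checks. A sketch using hypothetical bucket and key names:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Fetch only the headers for a hypothetical key; no body is transferred.
	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("my-example-bucket"),
		Key:    aws.String("reports/2016/summary.csv"),
	})
	if err != nil {
		log.Fatalf("HeadObject failed: %v", err)
	}

	fmt.Printf("size=%d bytes, etag=%s, type=%s\n",
		aws.Int64Value(out.ContentLength),
		aws.StringValue(out.ETag),
		aws.StringValue(out.ContentType))
	for k, v := range out.Metadata {
		fmt.Printf("x-amz-meta-%s: %s\n", k, aws.StringValue(v))
	}
}
```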
- Suffix *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s IndexDocument) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s IndexDocument) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *IndexDocument) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "IndexDocument"} - if s.Suffix == nil { - invalidParams.Add(request.NewErrParamRequired("Suffix")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type Initiator struct { - _ struct{} `type:"structure"` - - // Name of the Principal. - DisplayName *string `type:"string"` - - // If the principal is an AWS account, it provides the Canonical User ID. If - // the principal is an IAM User, it provides a user ARN value. - ID *string `type:"string"` -} - -// String returns the string representation -func (s Initiator) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Initiator) GoString() string { - return s.String() -} - -// Container for object key name prefix and suffix filtering rules. -type KeyFilter struct { - _ struct{} `type:"structure"` - - // A list of containers for key value pair that defines the criteria for the - // filter rule. - FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s KeyFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s KeyFilter) GoString() string { - return s.String() -} - -// Container for specifying the AWS Lambda notification configuration. -type LambdaFunctionConfiguration struct { - _ struct{} `type:"structure"` - - Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - - // Container for object key name filtering rules. For information about key - // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. - Filter *NotificationConfigurationFilter `type:"structure"` - - // Optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` - - // Lambda cloud function ARN that Amazon S3 can invoke when it detects events - // of the specified type. - LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` -} - -// String returns the string representation -func (s LambdaFunctionConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LambdaFunctionConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
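KeyFilter's prefix and suffix rules combine with LambdaFunctionConfiguration as shown above; Events and LambdaFunctionArn are the required fields. A sketch with a hypothetical function ARN:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Invoke a hypothetical Lambda function for JPEGs created under images/.
	cfg := &s3.LambdaFunctionConfiguration{
		Events:            []*string{aws.String("s3:ObjectCreated:*")},
		LambdaFunctionArn: aws.String("arn:aws:lambda:us-east-1:123456789012:function:thumbnailer"),
		Filter: &s3.NotificationConfigurationFilter{
			Key: &s3.KeyFilter{
				FilterRules: []*s3.FilterRule{
					{Name: aws.String("prefix"), Value: aws.String("images/")},
					{Name: aws.String("suffix"), Value: aws.String(".jpg")},
				},
			},
		},
	}

	// Catches a missing Events or LambdaFunctionArn before the request is sent.
	if err := cfg.Validate(); err != nil {
		fmt.Println("invalid configuration:", err)
	}
}
```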
-func (s *LambdaFunctionConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"} - if s.Events == nil { - invalidParams.Add(request.NewErrParamRequired("Events")) - } - if s.LambdaFunctionArn == nil { - invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type LifecycleConfiguration struct { - _ struct{} `type:"structure"` - - Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` -} - -// String returns the string representation -func (s LifecycleConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LifecycleConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *LifecycleConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"} - if s.Rules == nil { - invalidParams.Add(request.NewErrParamRequired("Rules")) - } - if s.Rules != nil { - for i, v := range s.Rules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type LifecycleExpiration struct { - _ struct{} `type:"structure"` - - // Indicates at what date the object is to be moved or deleted. Should be in - // GMT ISO 8601 Format. - Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - // Indicates the lifetime, in days, of the objects that are subject to the rule. - // The value must be a non-zero positive integer. - Days *int64 `type:"integer"` - - // Indicates whether Amazon S3 will remove a delete marker with no noncurrent - // versions. If set to true, the delete marker will be expired; if set to false - // the policy takes no action. This cannot be specified with Days or Date in - // a Lifecycle Expiration Policy. - ExpiredObjectDeleteMarker *bool `type:"boolean"` -} - -// String returns the string representation -func (s LifecycleExpiration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LifecycleExpiration) GoString() string { - return s.String() -} - -type LifecycleRule struct { - _ struct{} `type:"structure"` - - // Specifies the days since the initiation of an Incomplete Multipart Upload - // that Lifecycle will wait before permanently removing all parts of the upload. - AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` - - Expiration *LifecycleExpiration `type:"structure"` - - // Unique identifier for the rule. The value cannot be longer than 255 characters. - ID *string `type:"string"` - - // Specifies when noncurrent object versions expire. Upon expiration, Amazon - // S3 permanently deletes the noncurrent object versions. You set this lifecycle - // configuration action on a bucket that has versioning enabled (or suspended) - // to request that Amazon S3 delete noncurrent object versions at a specific - // period in the object's lifetime. - NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` - - NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` - - // Prefix identifying one or more objects to which the rule applies. 
- Prefix *string `type:"string" required:"true"` - - // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule - // is not currently being applied. - Status *string `type:"string" required:"true" enum:"ExpirationStatus"` - - Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s LifecycleRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LifecycleRule) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *LifecycleRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"} - if s.Prefix == nil { - invalidParams.Add(request.NewErrParamRequired("Prefix")) - } - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type ListBucketsInput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s ListBucketsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBucketsInput) GoString() string { - return s.String() -} - -type ListBucketsOutput struct { - _ struct{} `type:"structure"` - - Buckets []*Bucket `locationNameList:"Bucket" type:"list"` - - Owner *Owner `type:"structure"` -} - -// String returns the string representation -func (s ListBucketsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBucketsOutput) GoString() string { - return s.String() -} - -type ListMultipartUploadsInput struct { - _ struct{} `type:"structure"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Character you use to group keys. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in - // XML 1.0, you can add this parameter to request that Amazon S3 encode the - // keys in the response. - EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - - // Together with upload-id-marker, this parameter specifies the multipart upload - // after which listing should begin. - KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` - - // Sets the maximum number of multipart uploads, from 1 to 1,000, to return - // in the response body. 1,000 is the maximum number of uploads that can be - // returned in a response. - MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` - - // Lists in-progress uploads only for those keys that begin with the specified - // prefix. - Prefix *string `location:"querystring" locationName:"prefix" type:"string"` - - // Together with key-marker, specifies the multipart upload after which listing - // should begin. If key-marker is not specified, the upload-id-marker parameter - // is ignored. 
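ListBucketsInput is an empty structure, so the call takes no parameters. A sketch, assuming default credentials:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Returns every bucket owned by the authenticated account.
	out, err := svc.ListBuckets(&s3.ListBucketsInput{})
	if err != nil {
		log.Fatalf("ListBuckets failed: %v", err)
	}
	for _, b := range out.Buckets {
		fmt.Printf("%s\t%s\n", aws.StringValue(b.Name), aws.TimeValue(b.CreationDate))
	}
}
```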
- UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` -} - -// String returns the string representation -func (s ListMultipartUploadsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListMultipartUploadsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListMultipartUploadsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type ListMultipartUploadsOutput struct { - _ struct{} `type:"structure"` - - // Name of the bucket to which the multipart upload was initiated. - Bucket *string `type:"string"` - - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - Delimiter *string `type:"string"` - - // Encoding type used by Amazon S3 to encode object keys in the response. - EncodingType *string `type:"string" enum:"EncodingType"` - - // Indicates whether the returned list of multipart uploads is truncated. A - // value of true indicates that the list was truncated. The list can be truncated - // if the number of multipart uploads exceeds the limit allowed or specified - // by max uploads. - IsTruncated *bool `type:"boolean"` - - // The key at or after which the listing began. - KeyMarker *string `type:"string"` - - // Maximum number of multipart uploads that could have been included in the - // response. - MaxUploads *int64 `type:"integer"` - - // When a list is truncated, this element specifies the value that should be - // used for the key-marker request parameter in a subsequent request. - NextKeyMarker *string `type:"string"` - - // When a list is truncated, this element specifies the value that should be - // used for the upload-id-marker request parameter in a subsequent request. - NextUploadIdMarker *string `type:"string"` - - // When a prefix is provided in the request, this field contains the specified - // prefix. The result contains only keys starting with the specified prefix. - Prefix *string `type:"string"` - - // Upload ID after which listing began. - UploadIdMarker *string `type:"string"` - - Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s ListMultipartUploadsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListMultipartUploadsOutput) GoString() string { - return s.String() -} - -type ListObjectVersionsInput struct { - _ struct{} `type:"structure"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // A delimiter is a character you use to group keys. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in - // XML 1.0, you can add this parameter to request that Amazon S3 encode the - // keys in the response. 
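Paging through in-progress multipart uploads requires threading both NextKeyMarker and NextUploadIdMarker back into the request, per the field docs above. A sketch with a hypothetical bucket:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	in := &s3.ListMultipartUploadsInput{
		Bucket:     aws.String("my-example-bucket"), // hypothetical
		MaxUploads: aws.Int64(100),
	}
	for {
		out, err := svc.ListMultipartUploads(in)
		if err != nil {
			log.Fatalf("ListMultipartUploads failed: %v", err)
		}
		for _, u := range out.Uploads {
			fmt.Printf("%s (upload id %s)\n", aws.StringValue(u.Key), aws.StringValue(u.UploadId))
		}
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		// Resume after the last page using both markers together.
		in.KeyMarker = out.NextKeyMarker
		in.UploadIdMarker = out.NextUploadIdMarker
	}
}
```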
- EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - - // Specifies the key to start with when listing objects in a bucket. - KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` - - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. - MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` - - // Limits the response to keys that begin with the specified prefix. - Prefix *string `location:"querystring" locationName:"prefix" type:"string"` - - // Specifies the object version you want to start listing from. - VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` -} - -// String returns the string representation -func (s ListObjectVersionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListObjectVersionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListObjectVersionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type ListObjectVersionsOutput struct { - _ struct{} `type:"structure"` - - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` - - Delimiter *string `type:"string"` - - // Encoding type used by Amazon S3 to encode object keys in the response. - EncodingType *string `type:"string" enum:"EncodingType"` - - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. If your results were truncated, you can - // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker - // response parameters as a starting place in another request to return the - // rest of the results. - IsTruncated *bool `type:"boolean"` - - // Marks the last Key returned in a truncated response. - KeyMarker *string `type:"string"` - - MaxKeys *int64 `type:"integer"` - - Name *string `type:"string"` - - // Use this value for the key marker request parameter in a subsequent request. - NextKeyMarker *string `type:"string"` - - // Use this value for the next version id marker parameter in a subsequent request. - NextVersionIdMarker *string `type:"string"` - - Prefix *string `type:"string"` - - VersionIdMarker *string `type:"string"` - - Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s ListObjectVersionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListObjectVersionsOutput) GoString() string { - return s.String() -} - -type ListObjectsInput struct { - _ struct{} `type:"structure"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // A delimiter is a character you use to group keys. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. 
An object key may contain any Unicode character;
- // however, XML 1.0 parser cannot parse some characters, such as characters
- // with an ASCII value from 0 to 10. For characters that are not supported in
- // XML 1.0, you can add this parameter to request that Amazon S3 encode the
- // keys in the response.
- EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
-
- // Specifies the key to start with when listing objects in a bucket.
- Marker *string `location:"querystring" locationName:"marker" type:"string"`
-
- // Sets the maximum number of keys returned in the response. The response might
- // contain fewer keys but will never contain more.
- MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
-
- // Limits the response to keys that begin with the specified prefix.
- Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
-}
-
-// String returns the string representation
-func (s ListObjectsInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s ListObjectsInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ListObjectsInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"}
- if s.Bucket == nil {
- invalidParams.Add(request.NewErrParamRequired("Bucket"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-type ListObjectsOutput struct {
- _ struct{} `type:"structure"`
-
- CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
-
- Contents []*Object `type:"list" flattened:"true"`
-
- Delimiter *string `type:"string"`
-
- // Encoding type used by Amazon S3 to encode object keys in the response.
- EncodingType *string `type:"string" enum:"EncodingType"`
-
- // A flag that indicates whether or not Amazon S3 returned all of the results
- // that satisfied the search criteria.
- IsTruncated *bool `type:"boolean"`
-
- Marker *string `type:"string"`
-
- MaxKeys *int64 `type:"integer"`
-
- Name *string `type:"string"`
-
- // When the response is truncated (the IsTruncated element value in the response
- // is true), you can use the key name in this field as the marker in the subsequent
- // request to get the next set of objects. Amazon S3 lists objects in alphabetical
- // order. Note: This element is returned only if you have the delimiter request
- // parameter specified. If the response does not include the NextMarker and it
- // is truncated, you can use the value of the last Key in the response as the
- // marker in the subsequent request to get the next set of object keys.
- NextMarker *string `type:"string"`
-
- Prefix *string `type:"string"`
-}
-
-// String returns the string representation
-func (s ListObjectsOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s ListObjectsOutput) GoString() string {
- return s.String()
-}
-
-type ListObjectsV2Input struct {
- _ struct{} `type:"structure"`
-
- // Name of the bucket to list.
- Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
- // ContinuationToken indicates to Amazon S3 that the list is being continued on
- // this bucket with a token.
ContinuationToken is obfuscated and is not a real
- // key
- ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
-
- // A delimiter is a character you use to group keys.
- Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
-
- // Encoding type used by Amazon S3 to encode object keys in the response.
- EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
-
- // The owner field is not present in listV2 by default. If you want to return the
- // owner field with each key in the result, set the fetch owner field to
- // true.
- FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"`
-
- // Sets the maximum number of keys returned in the response. The response might
- // contain fewer keys but will never contain more.
- MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
-
- // Limits the response to keys that begin with the specified prefix.
- Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
-
- // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
- // listing after this specified key. StartAfter can be any key in the bucket.
- StartAfter *string `location:"querystring" locationName:"start-after" type:"string"`
-}
-
-// String returns the string representation
-func (s ListObjectsV2Input) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s ListObjectsV2Input) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ListObjectsV2Input) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"}
- if s.Bucket == nil {
- invalidParams.Add(request.NewErrParamRequired("Bucket"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-type ListObjectsV2Output struct {
- _ struct{} `type:"structure"`
-
- // CommonPrefixes contains all (if there are any) keys between Prefix and the
- // next occurrence of the string specified by delimiter
- CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
-
- // Metadata about each object returned.
- Contents []*Object `type:"list" flattened:"true"`
-
- // ContinuationToken indicates to Amazon S3 that the list is being continued on
- // this bucket with a token. ContinuationToken is obfuscated and is not a real
- // key.
- ContinuationToken *string `type:"string"`
-
- // A delimiter is a character you use to group keys.
- Delimiter *string `type:"string"`
-
- // Encoding type used by Amazon S3 to encode object keys in the response.
- EncodingType *string `type:"string" enum:"EncodingType"`
-
- // A flag that indicates whether or not Amazon S3 returned all of the results
- // that satisfied the search criteria.
- IsTruncated *bool `type:"boolean"`
-
- // KeyCount is the number of keys returned with this request. KeyCount will
- // always be less than or equal to the MaxKeys field. Say you ask for 50 keys; your
- // result will include no more than 50 keys.
- KeyCount *int64 `type:"integer"`
-
- // Sets the maximum number of keys returned in the response. The response might
- // contain fewer keys but will never contain more.
- MaxKeys *int64 `type:"integer"`
-
- // Name of the bucket to list.
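For the older ListObjects API defined earlier, the generated ListObjectsPages helper threads Marker/NextMarker between requests, which sidesteps the NextMarker caveat in that field's documentation. A sketch with hypothetical names:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	in := &s3.ListObjectsInput{
		Bucket: aws.String("my-example-bucket"), // hypothetical
		Prefix: aws.String("logs/"),
	}
	// The callback runs once per page; returning true continues paging.
	err := svc.ListObjectsPages(in, func(page *s3.ListObjectsOutput, lastPage bool) bool {
		for _, obj := range page.Contents {
			fmt.Printf("%s\t%d\n", aws.StringValue(obj.Key), aws.Int64Value(obj.Size))
		}
		return true
	})
	if err != nil {
		log.Fatalf("ListObjects failed: %v", err)
	}
}
```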
- Name *string `type:"string"` - - // NextContinuationToken is sent when isTruncated is true which means there - // are more keys in the bucket that can be listed. The next list requests to - // Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken - // is obfuscated and is not a real key - NextContinuationToken *string `type:"string"` - - // Limits the response to keys that begin with the specified prefix. - Prefix *string `type:"string"` - - // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket - StartAfter *string `type:"string"` -} - -// String returns the string representation -func (s ListObjectsV2Output) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListObjectsV2Output) GoString() string { - return s.String() -} - -type ListPartsInput struct { - _ struct{} `type:"structure"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Sets the maximum number of parts to return. - MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"` - - // Specifies the part after which listing should begin. Only parts with higher - // part numbers will be listed. - PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Upload ID identifying the multipart upload whose parts are being listed. - UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` -} - -// String returns the string representation -func (s ListPartsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListPartsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListPartsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.UploadId == nil { - invalidParams.Add(request.NewErrParamRequired("UploadId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type ListPartsOutput struct { - _ struct{} `type:"structure"` - - // Date when multipart upload will become eligible for abort operation by lifecycle. - AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` - - // Id of the lifecycle rule that makes a multipart upload eligible for abort - // operation. - AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` - - // Name of the bucket to which the multipart upload was initiated. 
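The V2 listing shown here replaces key markers with the opaque ContinuationToken. A manual token loop, with a hypothetical bucket:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	in := &s3.ListObjectsV2Input{
		Bucket:     aws.String("my-example-bucket"), // hypothetical
		FetchOwner: aws.Bool(true),                  // owner is omitted by default in V2
	}
	for {
		out, err := svc.ListObjectsV2(in)
		if err != nil {
			log.Fatalf("ListObjectsV2 failed: %v", err)
		}
		for _, obj := range out.Contents {
			fmt.Println(aws.StringValue(obj.Key))
		}
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		// The opaque token, not a key, carries the listing position.
		in.ContinuationToken = out.NextContinuationToken
	}
}
```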
- Bucket *string `type:"string"` - - // Identifies who initiated the multipart upload. - Initiator *Initiator `type:"structure"` - - // Indicates whether the returned list of parts is truncated. - IsTruncated *bool `type:"boolean"` - - // Object key for which the multipart upload was initiated. - Key *string `min:"1" type:"string"` - - // Maximum number of parts that were allowed in the response. - MaxParts *int64 `type:"integer"` - - // When a list is truncated, this element specifies the last part in the list, - // as well as the value to use for the part-number-marker request parameter - // in a subsequent request. - NextPartNumberMarker *int64 `type:"integer"` - - Owner *Owner `type:"structure"` - - // Part number after which listing begins. - PartNumberMarker *int64 `type:"integer"` - - Parts []*Part `locationName:"Part" type:"list" flattened:"true"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"StorageClass"` - - // Upload ID identifying the multipart upload whose parts are being listed. - UploadId *string `type:"string"` -} - -// String returns the string representation -func (s ListPartsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListPartsOutput) GoString() string { - return s.String() -} - -type LoggingEnabled struct { - _ struct{} `type:"structure"` - - // Specifies the bucket where you want Amazon S3 to store server access logs. - // You can have your logs delivered to any bucket that you own, including the - // same bucket that is being logged. You can also configure multiple buckets - // to deliver their logs to the same target bucket. In this case you should - // choose a different TargetPrefix for each source bucket so that the delivered - // log files can be distinguished by key. - TargetBucket *string `type:"string"` - - TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` - - // This element lets you specify a prefix for the keys that the log files will - // be stored under. - TargetPrefix *string `type:"string"` -} - -// String returns the string representation -func (s LoggingEnabled) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LoggingEnabled) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *LoggingEnabled) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} - if s.TargetGrants != nil { - for i, v := range s.TargetGrants { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type MultipartUpload struct { - _ struct{} `type:"structure"` - - // Date and time at which the multipart upload was initiated. - Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - // Identifies who initiated the multipart upload. - Initiator *Initiator `type:"structure"` - - // Key of the object for which the multipart upload was initiated. 
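ListParts pages by part number rather than by key; NextPartNumberMarker feeds the next request's PartNumberMarker. A sketch with hypothetical bucket, key, and upload ID:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	in := &s3.ListPartsInput{
		Bucket:   aws.String("my-example-bucket"),   // hypothetical
		Key:      aws.String("backups/archive.tar"), // hypothetical
		UploadId: aws.String("EXAMPLE-UPLOAD-ID"),   // from CreateMultipartUpload
	}
	for {
		out, err := svc.ListParts(in)
		if err != nil {
			log.Fatalf("ListParts failed: %v", err)
		}
		for _, p := range out.Parts {
			fmt.Printf("part %d: %d bytes, etag %s\n",
				aws.Int64Value(p.PartNumber), aws.Int64Value(p.Size), aws.StringValue(p.ETag))
		}
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		in.PartNumberMarker = out.NextPartNumberMarker
	}
}
```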
- Key *string `min:"1" type:"string"` - - Owner *Owner `type:"structure"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"StorageClass"` - - // Upload ID that identifies the multipart upload. - UploadId *string `type:"string"` -} - -// String returns the string representation -func (s MultipartUpload) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MultipartUpload) GoString() string { - return s.String() -} - -// Specifies when noncurrent object versions expire. Upon expiration, Amazon -// S3 permanently deletes the noncurrent object versions. You set this lifecycle -// configuration action on a bucket that has versioning enabled (or suspended) -// to request that Amazon S3 delete noncurrent object versions at a specific -// period in the object's lifetime. -type NoncurrentVersionExpiration struct { - _ struct{} `type:"structure"` - - // Specifies the number of days an object is noncurrent before Amazon S3 can - // perform the associated action. For information about the noncurrent days - // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in - // the Amazon Simple Storage Service Developer Guide. - NoncurrentDays *int64 `type:"integer"` -} - -// String returns the string representation -func (s NoncurrentVersionExpiration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NoncurrentVersionExpiration) GoString() string { - return s.String() -} - -// Container for the transition rule that describes when noncurrent objects -// transition to the STANDARD_IA or GLACIER storage class. If your bucket is -// versioning-enabled (or versioning is suspended), you can set this action -// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA -// or GLACIER storage class at a specific period in the object's lifetime. -type NoncurrentVersionTransition struct { - _ struct{} `type:"structure"` - - // Specifies the number of days an object is noncurrent before Amazon S3 can - // perform the associated action. For information about the noncurrent days - // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in - // the Amazon Simple Storage Service Developer Guide. - NoncurrentDays *int64 `type:"integer"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"TransitionStorageClass"` -} - -// String returns the string representation -func (s NoncurrentVersionTransition) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NoncurrentVersionTransition) GoString() string { - return s.String() -} - -// Container for specifying the notification configuration of the bucket. If -// this element is empty, notifications are turned off on the bucket. 
-type NotificationConfiguration struct { - _ struct{} `type:"structure"` - - LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` - - QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` - - TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` -} - -// String returns the string representation -func (s NotificationConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NotificationConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *NotificationConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"} - if s.LambdaFunctionConfigurations != nil { - for i, v := range s.LambdaFunctionConfigurations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams)) - } - } - } - if s.QueueConfigurations != nil { - for i, v := range s.QueueConfigurations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams)) - } - } - } - if s.TopicConfigurations != nil { - for i, v := range s.TopicConfigurations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type NotificationConfigurationDeprecated struct { - _ struct{} `type:"structure"` - - CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` - - QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` - - TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` -} - -// String returns the string representation -func (s NotificationConfigurationDeprecated) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NotificationConfigurationDeprecated) GoString() string { - return s.String() -} - -// Container for object key name filtering rules. For information about key -// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// in the Amazon Simple Storage Service Developer Guide. -type NotificationConfigurationFilter struct { - _ struct{} `type:"structure"` - - // Container for object key name prefix and suffix filtering rules. - Key *KeyFilter `locationName:"S3Key" type:"structure"` -} - -// String returns the string representation -func (s NotificationConfigurationFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NotificationConfigurationFilter) GoString() string { - return s.String() -} - -type Object struct { - _ struct{} `type:"structure"` - - ETag *string `type:"string"` - - Key *string `min:"1" type:"string"` - - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - Owner *Owner `type:"structure"` - - Size *int64 `type:"integer"` - - // The class of storage used to store the object. 
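NotificationConfiguration.Validate recurses into every nested configuration, as the code above shows. A sketch that registers one hypothetical Lambda target and validates it client-side:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// An empty NotificationConfiguration disables notifications; here one
	// hypothetical Lambda target is registered instead.
	cfg := &s3.NotificationConfiguration{
		LambdaFunctionConfigurations: []*s3.LambdaFunctionConfiguration{
			{
				Events:            []*string{aws.String("s3:ObjectRemoved:*")},
				LambdaFunctionArn: aws.String("arn:aws:lambda:us-east-1:123456789012:function:auditor"),
			},
		},
	}

	// Each nested configuration's required fields are checked in turn.
	if err := cfg.Validate(); err != nil {
		fmt.Println("invalid notification configuration:", err)
	}
}
```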
- StorageClass *string `type:"string" enum:"ObjectStorageClass"` -} - -// String returns the string representation -func (s Object) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Object) GoString() string { - return s.String() -} - -type ObjectIdentifier struct { - _ struct{} `type:"structure"` - - // Key name of the object to delete. - Key *string `min:"1" type:"string" required:"true"` - - // VersionId for the specific version of the object to delete. - VersionId *string `type:"string"` -} - -// String returns the string representation -func (s ObjectIdentifier) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ObjectIdentifier) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ObjectIdentifier) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type ObjectVersion struct { - _ struct{} `type:"structure"` - - ETag *string `type:"string"` - - // Specifies whether the object is (true) or is not (false) the latest version - // of an object. - IsLatest *bool `type:"boolean"` - - // The object key. - Key *string `min:"1" type:"string"` - - // Date and time the object was last modified. - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - Owner *Owner `type:"structure"` - - // Size in bytes of the object. - Size *int64 `type:"integer"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` - - // Version ID of an object. - VersionId *string `type:"string"` -} - -// String returns the string representation -func (s ObjectVersion) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ObjectVersion) GoString() string { - return s.String() -} - -type Owner struct { - _ struct{} `type:"structure"` - - DisplayName *string `type:"string"` - - ID *string `type:"string"` -} - -// String returns the string representation -func (s Owner) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Owner) GoString() string { - return s.String() -} - -type Part struct { - _ struct{} `type:"structure"` - - // Entity tag returned when the part was uploaded. - ETag *string `type:"string"` - - // Date and time at which the part was uploaded. - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - // Part number identifying the part. This is a positive integer between 1 and - // 10,000. - PartNumber *int64 `type:"integer"` - - // Size of the uploaded part data. - Size *int64 `type:"integer"` -} - -// String returns the string representation -func (s Part) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Part) GoString() string { - return s.String() -} - -type PutBucketAccelerateConfigurationInput struct { - _ struct{} `type:"structure" payload:"AccelerateConfiguration"` - - // Specifies the Accelerate Configuration you want to set for the bucket. 
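ObjectIdentifier is the element type of the batch-delete request; the surrounding Delete and DeleteObjectsInput types are defined elsewhere in this file. A sketch with hypothetical keys (the version ID is the placeholder from the AWS documentation):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Delete one current object and one specific version in a single request.
	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("my-example-bucket"), // hypothetical
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("tmp/scratch.dat")},
				{Key: aws.String("tmp/old.dat"), VersionId: aws.String("3sL4kqtJlcpXroDTDmJ+rmSpXd3dIbrHY+MTRCxf3vjVBH40Nr8X8gdRQBpUMLUo")},
			},
		},
	})
	if err != nil {
		log.Fatalf("DeleteObjects failed: %v", err)
	}
	fmt.Printf("deleted %d objects\n", len(out.Deleted))
}
```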
- AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true"` - - // Name of the bucket for which the accelerate configuration is set. - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` -} - -// String returns the string representation -func (s PutBucketAccelerateConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketAccelerateConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketAccelerateConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"} - if s.AccelerateConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration")) - } - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutBucketAccelerateConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketAccelerateConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketAccelerateConfigurationOutput) GoString() string { - return s.String() -} - -type PutBucketAclInput struct { - _ struct{} `type:"structure" payload:"AccessControlPolicy"` - - // The canned ACL to apply to the bucket. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` - - AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to list the objects in the bucket. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the bucket ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to create, overwrite, and delete any object in the bucket. - GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` - - // Allows grantee to write the ACL for the applicable bucket. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` -} - -// String returns the string representation -func (s PutBucketAclInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketAclInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
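Both fields of PutBucketAccelerateConfigurationInput are required, as the Validate method above enforces. A sketch enabling transfer acceleration on a hypothetical bucket:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Status toggles between "Enabled" and "Suspended".
	_, err := svc.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
		Bucket: aws.String("my-example-bucket"), // hypothetical
		AccelerateConfiguration: &s3.AccelerateConfiguration{
			Status: aws.String("Enabled"),
		},
	})
	if err != nil {
		log.Fatalf("PutBucketAccelerateConfiguration failed: %v", err)
	}
}
```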
-func (s *PutBucketAclInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.AccessControlPolicy != nil { - if err := s.AccessControlPolicy.Validate(); err != nil { - invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutBucketAclOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketAclOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketAclOutput) GoString() string { - return s.String() -} - -type PutBucketCorsInput struct { - _ struct{} `type:"structure" payload:"CORSConfiguration"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true"` -} - -// String returns the string representation -func (s PutBucketCorsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketCorsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketCorsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.CORSConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) - } - if s.CORSConfiguration != nil { - if err := s.CORSConfiguration.Validate(); err != nil { - invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutBucketCorsOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketCorsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketCorsOutput) GoString() string { - return s.String() -} - -type PutBucketLifecycleConfigurationInput struct { - _ struct{} `type:"structure" payload:"LifecycleConfiguration"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` -} - -// String returns the string representation -func (s PutBucketLifecycleConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLifecycleConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
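PutBucketCorsInput requires a CORSConfiguration, whose CORSRule list is defined elsewhere in this file. A sketch allowing simple cross-origin reads from a hypothetical origin:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
		Bucket: aws.String("my-example-bucket"), // hypothetical
		CORSConfiguration: &s3.CORSConfiguration{
			CORSRules: []*s3.CORSRule{
				{
					AllowedMethods: []*string{aws.String("GET"), aws.String("HEAD")},
					AllowedOrigins: []*string{aws.String("https://example.com")},
					MaxAgeSeconds:  aws.Int64(3000),
				},
			},
		},
	})
	if err != nil {
		log.Fatalf("PutBucketCors failed: %v", err)
	}
}
```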
-func (s *PutBucketLifecycleConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.LifecycleConfiguration != nil { - if err := s.LifecycleConfiguration.Validate(); err != nil { - invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutBucketLifecycleConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketLifecycleConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLifecycleConfigurationOutput) GoString() string { - return s.String() -} - -type PutBucketLifecycleInput struct { - _ struct{} `type:"structure" payload:"LifecycleConfiguration"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` -} - -// String returns the string representation -func (s PutBucketLifecycleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLifecycleInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketLifecycleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.LifecycleConfiguration != nil { - if err := s.LifecycleConfiguration.Validate(); err != nil { - invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutBucketLifecycleOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketLifecycleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLifecycleOutput) GoString() string { - return s.String() -} - -type PutBucketLoggingInput struct { - _ struct{} `type:"structure" payload:"BucketLoggingStatus"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"` -} - -// String returns the string representation -func (s PutBucketLoggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLoggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
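Tying the lifecycle types together: LifecycleRule requires Prefix and Status, and the NoncurrentVersionExpiration type from earlier in this section slots into the same rule. A sketch for a hypothetical bucket:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Expire objects under logs/ after 30 days and remove noncurrent
	// versions after 60; Prefix and Status are the rule's required fields.
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("my-example-bucket"), // hypothetical
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{
				{
					ID:         aws.String("expire-logs"),
					Prefix:     aws.String("logs/"),
					Status:     aws.String("Enabled"),
					Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
					NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{
						NoncurrentDays: aws.Int64(60),
					},
				},
			},
		},
	})
	if err != nil {
		log.Fatalf("PutBucketLifecycleConfiguration failed: %v", err)
	}
}
```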
-func (s *PutBucketLoggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.BucketLoggingStatus == nil { - invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus")) - } - if s.BucketLoggingStatus != nil { - if err := s.BucketLoggingStatus.Validate(); err != nil { - invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutBucketLoggingOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketLoggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLoggingOutput) GoString() string { - return s.String() -} - -type PutBucketNotificationConfigurationInput struct { - _ struct{} `type:"structure" payload:"NotificationConfiguration"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Container for specifying the notification configuration of the bucket. If - // this element is empty, notifications are turned off on the bucket. - NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"` -} - -// String returns the string representation -func (s PutBucketNotificationConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketNotificationConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketNotificationConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.NotificationConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) - } - if s.NotificationConfiguration != nil { - if err := s.NotificationConfiguration.Validate(); err != nil { - invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutBucketNotificationConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketNotificationConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketNotificationConfigurationOutput) GoString() string { - return s.String() -} - -type PutBucketNotificationInput struct { - _ struct{} `type:"structure" payload:"NotificationConfiguration"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"` -} - -// String returns the string representation -func (s PutBucketNotificationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketNotificationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBucketNotificationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.NotificationConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutBucketNotificationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketNotificationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketNotificationOutput) GoString() string { - return s.String() -} - -type PutBucketPolicyInput struct { - _ struct{} `type:"structure" payload:"Policy"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The bucket policy as a JSON document. - Policy *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s PutBucketPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Policy == nil { - invalidParams.Add(request.NewErrParamRequired("Policy")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutBucketPolicyOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketPolicyOutput) GoString() string { - return s.String() -} - -type PutBucketReplicationInput struct { - _ struct{} `type:"structure" payload:"ReplicationConfiguration"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Container for replication rules. You can add as many as 1,000 rules. Total - // replication configuration size can be up to 2 MB. - ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"` -} - -// String returns the string representation -func (s PutBucketReplicationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketReplicationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
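PutBucketPolicyInput carries the policy as an opaque JSON string rather than a structured type, so callers inline or marshal the document themselves. A sketch, assuming the same aws/s3 imports as the earlier snippet; the bucket and resource names are placeholders:

func putPolicyInput() *s3.PutBucketPolicyInput {
	// The policy is sent verbatim as the request payload; S3 parses it server-side.
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect": "Allow",
	    "Principal": "*",
	    "Action": "s3:GetObject",
	    "Resource": "arn:aws:s3:::my-bucket/*"
	  }]
	}`
	return &s3.PutBucketPolicyInput{
		Bucket: aws.String("my-bucket"),
		Policy: aws.String(policy), // both fields are required by Validate
	}
}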
-func (s *PutBucketReplicationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.ReplicationConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) - } - if s.ReplicationConfiguration != nil { - if err := s.ReplicationConfiguration.Validate(); err != nil { - invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutBucketReplicationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketReplicationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketReplicationOutput) GoString() string { - return s.String() -} - -type PutBucketRequestPaymentInput struct { - _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"` -} - -// String returns the string representation -func (s PutBucketRequestPaymentInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketRequestPaymentInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketRequestPaymentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.RequestPaymentConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) - } - if s.RequestPaymentConfiguration != nil { - if err := s.RequestPaymentConfiguration.Validate(); err != nil { - invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutBucketRequestPaymentOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketRequestPaymentOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketRequestPaymentOutput) GoString() string { - return s.String() -} - -type PutBucketTaggingInput struct { - _ struct{} `type:"structure" payload:"Tagging"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"` -} - -// String returns the string representation -func (s PutBucketTaggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketTaggingInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBucketTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Tagging == nil { - invalidParams.Add(request.NewErrParamRequired("Tagging")) - } - if s.Tagging != nil { - if err := s.Tagging.Validate(); err != nil { - invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutBucketTaggingOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketTaggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketTaggingOutput) GoString() string { - return s.String() -} - -type PutBucketVersioningInput struct { - _ struct{} `type:"structure" payload:"VersioningConfiguration"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. - MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - - VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"` -} - -// String returns the string representation -func (s PutBucketVersioningInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketVersioningInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketVersioningInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.VersioningConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutBucketVersioningOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketVersioningOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketVersioningOutput) GoString() string { - return s.String() -} - -type PutBucketWebsiteInput struct { - _ struct{} `type:"structure" payload:"WebsiteConfiguration"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"` -} - -// String returns the string representation -func (s PutBucketWebsiteInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketWebsiteInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
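The versioning input pairs a required VersioningConfiguration with an optional x-amz-mfa header; the MFA value only matters when the configuration touches MFA delete. A sketch of plain versioning enablement, with the same assumed imports and a placeholder bucket:

func enableVersioningInput() *s3.PutBucketVersioningInput {
	return &s3.PutBucketVersioningInput{
		Bucket: aws.String("my-bucket"),
		VersioningConfiguration: &s3.VersioningConfiguration{
			// "Enabled" turns versioning on; "Suspended" stops creating new versions.
			Status: aws.String(s3.BucketVersioningStatusEnabled),
		},
		// MFA would only be set here when changing MfaDelete.
	}
}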
-func (s *PutBucketWebsiteInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.WebsiteConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) - } - if s.WebsiteConfiguration != nil { - if err := s.WebsiteConfiguration.Validate(); err != nil { - invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutBucketWebsiteOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketWebsiteOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketWebsiteOutput) GoString() string { - return s.String() -} - -type PutObjectAclInput struct { - _ struct{} `type:"structure" payload:"AccessControlPolicy"` - - // The canned ACL to apply to the object. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - - AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to list the objects in the bucket. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the bucket ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to create, overwrite, and delete any object in the bucket. - GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` - - // Allows grantee to write the ACL for the applicable bucket. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // VersionId used to reference a specific version of the object. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s PutObjectAclInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectAclInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutObjectAclInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.AccessControlPolicy != nil { - if err := s.AccessControlPolicy.Validate(); err != nil { - invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutObjectAclOutput struct { - _ struct{} `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` -} - -// String returns the string representation -func (s PutObjectAclOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectAclOutput) GoString() string { - return s.String() -} - -type PutObjectInput struct { - _ struct{} `type:"structure" payload:"Body"` - - // The canned ACL to apply to the object. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - - // Object data. - Body io.ReadSeeker `type:"blob"` - - // Name of the bucket to which the PUT operation was initiated. - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // Size of the body in bytes. This parameter is useful when the size of the - // body cannot be determined automatically. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // The date and time at which the object is no longer cacheable. - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` - - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to read the object data and its metadata. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the object ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to write the ACL for the applicable object. 
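PutObjectAclInput accepts either a canned ACL header or an explicit AccessControlPolicy with individual grants, and its Validate additionally enforces the one-character minimum on Key. A sketch using the canned form, with the same assumed imports and placeholder names:

func publicReadACLInput() *s3.PutObjectAclInput {
	return &s3.PutObjectAclInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("reports/summary.html"),
		// Canned ACL via the x-amz-acl header; an AccessControlPolicy with
		// explicit Grants could be supplied instead.
		ACL: aws.String(s3.ObjectCannedACLPublicRead),
	}
}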
- GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
-
- // Object key for which the PUT operation was initiated.
- Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
- // A map of metadata to store with the object in S3.
- Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
-
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
- RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-
- // Specifies the algorithm to use when encrypting the object (e.g., AES256).
- SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
- // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
- // data. This value is used to store the object and then it is discarded; Amazon
- // does not store the encryption key. The key must be appropriate for use with
- // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
- // header.
- SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
-
- // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
- // Amazon S3 uses this header for a message integrity check to ensure the encryption
- // key was transmitted without error.
- SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-
- // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
- // requests for an object protected by AWS KMS will fail if not made via SSL
- // or using SigV4. Documentation on configuring any of the officially supported
- // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
- SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
-
- // The Server-side encryption algorithm used when storing this object in S3
- // (e.g., AES256, aws:kms).
- ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
-
- // The type of storage to use for the object. Defaults to 'STANDARD'.
- StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
-
- // If the bucket is configured as a website, redirects requests for this object
- // to another object in the same bucket or to an external URL. Amazon S3 stores
- // the value of this header in the object metadata.
- WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
-}
-
-// String returns the string representation
-func (s PutObjectInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s PutObjectInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PutObjectInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"}
- if s.Bucket == nil {
- invalidParams.Add(request.NewErrParamRequired("Bucket"))
- }
- if s.Key == nil {
- invalidParams.Add(request.NewErrParamRequired("Key"))
- }
- if s.Key != nil && len(*s.Key) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Key", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-type PutObjectOutput struct {
- _ struct{} `type:"structure"`
-
- // Entity tag for the uploaded object.
- ETag *string `location:"header" locationName:"ETag" type:"string"`
-
- // If the object expiration is configured, this will contain the expiration
- // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
- Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
-
- // If present, indicates that the requester was successfully charged for the
- // request.
- RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
-
- // If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header confirming the encryption algorithm
- // used.
- SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
- // If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header to provide round trip message integrity
- // verification of the customer-provided encryption key.
- SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-
- // If present, specifies the ID of the AWS Key Management Service (KMS) master
- // encryption key that was used for the object.
- SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
-
- // The Server-side encryption algorithm used when storing this object in S3
- // (e.g., AES256, aws:kms).
- ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
-
- // Version of the object.
- VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
-}
-
-// String returns the string representation
-func (s PutObjectOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s PutObjectOutput) GoString() string {
- return s.String()
-}
-
-// Container for specifying a configuration when you want Amazon S3 to publish
-// events to an Amazon Simple Queue Service (Amazon SQS) queue.
-type QueueConfiguration struct {
- _ struct{} `type:"structure"`
-
- Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
-
- // Container for object key name filtering rules. For information about key
- // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
- // in the Amazon Simple Storage Service Developer Guide.
- Filter *NotificationConfigurationFilter `type:"structure"`
-
- // Optional unique identifier for configurations in a notification configuration.
- // If you don't provide one, Amazon S3 will assign an ID.
- Id *string `type:"string"`
-
- // Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects
- // events of specified type.
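Since the PutObjectInput Body above is an io.ReadSeeker, anything like a bytes.Reader or an os.File works as the payload, and the x-amz-meta-* map plus the SSE headers are all set through plain struct fields. A sketch, assuming the same aws/s3 imports plus the standard bytes package; all names are placeholders:

func putObjectInput() *s3.PutObjectInput {
	return &s3.PutObjectInput{
		Bucket:      aws.String("my-bucket"),
		Key:         aws.String("hello.txt"),
		Body:        bytes.NewReader([]byte("hello, world")), // any io.ReadSeeker
		ContentType: aws.String("text/plain"),
		// Sent as the x-amz-meta-owner header.
		Metadata: map[string]*string{"owner": aws.String("team-a")},
		// Server-managed encryption; SSE-C would use the SSECustomerKey fields instead.
		ServerSideEncryption: aws.String("AES256"),
	}
}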
- QueueArn *string `locationName:"Queue" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s QueueConfiguration) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s QueueConfiguration) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *QueueConfiguration) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"}
- if s.Events == nil {
- invalidParams.Add(request.NewErrParamRequired("Events"))
- }
- if s.QueueArn == nil {
- invalidParams.Add(request.NewErrParamRequired("QueueArn"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-type QueueConfigurationDeprecated struct {
- _ struct{} `type:"structure"`
-
- // Bucket event for which to send notifications.
- Event *string `deprecated:"true" type:"string" enum:"Event"`
-
- Events []*string `locationName:"Event" type:"list" flattened:"true"`
-
- // Optional unique identifier for configurations in a notification configuration.
- // If you don't provide one, Amazon S3 will assign an ID.
- Id *string `type:"string"`
-
- Queue *string `type:"string"`
-}
-
-// String returns the string representation
-func (s QueueConfigurationDeprecated) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s QueueConfigurationDeprecated) GoString() string {
- return s.String()
-}
-
-type Redirect struct {
- _ struct{} `type:"structure"`
-
- // The host name to use in the redirect request.
- HostName *string `type:"string"`
-
- // The HTTP redirect code to use on the response. Not required if one of the
- // siblings is present.
- HttpRedirectCode *string `type:"string"`
-
- // Protocol to use (http, https) when redirecting requests. The default is the
- // protocol that is used in the original request.
- Protocol *string `type:"string" enum:"Protocol"`
-
- // The object key prefix to use in the redirect request. For example, to redirect
- // requests for all pages with prefix docs/ (objects in the docs/ folder) to
- // documents/, you can set a condition block with KeyPrefixEquals set to docs/
- // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required
- // if one of the siblings is present. Can be present only if ReplaceKeyWith
- // is not provided.
- ReplaceKeyPrefixWith *string `type:"string"`
-
- // The specific object key to use in the redirect request. For example, redirect
- // request to error.html. Not required if one of the siblings is present. Can
- // be present only if ReplaceKeyPrefixWith is not provided.
- ReplaceKeyWith *string `type:"string"`
-}
-
-// String returns the string representation
-func (s Redirect) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s Redirect) GoString() string {
- return s.String()
-}
-
-type RedirectAllRequestsTo struct {
- _ struct{} `type:"structure"`
-
- // Name of the host where requests will be redirected.
- HostName *string `type:"string" required:"true"`
-
- // Protocol to use (http, https) when redirecting requests. The default is the
- // protocol that is used in the original request.
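Redirect and RedirectAllRequestsTo cover the two website styles: per-rule key rewrites and whole-bucket redirection. A sketch of the docs/-to-documents/ rewrite described above, wired into a RoutingRule; the Condition type is defined elsewhere in this file, and the same aws/s3 imports are assumed:

func docsRewriteRule() *s3.RoutingRule {
	return &s3.RoutingRule{
		// Only requests whose key starts with docs/ match this rule.
		Condition: &s3.Condition{KeyPrefixEquals: aws.String("docs/")},
		// Redirect is the one required field, enforced by RoutingRule.Validate.
		Redirect: &s3.Redirect{ReplaceKeyPrefixWith: aws.String("documents/")},
	}
}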
- Protocol *string `type:"string" enum:"Protocol"` -} - -// String returns the string representation -func (s RedirectAllRequestsTo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RedirectAllRequestsTo) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RedirectAllRequestsTo) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"} - if s.HostName == nil { - invalidParams.Add(request.NewErrParamRequired("HostName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// Container for replication rules. You can add as many as 1,000 rules. Total -// replication configuration size can be up to 2 MB. -type ReplicationConfiguration struct { - _ struct{} `type:"structure"` - - // Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating - // the objects. - Role *string `type:"string" required:"true"` - - // Container for information about a particular replication rule. Replication - // configuration must have at least one rule and can contain up to 1,000 rules. - Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` -} - -// String returns the string representation -func (s ReplicationConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ReplicationConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ReplicationConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"} - if s.Role == nil { - invalidParams.Add(request.NewErrParamRequired("Role")) - } - if s.Rules == nil { - invalidParams.Add(request.NewErrParamRequired("Rules")) - } - if s.Rules != nil { - for i, v := range s.Rules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type ReplicationRule struct { - _ struct{} `type:"structure"` - - Destination *Destination `type:"structure" required:"true"` - - // Unique identifier for the rule. The value cannot be longer than 255 characters. - ID *string `type:"string"` - - // Object keyname prefix identifying one or more objects to which the rule applies. - // Maximum prefix length can be up to 1,024 characters. Overlapping prefixes - // are not supported. - Prefix *string `type:"string" required:"true"` - - // The rule is ignored if status is not Enabled. - Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` -} - -// String returns the string representation -func (s ReplicationRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ReplicationRule) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
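A replication configuration needs the IAM role S3 assumes plus at least one rule, and each rule's Destination, Prefix, and Status are all required, as the validators here spell out. A sketch with the same assumed imports; the role ARN and destination bucket ARN are placeholders:

func replicationConfig() *s3.ReplicationConfiguration {
	return &s3.ReplicationConfiguration{
		Role: aws.String("arn:aws:iam::123456789012:role/s3-replication"),
		Rules: []*s3.ReplicationRule{{
			Prefix: aws.String(""), // empty prefix replicates the whole bucket
			Status: aws.String("Enabled"),
			Destination: &s3.Destination{
				Bucket: aws.String("arn:aws:s3:::my-replica-bucket"),
			},
		}},
	}
}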
-func (s *ReplicationRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"} - if s.Destination == nil { - invalidParams.Add(request.NewErrParamRequired("Destination")) - } - if s.Prefix == nil { - invalidParams.Add(request.NewErrParamRequired("Prefix")) - } - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) - } - if s.Destination != nil { - if err := s.Destination.Validate(); err != nil { - invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type RequestPaymentConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies who pays for the download and request fees. - Payer *string `type:"string" required:"true" enum:"Payer"` -} - -// String returns the string representation -func (s RequestPaymentConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RequestPaymentConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RequestPaymentConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"} - if s.Payer == nil { - invalidParams.Add(request.NewErrParamRequired("Payer")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type RestoreObjectInput struct { - _ struct{} `type:"structure" payload:"RestoreRequest"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"` - - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` -} - -// String returns the string representation -func (s RestoreObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RestoreObjectInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RestoreObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.RestoreRequest != nil { - if err := s.RestoreRequest.Validate(); err != nil { - invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type RestoreObjectOutput struct { - _ struct{} `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. 
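RestoreObjectInput asks S3 to stage a temporary readable copy of an archived (GLACIER-class) object, and RestoreRequest.Days bounds how long that copy stays around. A sketch with the same assumed imports and placeholder names:

func restoreInput() *s3.RestoreObjectInput {
	return &s3.RestoreObjectInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("archive/2016/logs.tar.gz"),
		RestoreRequest: &s3.RestoreRequest{
			Days: aws.Int64(7), // required: lifetime of the restored copy
		},
	}
}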
- RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
-}
-
-// String returns the string representation
-func (s RestoreObjectOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s RestoreObjectOutput) GoString() string {
- return s.String()
-}
-
-type RestoreRequest struct {
- _ struct{} `type:"structure"`
-
- // Lifetime of the active copy in days
- Days *int64 `type:"integer" required:"true"`
-}
-
-// String returns the string representation
-func (s RestoreRequest) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s RestoreRequest) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *RestoreRequest) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"}
- if s.Days == nil {
- invalidParams.Add(request.NewErrParamRequired("Days"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-type RoutingRule struct {
- _ struct{} `type:"structure"`
-
- // A container for describing a condition that must be met for the specified
- // redirect to apply. For example, 1. If request is for pages in the /docs folder,
- // redirect to the /documents folder. 2. If request results in HTTP error 4xx,
- // redirect request to another host where you might process the error.
- Condition *Condition `type:"structure"`
-
- // Container for redirect information. You can redirect requests to another
- // host, to another page, or with another protocol. In the event of an error,
- // you can specify a different error code to return.
- Redirect *Redirect `type:"structure" required:"true"`
-}
-
-// String returns the string representation
-func (s RoutingRule) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s RoutingRule) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *RoutingRule) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "RoutingRule"}
- if s.Redirect == nil {
- invalidParams.Add(request.NewErrParamRequired("Redirect"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-type Rule struct {
- _ struct{} `type:"structure"`
-
- // Specifies the days since the initiation of an Incomplete Multipart Upload
- // that Lifecycle will wait before permanently removing all parts of the upload.
- AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
-
- Expiration *LifecycleExpiration `type:"structure"`
-
- // Unique identifier for the rule. The value cannot be longer than 255 characters.
- ID *string `type:"string"`
-
- // Specifies when noncurrent object versions expire. Upon expiration, Amazon
- // S3 permanently deletes the noncurrent object versions. You set this lifecycle
- // configuration action on a bucket that has versioning enabled (or suspended)
- // to request that Amazon S3 delete noncurrent object versions at a specific
- // period in the object's lifetime.
- NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
-
- // Container for the transition rule that describes when noncurrent objects
- // transition to the STANDARD_IA or GLACIER storage class.
If your bucket is - // versioning-enabled (or versioning is suspended), you can set this action - // to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA - // or GLACIER storage class at a specific period in the object's lifetime. - NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` - - // Prefix identifying one or more objects to which the rule applies. - Prefix *string `type:"string" required:"true"` - - // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule - // is not currently being applied. - Status *string `type:"string" required:"true" enum:"ExpirationStatus"` - - Transition *Transition `type:"structure"` -} - -// String returns the string representation -func (s Rule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Rule) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Rule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Rule"} - if s.Prefix == nil { - invalidParams.Add(request.NewErrParamRequired("Prefix")) - } - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type Tag struct { - _ struct{} `type:"structure"` - - // Name of the tag. - Key *string `min:"1" type:"string" required:"true"` - - // Value of the tag. - Value *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s Tag) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Tag) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Tag) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Tag"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.Value == nil { - invalidParams.Add(request.NewErrParamRequired("Value")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type Tagging struct { - _ struct{} `type:"structure"` - - TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` -} - -// String returns the string representation -func (s Tagging) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Tagging) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Tagging) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Tagging"} - if s.TagSet == nil { - invalidParams.Add(request.NewErrParamRequired("TagSet")) - } - if s.TagSet != nil { - for i, v := range s.TagSet { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type TargetGrant struct { - _ struct{} `type:"structure"` - - Grantee *Grantee `type:"structure"` - - // Logging permissions assigned to the Grantee for the bucket. 
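The lifecycle Rule defined above combines a required Prefix and Status with optional Expiration and Transition actions. A sketch of a rule that tiers objects to GLACIER and later expires them, with the same assumed imports; the prefix and day counts are placeholders:

func lifecycleRule() *s3.Rule {
	return &s3.Rule{
		Prefix: aws.String("logs/"), // required: which keys the rule covers
		Status: aws.String(s3.ExpirationStatusEnabled),
		// Move matching objects to GLACIER after 30 days...
		Transition: &s3.Transition{
			Days:         aws.Int64(30),
			StorageClass: aws.String("GLACIER"),
		},
		// ...and delete them outright after a year.
		Expiration: &s3.LifecycleExpiration{Days: aws.Int64(365)},
	}
}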
- Permission *string `type:"string" enum:"BucketLogsPermission"` -} - -// String returns the string representation -func (s TargetGrant) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TargetGrant) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TargetGrant) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TargetGrant"} - if s.Grantee != nil { - if err := s.Grantee.Validate(); err != nil { - invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// Container for specifying the configuration when you want Amazon S3 to publish -// events to an Amazon Simple Notification Service (Amazon SNS) topic. -type TopicConfiguration struct { - _ struct{} `type:"structure"` - - Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - - // Container for object key name filtering rules. For information about key - // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. - Filter *NotificationConfigurationFilter `type:"structure"` - - // Optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` - - // Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects - // events of specified type. - TopicArn *string `locationName:"Topic" type:"string" required:"true"` -} - -// String returns the string representation -func (s TopicConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TopicConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TopicConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TopicConfiguration"} - if s.Events == nil { - invalidParams.Add(request.NewErrParamRequired("Events")) - } - if s.TopicArn == nil { - invalidParams.Add(request.NewErrParamRequired("TopicArn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type TopicConfigurationDeprecated struct { - _ struct{} `type:"structure"` - - // Bucket event for which to send notifications. - Event *string `deprecated:"true" type:"string" enum:"Event"` - - Events []*string `locationName:"Event" type:"list" flattened:"true"` - - // Optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` - - // Amazon SNS topic to which Amazon S3 will publish a message to report the - // specified events for the bucket. - Topic *string `type:"string"` -} - -// String returns the string representation -func (s TopicConfigurationDeprecated) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TopicConfigurationDeprecated) GoString() string { - return s.String() -} - -type Transition struct { - _ struct{} `type:"structure"` - - // Indicates at what date the object is to be moved or deleted. Should be in - // GMT ISO 8601 Format. 
- Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - // Indicates the lifetime, in days, of the objects that are subject to the rule. - // The value must be a non-zero positive integer. - Days *int64 `type:"integer"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"TransitionStorageClass"` -} - -// String returns the string representation -func (s Transition) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Transition) GoString() string { - return s.String() -} - -type UploadPartCopyInput struct { - _ struct{} `type:"structure"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The name of the source bucket and key name of the source object, separated - // by a slash (/). Must be URL-encoded. - CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` - - // Copies the object if its entity tag (ETag) matches the specified tag. - CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` - - // Copies the object if it has been modified since the specified time. - CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` - - // Copies the object if its entity tag (ETag) is different than the specified - // ETag. - CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` - - // Copies the object if it hasn't been modified since the specified time. - CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` - - // The range of bytes to copy from the source object. The range value must use - // the form bytes=first-last, where the first and last are the zero-based byte - // offsets to copy. For example, bytes=0-9 indicates that you want to copy the - // first ten bytes of the source. You can copy a range only if the source object - // is greater than 5 GB. - CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"` - - // Specifies the algorithm to use when decrypting the source object (e.g., AES256). - CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt - // the source object. The encryption key provided in this header must be one - // that was used when the source object was created. - CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` - - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Part number of part being copied. This is a positive integer between 1 and - // 10,000. 
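UploadPartCopyInput lets one part of a multipart upload be copied server-side from an existing object, and CopySourceRange makes it possible to split a large source into part-sized slices. A sketch with the same assumed imports; the upload ID would come from a prior CreateMultipartUpload call, and the bucket and key names are placeholders:

func copyPartInput(uploadID string) *s3.UploadPartCopyInput {
	return &s3.UploadPartCopyInput{
		Bucket:     aws.String("dest-bucket"),
		Key:        aws.String("big-object"),
		UploadId:   aws.String(uploadID),
		PartNumber: aws.Int64(1), // parts are numbered 1..10,000
		// Source is "bucket/key", URL-encoded as the header requires.
		CopySource: aws.String("src-bucket/big-object"),
		// Copy just the first 5 MiB of the source as this part.
		CopySourceRange: aws.String("bytes=0-5242879"),
	}
}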
- PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
-
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
- RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-
- // Specifies the algorithm to use when encrypting the object (e.g., AES256).
- SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
- // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
- // data. This value is used to store the object and then it is discarded; Amazon
- // does not store the encryption key. The key must be appropriate for use with
- // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
- // header. This must be the same encryption key specified in the initiate multipart
- // upload request.
- SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
-
- // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
- // Amazon S3 uses this header for a message integrity check to ensure the encryption
- // key was transmitted without error.
- SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-
- // Upload ID identifying the multipart upload whose part is being copied.
- UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s UploadPartCopyInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s UploadPartCopyInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *UploadPartCopyInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"}
- if s.Bucket == nil {
- invalidParams.Add(request.NewErrParamRequired("Bucket"))
- }
- if s.CopySource == nil {
- invalidParams.Add(request.NewErrParamRequired("CopySource"))
- }
- if s.Key == nil {
- invalidParams.Add(request.NewErrParamRequired("Key"))
- }
- if s.Key != nil && len(*s.Key) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Key", 1))
- }
- if s.PartNumber == nil {
- invalidParams.Add(request.NewErrParamRequired("PartNumber"))
- }
- if s.UploadId == nil {
- invalidParams.Add(request.NewErrParamRequired("UploadId"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-type UploadPartCopyOutput struct {
- _ struct{} `type:"structure" payload:"CopyPartResult"`
-
- CopyPartResult *CopyPartResult `type:"structure"`
-
- // The version of the source object that was copied, if you have enabled versioning
- // on the source bucket.
- CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
-
- // If present, indicates that the requester was successfully charged for the
- // request.
- RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
-
- // If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header confirming the encryption algorithm
- // used.
- SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
- // If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header to provide round trip message integrity
- // verification of the customer-provided encryption key.
- SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-
- // If present, specifies the ID of the AWS Key Management Service (KMS) master
- // encryption key that was used for the object.
- SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
-
- // The Server-side encryption algorithm used when storing this object in S3
- // (e.g., AES256, aws:kms).
- ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
-}
-
-// String returns the string representation
-func (s UploadPartCopyOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s UploadPartCopyOutput) GoString() string {
- return s.String()
-}
-
-type UploadPartInput struct {
- _ struct{} `type:"structure" payload:"Body"`
-
- // Object data.
- Body io.ReadSeeker `type:"blob"`
-
- // Name of the bucket to which the multipart upload was initiated.
- Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
- // Size of the body in bytes. This parameter is useful when the size of the
- // body cannot be determined automatically.
- ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
-
- // Object key for which the multipart upload was initiated.
- Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
- // Part number of part being uploaded. This is a positive integer between 1
- // and 10,000.
- PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
-
- // Confirms that the requester knows that she or he will be charged for the
- // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found
- // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
- RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-
- // Specifies the algorithm to use when encrypting the object (e.g., AES256).
- SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
- // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
- // data. This value is used to store the object and then it is discarded; Amazon
- // does not store the encryption key. The key must be appropriate for use with
- // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
- // header. This must be the same encryption key specified in the initiate multipart
- // upload request.
- SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Upload ID identifying the multipart upload whose part is being uploaded. - UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` -} - -// String returns the string representation -func (s UploadPartInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UploadPartInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UploadPartInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.PartNumber == nil { - invalidParams.Add(request.NewErrParamRequired("PartNumber")) - } - if s.UploadId == nil { - invalidParams.Add(request.NewErrParamRequired("UploadId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type UploadPartOutput struct { - _ struct{} `type:"structure"` - - // Entity tag for the uploaded object. - ETag *string `location:"header" locationName:"ETag" type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` -} - -// String returns the string representation -func (s UploadPartOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UploadPartOutput) GoString() string { - return s.String() -} - -type VersioningConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies whether MFA delete is enabled in the bucket versioning configuration. 
- // This element is only returned if the bucket has been configured with MFA - // delete. If the bucket has never been so configured, this element is not returned. - MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"` - - // The versioning state of the bucket. - Status *string `type:"string" enum:"BucketVersioningStatus"` -} - -// String returns the string representation -func (s VersioningConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s VersioningConfiguration) GoString() string { - return s.String() -} - -type WebsiteConfiguration struct { - _ struct{} `type:"structure"` - - ErrorDocument *ErrorDocument `type:"structure"` - - IndexDocument *IndexDocument `type:"structure"` - - RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` - - RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` -} - -// String returns the string representation -func (s WebsiteConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WebsiteConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *WebsiteConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"} - if s.ErrorDocument != nil { - if err := s.ErrorDocument.Validate(); err != nil { - invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams)) - } - } - if s.IndexDocument != nil { - if err := s.IndexDocument.Validate(); err != nil { - invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams)) - } - } - if s.RedirectAllRequestsTo != nil { - if err := s.RedirectAllRequestsTo.Validate(); err != nil { - invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams)) - } - } - if s.RoutingRules != nil { - for i, v := range s.RoutingRules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -const ( - // @enum BucketAccelerateStatus - BucketAccelerateStatusEnabled = "Enabled" - // @enum BucketAccelerateStatus - BucketAccelerateStatusSuspended = "Suspended" -) - -const ( - // @enum BucketCannedACL - BucketCannedACLPrivate = "private" - // @enum BucketCannedACL - BucketCannedACLPublicRead = "public-read" - // @enum BucketCannedACL - BucketCannedACLPublicReadWrite = "public-read-write" - // @enum BucketCannedACL - BucketCannedACLAuthenticatedRead = "authenticated-read" -) - -const ( - // @enum BucketLocationConstraint - BucketLocationConstraintEu = "EU" - // @enum BucketLocationConstraint - BucketLocationConstraintEuWest1 = "eu-west-1" - // @enum BucketLocationConstraint - BucketLocationConstraintUsWest1 = "us-west-1" - // @enum BucketLocationConstraint - BucketLocationConstraintUsWest2 = "us-west-2" - // @enum BucketLocationConstraint - BucketLocationConstraintApSouth1 = "ap-south-1" - // @enum BucketLocationConstraint - BucketLocationConstraintApSoutheast1 = "ap-southeast-1" - // @enum BucketLocationConstraint - BucketLocationConstraintApSoutheast2 = "ap-southeast-2" - // @enum BucketLocationConstraint - BucketLocationConstraintApNortheast1 = "ap-northeast-1" - // @enum BucketLocationConstraint - BucketLocationConstraintSaEast1 = "sa-east-1" - // @enum BucketLocationConstraint - 
BucketLocationConstraintCnNorth1 = "cn-north-1" - // @enum BucketLocationConstraint - BucketLocationConstraintEuCentral1 = "eu-central-1" -) - -const ( - // @enum BucketLogsPermission - BucketLogsPermissionFullControl = "FULL_CONTROL" - // @enum BucketLogsPermission - BucketLogsPermissionRead = "READ" - // @enum BucketLogsPermission - BucketLogsPermissionWrite = "WRITE" -) - -const ( - // @enum BucketVersioningStatus - BucketVersioningStatusEnabled = "Enabled" - // @enum BucketVersioningStatus - BucketVersioningStatusSuspended = "Suspended" -) - -// Requests Amazon S3 to encode the object keys in the response and specifies -// the encoding method to use. An object key may contain any Unicode character; -// however, XML 1.0 parser cannot parse some characters, such as characters -// with an ASCII value from 0 to 10. For characters that are not supported in -// XML 1.0, you can add this parameter to request that Amazon S3 encode the -// keys in the response. -const ( - // @enum EncodingType - EncodingTypeUrl = "url" -) - -// Bucket event for which to send notifications. -const ( - // @enum Event - EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" - // @enum Event - EventS3ObjectCreated = "s3:ObjectCreated:*" - // @enum Event - EventS3ObjectCreatedPut = "s3:ObjectCreated:Put" - // @enum Event - EventS3ObjectCreatedPost = "s3:ObjectCreated:Post" - // @enum Event - EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy" - // @enum Event - EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" - // @enum Event - EventS3ObjectRemoved = "s3:ObjectRemoved:*" - // @enum Event - EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete" - // @enum Event - EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" -) - -const ( - // @enum ExpirationStatus - ExpirationStatusEnabled = "Enabled" - // @enum ExpirationStatus - ExpirationStatusDisabled = "Disabled" -) - -const ( - // @enum FilterRuleName - FilterRuleNamePrefix = "prefix" - // @enum FilterRuleName - FilterRuleNameSuffix = "suffix" -) - -const ( - // @enum MFADelete - MFADeleteEnabled = "Enabled" - // @enum MFADelete - MFADeleteDisabled = "Disabled" -) - -const ( - // @enum MFADeleteStatus - MFADeleteStatusEnabled = "Enabled" - // @enum MFADeleteStatus - MFADeleteStatusDisabled = "Disabled" -) - -const ( - // @enum MetadataDirective - MetadataDirectiveCopy = "COPY" - // @enum MetadataDirective - MetadataDirectiveReplace = "REPLACE" -) - -const ( - // @enum ObjectCannedACL - ObjectCannedACLPrivate = "private" - // @enum ObjectCannedACL - ObjectCannedACLPublicRead = "public-read" - // @enum ObjectCannedACL - ObjectCannedACLPublicReadWrite = "public-read-write" - // @enum ObjectCannedACL - ObjectCannedACLAuthenticatedRead = "authenticated-read" - // @enum ObjectCannedACL - ObjectCannedACLAwsExecRead = "aws-exec-read" - // @enum ObjectCannedACL - ObjectCannedACLBucketOwnerRead = "bucket-owner-read" - // @enum ObjectCannedACL - ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" -) - -const ( - // @enum ObjectStorageClass - ObjectStorageClassStandard = "STANDARD" - // @enum ObjectStorageClass - ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY" - // @enum ObjectStorageClass - ObjectStorageClassGlacier = "GLACIER" -) - -const ( - // @enum ObjectVersionStorageClass - ObjectVersionStorageClassStandard = "STANDARD" -) - -const ( - // @enum Payer - PayerRequester = "Requester" - // @enum Payer - PayerBucketOwner = "BucketOwner" -) - -const ( - // @enum Permission 
- PermissionFullControl = "FULL_CONTROL" - // @enum Permission - PermissionWrite = "WRITE" - // @enum Permission - PermissionWriteAcp = "WRITE_ACP" - // @enum Permission - PermissionRead = "READ" - // @enum Permission - PermissionReadAcp = "READ_ACP" -) - -const ( - // @enum Protocol - ProtocolHttp = "http" - // @enum Protocol - ProtocolHttps = "https" -) - -const ( - // @enum ReplicationRuleStatus - ReplicationRuleStatusEnabled = "Enabled" - // @enum ReplicationRuleStatus - ReplicationRuleStatusDisabled = "Disabled" -) - -const ( - // @enum ReplicationStatus - ReplicationStatusComplete = "COMPLETE" - // @enum ReplicationStatus - ReplicationStatusPending = "PENDING" - // @enum ReplicationStatus - ReplicationStatusFailed = "FAILED" - // @enum ReplicationStatus - ReplicationStatusReplica = "REPLICA" -) - -// If present, indicates that the requester was successfully charged for the -// request. -const ( - // @enum RequestCharged - RequestChargedRequester = "requester" -) - -// Confirms that the requester knows that she or he will be charged for the -// request. Bucket owners need not specify this parameter in their requests. -// Documentation on downloading objects from requester pays buckets can be found -// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html -const ( - // @enum RequestPayer - RequestPayerRequester = "requester" -) - -const ( - // @enum ServerSideEncryption - ServerSideEncryptionAes256 = "AES256" - // @enum ServerSideEncryption - ServerSideEncryptionAwsKms = "aws:kms" -) - -const ( - // @enum StorageClass - StorageClassStandard = "STANDARD" - // @enum StorageClass - StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" - // @enum StorageClass - StorageClassStandardIa = "STANDARD_IA" -) - -const ( - // @enum TransitionStorageClass - TransitionStorageClassGlacier = "GLACIER" - // @enum TransitionStorageClass - TransitionStorageClassStandardIa = "STANDARD_IA" -) - -const ( - // @enum Type - TypeCanonicalUser = "CanonicalUser" - // @enum Type - TypeAmazonCustomerByEmail = "AmazonCustomerByEmail" - // @enum Type - TypeGroup = "Group" -) diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go deleted file mode 100644 index c3a2702d..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go +++ /dev/null @@ -1,43 +0,0 @@ -package s3 - -import ( - "io/ioutil" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" -) - -var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`) - -func buildGetBucketLocation(r *request.Request) { - if r.DataFilled() { - out := r.Data.(*GetBucketLocationOutput) - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.New("SerializationError", "failed reading response body", err) - return - } - - match := reBucketLocation.FindSubmatch(b) - if len(match) > 1 { - loc := string(match[1]) - out.LocationConstraint = &loc - } - } -} - -func populateLocationConstraint(r *request.Request) { - if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" { - in := r.Params.(*CreateBucketInput) - if in.CreateBucketConfiguration == nil { - r.Params = awsutil.CopyOf(r.Params) - in = r.Params.(*CreateBucketInput) - in.CreateBucketConfiguration = 
&CreateBucketConfiguration{ - LocationConstraint: r.Config.Region, - } - } - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go deleted file mode 100644 index 9fc5df94..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go +++ /dev/null @@ -1,36 +0,0 @@ -package s3 - -import ( - "crypto/md5" - "encoding/base64" - "io" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// contentMD5 computes and sets the HTTP Content-MD5 header for requests that -// require it. -func contentMD5(r *request.Request) { - h := md5.New() - - // hash the body. seek back to the first position after reading to reset - // the body for transmission. copy errors may be assumed to be from the - // body. - _, err := io.Copy(h, r.Body) - if err != nil { - r.Error = awserr.New("ContentMD5", "failed to read body", err) - return - } - _, err = r.Body.Seek(0, 0) - if err != nil { - r.Error = awserr.New("ContentMD5", "failed to seek body", err) - return - } - - // encode the md5 checksum in base64 and set the request header. - sum := h.Sum(nil) - sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum))) - base64.StdEncoding.Encode(sum64, sum) - r.HTTPRequest.Header.Set("Content-MD5", string(sum64)) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go deleted file mode 100644 index 84633472..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ /dev/null @@ -1,46 +0,0 @@ -package s3 - -import ( - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/request" -) - -func init() { - initClient = defaultInitClientFn - initRequest = defaultInitRequestFn -} - -func defaultInitClientFn(c *client.Client) { - // Support building custom endpoints based on config - c.Handlers.Build.PushFront(updateEndpointForS3Config) - - // Require SSL when using SSE keys - c.Handlers.Validate.PushBack(validateSSERequiresSSL) - c.Handlers.Build.PushBack(computeSSEKeys) - - // S3 uses custom error unmarshaling logic - c.Handlers.UnmarshalError.Clear() - c.Handlers.UnmarshalError.PushBack(unmarshalError) -} - -func defaultInitRequestFn(r *request.Request) { - // Add reuest handlers for specific platforms. - // e.g. 
100-continue support for PUT requests using Go 1.6 - platformRequestHandlers(r) - - switch r.Operation.Name { - case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, - opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration, - opPutBucketReplication: - // These S3 operations require Content-MD5 to be set - r.Handlers.Build.PushBack(contentMD5) - case opGetBucketLocation: - // GetBucketLocation has custom parsing logic - r.Handlers.Unmarshal.PushFront(buildGetBucketLocation) - case opCreateBucket: - // Auto-populate LocationConstraint with current region - r.Handlers.Validate.PushFront(populateLocationConstraint) - case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload: - r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go deleted file mode 100644 index 51729290..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go +++ /dev/null @@ -1,165 +0,0 @@ -package s3 - -import ( - "fmt" - "net/url" - "regexp" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" -) - -// an operationBlacklist is a list of operation names that should a -// request handler should not be executed with. -type operationBlacklist []string - -// Continue will return true of the Request's operation name is not -// in the blacklist. False otherwise. -func (b operationBlacklist) Continue(r *request.Request) bool { - for i := 0; i < len(b); i++ { - if b[i] == r.Operation.Name { - return false - } - } - return true -} - -var accelerateOpBlacklist = operationBlacklist{ - opListBuckets, opCreateBucket, opDeleteBucket, -} - -// Request handler to automatically add the bucket name to the endpoint domain -// if possible. This style of bucket is valid for all bucket names which are -// DNS compatible and do not contain "." -func updateEndpointForS3Config(r *request.Request) { - forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle) - accelerate := aws.BoolValue(r.Config.S3UseAccelerate) - - if accelerate && accelerateOpBlacklist.Continue(r) { - if forceHostStyle { - if r.Config.Logger != nil { - r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.") - } - } - updateEndpointForAccelerate(r) - } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation { - updateEndpointForHostStyle(r) - } -} - -func updateEndpointForHostStyle(r *request.Request) { - bucket, ok := bucketNameFromReqParams(r.Params) - if !ok { - // Ignore operation requests if the bucketname was not provided - // if this is an input validation error the validation handler - // will report it. - return - } - - if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) { - // bucket name must be valid to put into the host - return - } - - moveBucketToHost(r.HTTPRequest.URL, bucket) -} - -func updateEndpointForAccelerate(r *request.Request) { - bucket, ok := bucketNameFromReqParams(r.Params) - if !ok { - // Ignore operation requests if the bucketname was not provided - // if this is an input validation error the validation handler - // will report it. 
- return - } - - if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) { - r.Error = awserr.New("InvalidParameterException", - fmt.Sprintf("bucket name %s is not compatibile with S3 Accelerate", bucket), - nil) - return - } - - // Change endpoint from s3(-[a-z0-1-])?.amazonaws.com to s3-accelerate.amazonaws.com - r.HTTPRequest.URL.Host = replaceHostRegion(r.HTTPRequest.URL.Host, "accelerate") - moveBucketToHost(r.HTTPRequest.URL, bucket) -} - -// Attempts to retrieve the bucket name from the request input parameters. -// If no bucket is found, or the field is empty "", false will be returned. -func bucketNameFromReqParams(params interface{}) (string, bool) { - b, _ := awsutil.ValuesAtPath(params, "Bucket") - if len(b) == 0 { - return "", false - } - - if bucket, ok := b[0].(*string); ok { - if bucketStr := aws.StringValue(bucket); bucketStr != "" { - return bucketStr, true - } - } - - return "", false -} - -// hostCompatibleBucketName returns true if the request should -// put the bucket in the host. This is false if S3ForcePathStyle is -// explicitly set or if the bucket is not DNS compatible. -func hostCompatibleBucketName(u *url.URL, bucket string) bool { - // Bucket might be DNS compatible but dots in the hostname will fail - // certificate validation, so do not use host-style. - if u.Scheme == "https" && strings.Contains(bucket, ".") { - return false - } - - // if the bucket is DNS compatible - return dnsCompatibleBucketName(bucket) -} - -var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) -var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) - -// dnsCompatibleBucketName returns true if the bucket name is DNS compatible. -// Buckets created outside of the classic region MUST be DNS compatible. -func dnsCompatibleBucketName(bucket string) bool { - return reDomain.MatchString(bucket) && - !reIPAddress.MatchString(bucket) && - !strings.Contains(bucket, "..") -} - -// moveBucketToHost moves the bucket name from the URI path to URL host. -func moveBucketToHost(u *url.URL, bucket string) { - u.Host = bucket + "." + u.Host - u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1) - if u.Path == "" { - u.Path = "/" - } -} - -const s3HostPrefix = "s3" - -// replaceHostRegion replaces the S3 region string in the host with the -// value provided. If v is empty the host prefix returned will be s3. -func replaceHostRegion(host, v string) string { - if !strings.HasPrefix(host, s3HostPrefix) { - return host - } - - suffix := host[len(s3HostPrefix):] - for i := len(s3HostPrefix); i < len(host); i++ { - if host[i] == '.' { - // Trim until '.' leave the it in place. 
- suffix = host[i:] - break - } - } - - if len(v) == 0 { - return fmt.Sprintf("s3%s", suffix) - } - - return fmt.Sprintf("s3-%s%s", v, suffix) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go deleted file mode 100644 index 8e6f3307..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !go1.6 - -package s3 - -import "github.com/aws/aws-sdk-go/aws/request" - -func platformRequestHandlers(r *request.Request) { -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go deleted file mode 100644 index 14d05f7b..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build go1.6 - -package s3 - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -func platformRequestHandlers(r *request.Request) { - if r.Operation.HTTPMethod == "PUT" { - // 100-Continue should only be used on put requests. - r.Handlers.Sign.PushBack(add100Continue) - } -} - -func add100Continue(r *request.Request) { - if aws.BoolValue(r.Config.S3Disable100Continue) { - return - } - if r.HTTPRequest.ContentLength < 1024*1024*2 { - // Ignore requests smaller than 2MB. This helps prevent delaying - // requests unnecessarily. - return - } - - r.HTTPRequest.Header.Set("Expect", "100-Continue") -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/service.go deleted file mode 100644 index 5833952a..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/service.go +++ /dev/null @@ -1,86 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. - -package s3 - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/restxml" -) - -// S3 is a client for Amazon S3. -//The service client's operations are safe to be used concurrently. -// It is not safe to mutate any of the client's properties though. -type S3 struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// A ServiceName is the name of the service the client will make API calls to. -const ServiceName = "s3" - -// New creates a new instance of the S3 client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// // Create a S3 client from just a session. -// svc := s3.New(mySession) -// -// // Create a S3 client with additional configuration -// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { - c := p.ClientConfig(ServiceName, cfgs...) 
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) -} - -// newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *S3 { - svc := &S3{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - SigningRegion: signingRegion, - Endpoint: endpoint, - APIVersion: "2006-03-01", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a S3 operation and runs any -// custom request initialization. -func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go deleted file mode 100644 index 268ea2fb..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go +++ /dev/null @@ -1,44 +0,0 @@ -package s3 - -import ( - "crypto/md5" - "encoding/base64" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" -) - -var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil) - -func validateSSERequiresSSL(r *request.Request) { - if r.HTTPRequest.URL.Scheme != "https" { - p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey") - if len(p) > 0 { - r.Error = errSSERequiresSSL - } - } -} - -func computeSSEKeys(r *request.Request) { - headers := []string{ - "x-amz-server-side-encryption-customer-key", - "x-amz-copy-source-server-side-encryption-customer-key", - } - - for _, h := range headers { - md5h := h + "-md5" - if key := r.HTTPRequest.Header.Get(h); key != "" { - // Base64-encode the value - b64v := base64.StdEncoding.EncodeToString([]byte(key)) - r.HTTPRequest.Header.Set(h, b64v) - - // Add MD5 if it wasn't computed - if r.HTTPRequest.Header.Get(md5h) == "" { - sum := md5.Sum([]byte(key)) - b64sum := base64.StdEncoding.EncodeToString(sum[:]) - r.HTTPRequest.Header.Set(md5h, b64sum) - } - } - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go deleted file mode 100644 index ce65fcda..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go +++ /dev/null @@ -1,36 +0,0 @@ -package s3 - -import ( - "bytes" - "io/ioutil" - "net/http" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -func copyMultipartStatusOKUnmarhsalError(r *request.Request) { - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = 
awserr.New("SerializationError", "unable to read response body", err) - return - } - body := bytes.NewReader(b) - r.HTTPResponse.Body = aws.ReadSeekCloser(body) - defer r.HTTPResponse.Body.(aws.ReaderSeekerCloser).Seek(0, 0) - - if body.Len() == 0 { - // If there is no body don't attempt to parse the body. - return - } - - unmarshalError(r) - if err, ok := r.Error.(awserr.Error); ok && err != nil { - if err.Code() == "SerializationError" { - r.Error = nil - return - } - r.HTTPResponse.StatusCode = http.StatusServiceUnavailable - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go deleted file mode 100644 index 59e4950b..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go +++ /dev/null @@ -1,59 +0,0 @@ -package s3 - -import ( - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -type xmlErrorResponse struct { - XMLName xml.Name `xml:"Error"` - Code string `xml:"Code"` - Message string `xml:"Message"` -} - -func unmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - defer io.Copy(ioutil.Discard, r.HTTPResponse.Body) - - if r.HTTPResponse.StatusCode == http.StatusMovedPermanently { - r.Error = awserr.NewRequestFailure( - awserr.New("BucketRegionError", - fmt.Sprintf("incorrect region, the bucket is not in '%s' region", - aws.StringValue(r.Config.Region)), - nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - - if r.HTTPResponse.ContentLength == 0 { - // No body, use status code to generate an awserr.Error - r.Error = awserr.NewRequestFailure( - awserr.New(strings.Replace(r.HTTPResponse.Status, " ", "", -1), r.HTTPResponse.Status, nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } - - resp := &xmlErrorResponse{} - err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) - if err != nil && err != io.EOF { - r.Error = awserr.New("SerializationError", "failed to decode S3 XML error response", nil) - } else { - r.Error = awserr.NewRequestFailure( - awserr.New(resp.Code, resp.Message, nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go deleted file mode 100644 index cbd3d311..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go +++ /dev/null @@ -1,123 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
- -package s3 - -import ( - "github.com/aws/aws-sdk-go/private/waiter" -) - -func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error { - waiterCfg := waiter.Config{ - Operation: "HeadBucket", - Delay: 5, - MaxAttempts: 20, - Acceptors: []waiter.WaitAcceptor{ - { - State: "success", - Matcher: "status", - Argument: "", - Expected: 200, - }, - { - State: "success", - Matcher: "status", - Argument: "", - Expected: 301, - }, - { - State: "success", - Matcher: "status", - Argument: "", - Expected: 403, - }, - { - State: "retry", - Matcher: "status", - Argument: "", - Expected: 404, - }, - }, - } - - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() -} - -func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error { - waiterCfg := waiter.Config{ - Operation: "HeadBucket", - Delay: 5, - MaxAttempts: 20, - Acceptors: []waiter.WaitAcceptor{ - { - State: "success", - Matcher: "status", - Argument: "", - Expected: 404, - }, - }, - } - - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() -} - -func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error { - waiterCfg := waiter.Config{ - Operation: "HeadObject", - Delay: 5, - MaxAttempts: 20, - Acceptors: []waiter.WaitAcceptor{ - { - State: "success", - Matcher: "status", - Argument: "", - Expected: 200, - }, - { - State: "retry", - Matcher: "status", - Argument: "", - Expected: 404, - }, - }, - } - - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() -} - -func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error { - waiterCfg := waiter.Config{ - Operation: "HeadObject", - Delay: 5, - MaxAttempts: 20, - Acceptors: []waiter.WaitAcceptor{ - { - State: "success", - Matcher: "status", - Argument: "", - Expected: 404, - }, - }, - } - - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/.gitignore b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/.gitignore deleted file mode 100644 index 7adca943..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -testdata/conf_out.ini -ini.sublime-project -ini.sublime-workspace -testdata/conf_reflect.ini diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/LICENSE deleted file mode 100644 index 37ec93a1..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. 
-For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. 
- -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README.md deleted file mode 100644 index 1272038a..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README.md +++ /dev/null @@ -1,560 +0,0 @@ -ini [![Build Status](https://drone.io/github.com/go-ini/ini/status.png)](https://drone.io/github.com/go-ini/ini/latest) [![](http://gocover.io/_badge/github.com/go-ini/ini)](http://gocover.io/github.com/go-ini/ini) -=== - -![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) - -Package ini provides INI file read and write functionality in Go. - -[简体中文](README_ZH.md) - -## Feature - -- Load multiple data sources(`[]byte` or file) with overwrites. -- Read with recursion values. -- Read with parent-child sections. -- Read with auto-increment key names. -- Read with multiple-line values. -- Read with tons of helper methods. -- Read and convert values to Go types. -- Read and **WRITE** comments of sections and keys. -- Manipulate sections, keys and comments with ease. -- Keep sections and keys in order as you parse and save. - -## Installation - - go get gopkg.in/ini.v1 - -## Getting Started - -### Loading from data sources - -A **Data Source** is either raw data in type `[]byte` or a file name with type `string` and you can load **as many as** data sources you want. Passing other types will simply return an error. - -```go -cfg, err := ini.Load([]byte("raw data"), "filename") -``` - -Or start with an empty object: - -```go -cfg := ini.Empty() -``` - -When you cannot decide how many data sources to load at the beginning, you still able to **Append()** them later. - -```go -err := cfg.Append("other file", []byte("other raw data")) -``` - -### Working with sections - -To get a section, you would need to: - -```go -section, err := cfg.GetSection("section name") -``` - -For a shortcut for default section, just give an empty string as name: - -```go -section, err := cfg.GetSection("") -``` - -When you're pretty sure the section exists, following code could make your life easier: - -```go -section := cfg.Section("") -``` - -What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you. 
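Since the difference between `GetSection` (returns an error) and `Section` (auto-creates) is easy to miss in the deleted README above, here is a minimal sketch contrasting the two calls. It assumes the `gopkg.in/ini.v1` API as documented in that README; the section names and the printed error text are purely illustrative.

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	// Load from a raw []byte data source; a file path string would work the same way.
	cfg, err := ini.Load([]byte("[server]\nhost = localhost\n"))
	if err != nil {
		panic(err)
	}

	// GetSection reports an error when the section does not exist.
	if _, err := cfg.GetSection("missing"); err != nil {
		fmt.Println("GetSection failed:", err)
	}

	// Section never fails: it creates and returns the section on demand.
	s := cfg.Section("missing")
	fmt.Println("Section created:", s.Name())
}
```

In practice this means `Section` is convenient for read-mostly code paths, while `GetSection` is the safer choice when a missing section should be treated as a configuration error.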
- -To create a new section: - -```go -err := cfg.NewSection("new section") -``` - -To get a list of sections or section names: - -```go -sections := cfg.Sections() -names := cfg.SectionStrings() -``` - -### Working with keys - -To get a key under a section: - -```go -key, err := cfg.Section("").GetKey("key name") -``` - -Same rule applies to key operations: - -```go -key := cfg.Section("").Key("key name") -``` - -To create a new key: - -```go -err := cfg.Section("").NewKey("name", "value") -``` - -To get a list of keys or key names: - -```go -keys := cfg.Section("").Keys() -names := cfg.Section("").KeyStrings() -``` - -To get a clone hash of keys and corresponding values: - -```go -hash := cfg.GetSection("").KeysHash() -``` - -### Working with values - -To get a string value: - -```go -val := cfg.Section("").Key("key name").String() -``` - -To validate key value on the fly: - -```go -val := cfg.Section("").Key("key name").Validate(func(in string) string { - if len(in) == 0 { - return "default" - } - return in -}) -``` - -To get value with types: - -```go -// For boolean values: -// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On -// false when value is: 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off -v, err = cfg.Section("").Key("BOOL").Bool() -v, err = cfg.Section("").Key("FLOAT64").Float64() -v, err = cfg.Section("").Key("INT").Int() -v, err = cfg.Section("").Key("INT64").Int64() -v, err = cfg.Section("").Key("UINT").Uint() -v, err = cfg.Section("").Key("UINT64").Uint64() -v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) -v, err = cfg.Section("").Key("TIME").Time() // RFC3339 - -v = cfg.Section("").Key("BOOL").MustBool() -v = cfg.Section("").Key("FLOAT64").MustFloat64() -v = cfg.Section("").Key("INT").MustInt() -v = cfg.Section("").Key("INT64").MustInt64() -v = cfg.Section("").Key("UINT").MustUint() -v = cfg.Section("").Key("UINT64").MustUint64() -v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) -v = cfg.Section("").Key("TIME").MustTime() // RFC3339 - -// Methods start with Must also accept one argument for default value -// when key not found or fail to parse value to given type. -// Except method MustString, which you have to pass a default value. - -v = cfg.Section("").Key("String").MustString("default") -v = cfg.Section("").Key("BOOL").MustBool(true) -v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) -v = cfg.Section("").Key("INT").MustInt(10) -v = cfg.Section("").Key("INT64").MustInt64(99) -v = cfg.Section("").Key("UINT").MustUint(3) -v = cfg.Section("").Key("UINT64").MustUint64(6) -v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) -v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 -``` - -What if my value is three-line long? - -```ini -[advance] -ADDRESS = """404 road, -NotFound, State, 5000 -Earth""" -``` - -Not a problem! - -```go -cfg.Section("advance").Key("ADDRESS").String() - -/* --- start --- -404 road, -NotFound, State, 5000 -Earth ------- end --- */ -``` - -That's cool, how about continuation lines? - -```ini -[advance] -two_lines = how about \ - continuation lines? -lots_of_lines = 1 \ - 2 \ - 3 \ - 4 -``` - -Piece of cake! - -```go -cfg.Section("advance").Key("two_lines").String() // how about continuation lines? -cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 -``` - -Note that single quotes around values will be stripped: - -```ini -foo = "some value" // foo: some value -bar = 'some value' // bar: some value -``` - -That's all? Hmm, no. 
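To tie the typed getters and their `Must*` fallbacks together, the following is a small self-contained sketch against the `gopkg.in/ini.v1` API described above; the key names (`timeout`, `retries`, `verbose`) are made up for the example.

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	// A tiny in-memory config used only for illustration.
	cfg, err := ini.Load([]byte("timeout = 30\nverbose = yes\n"))
	if err != nil {
		panic(err)
	}
	sec := cfg.Section("") // default section

	// Typed getter: returns (30, nil) here, or an error on parse failure.
	timeout, err := sec.Key("timeout").Int()
	if err != nil {
		panic(err)
	}

	// Must* getters swallow missing keys and parse errors and return the default:
	// "retries" is absent, so 3 comes back; "yes" parses as boolean true.
	retries := sec.Key("retries").MustInt(3)
	verbose := sec.Key("verbose").MustBool(false)

	fmt.Println(timeout, retries, verbose) // 30 3 true
}
```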
- -#### Helper methods of working with values - -To get value with given candidates: - -```go -v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) -v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) -v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) -v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) -v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) -v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) -v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) -v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 -``` - -Default value will be presented if value of key is not in candidates you given, and default value does not need be one of candidates. - -To validate value in a given range: - -```go -vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) -vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) -vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) -vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) -vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) -vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) -vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 -``` - -To auto-split value into slice: - -```go -vals = cfg.Section("").Key("STRINGS").Strings(",") -vals = cfg.Section("").Key("FLOAT64S").Float64s(",") -vals = cfg.Section("").Key("INTS").Ints(",") -vals = cfg.Section("").Key("INT64S").Int64s(",") -vals = cfg.Section("").Key("UINTS").Uints(",") -vals = cfg.Section("").Key("UINT64S").Uint64s(",") -vals = cfg.Section("").Key("TIMES").Times(",") -``` - -### Save your configuration - -Finally, it's time to save your configuration to somewhere. - -A typical way to save configuration is writing it to a file: - -```go -// ... -err = cfg.SaveTo("my.ini") -err = cfg.SaveToIndent("my.ini", "\t") -``` - -Another way to save is writing to a `io.Writer` interface: - -```go -// ... -cfg.WriteTo(writer) -cfg.WriteToIndent(writer, "\t") -``` - -## Advanced Usage - -### Recursive Values - -For all value of keys, there is a special syntax `%()s`, where `` is the key name in same section or default section, and `%()s` will be replaced by corresponding value(empty string if key not found). You can use this syntax at most 99 level of recursions. - -```ini -NAME = ini - -[author] -NAME = Unknwon -GITHUB = https://github.com/%(NAME)s - -[package] -FULL_NAME = github.com/go-ini/%(NAME)s -``` - -```go -cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon -cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini -``` - -### Parent-child Sections - -You can use `.` in section name to indicate parent-child relationship between two or more sections. If the key not found in the child section, library will try again on its parent section until there is no parent section. - -```ini -NAME = ini -VERSION = v1 -IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s - -[package] -CLONE_URL = https://%(IMPORT_PATH)s - -[package.sub] -``` - -```go -cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 -``` - -### Auto-increment Key Names - -If key name is `-` in data source, then it would be seen as special syntax for auto-increment key name start from 1, and every section is independent on counter. 
- -```ini -[features] --: Support read/write comments of keys and sections --: Support auto-increment of key names --: Support load multiple files to overwrite key values -``` - -```go -cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} -``` - -### Map To Struct - -Want more objective way to play with INI? Cool. - -```ini -Name = Unknwon -age = 21 -Male = true -Born = 1993-01-01T20:17:05Z - -[Note] -Content = Hi is a good man! -Cities = HangZhou, Boston -``` - -```go -type Note struct { - Content string - Cities []string -} - -type Person struct { - Name string - Age int `ini:"age"` - Male bool - Born time.Time - Note - Created time.Time `ini:"-"` -} - -func main() { - cfg, err := ini.Load("path/to/ini") - // ... - p := new(Person) - err = cfg.MapTo(p) - // ... - - // Things can be simpler. - err = ini.MapTo(p, "path/to/ini") - // ... - - // Just map a section? Fine. - n := new(Note) - err = cfg.Section("Note").MapTo(n) - // ... -} -``` - -Can I have default value for field? Absolutely. - -Assign it before you map to struct. It will keep the value as it is if the key is not presented or got wrong type. - -```go -// ... -p := &Person{ - Name: "Joe", -} -// ... -``` - -It's really cool, but what's the point if you can't give me my file back from struct? - -### Reflect From Struct - -Why not? - -```go -type Embeded struct { - Dates []time.Time `delim:"|"` - Places []string - None []int -} - -type Author struct { - Name string `ini:"NAME"` - Male bool - Age int - GPA float64 - NeverMind string `ini:"-"` - *Embeded -} - -func main() { - a := &Author{"Unknwon", true, 21, 2.8, "", - &Embeded{ - []time.Time{time.Now(), time.Now()}, - []string{"HangZhou", "Boston"}, - []int{}, - }} - cfg := ini.Empty() - err = ini.ReflectFrom(cfg, a) - // ... -} -``` - -So, what do I get? - -```ini -NAME = Unknwon -Male = true -Age = 21 -GPA = 2.8 - -[Embeded] -Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 -Places = HangZhou,Boston -None = -``` - -#### Name Mapper - -To save your time and make your code cleaner, this library supports [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field and actual section and key name. - -There are 2 built-in name mappers: - -- `AllCapsUnderscore`: it converts to format `ALL_CAPS_UNDERSCORE` then match section or key. -- `TitleUnderscore`: it converts to format `title_underscore` then match section or key. - -To use them: - -```go -type Info struct { - PackageName string -} - -func main() { - err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini")) - // ... - - cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) - // ... - info := new(Info) - cfg.NameMapper = ini.AllCapsUnderscore - err = cfg.MapTo(info) - // ... -} -``` - -Same rules of name mapper apply to `ini.ReflectFromWithMapper` function. - -#### Other Notes On Map/Reflect - -Any embedded struct is treated as a section by default, and there is no automatic parent-child relations in map/reflect feature: - -```go -type Child struct { - Age string -} - -type Parent struct { - Name string - Child -} - -type Config struct { - City string - Parent -} -``` - -Example configuration: - -```ini -City = Boston - -[Parent] -Name = Unknwon - -[Child] -Age = 21 -``` - -What if, yes, I'm paranoid, I want embedded struct to be in the same section. Well, all roads lead to Rome. 
- -```go -type Child struct { - Age string -} - -type Parent struct { - Name string - Child `ini:"Parent"` -} - -type Config struct { - City string - Parent -} -``` - -Example configuration: - -```ini -City = Boston - -[Parent] -Name = Unknwon -Age = 21 -``` - -## Getting Help - -- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) -- [File An Issue](https://github.com/go-ini/ini/issues/new) - -## FAQs - -### What does `BlockMode` field do? - -By default, library lets you read and write values so we need a locker to make sure your data is safe. But in cases that you are very sure about only reading data through the library, you can set `cfg.BlockMode = false` to speed up read operations about **50-70%** faster. - -### Why another INI library? - -Many people are using my another INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is I would like to make more Go style code. Also when you set `cfg.BlockMode = false`, this one is about **10-30%** faster. - -To make those changes I have to confirm API broken, so it's safer to keep it in another place and start using `gopkg.in` to version my package at this time.(PS: shorter import path) - -## License - -This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README_ZH.md deleted file mode 100644 index 45e19edd..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README_ZH.md +++ /dev/null @@ -1,547 +0,0 @@ -本包提供了 Go 语言中读写 INI 文件的功能。 - -## 功能特性 - -- 支持覆盖加载多个数据源(`[]byte` 或文件) -- 支持递归读取键值 -- 支持读取父子分区 -- 支持读取自增键名 -- 支持读取多行的键值 -- 支持大量辅助方法 -- 支持在读取时直接转换为 Go 语言类型 -- 支持读取和 **写入** 分区和键的注释 -- 轻松操作分区、键值和注释 -- 在保存文件时分区和键值会保持原有的顺序 - -## 下载安装 - - go get gopkg.in/ini.v1 - -## 开始使用 - -### 从数据源加载 - -一个 **数据源** 可以是 `[]byte` 类型的原始数据,或 `string` 类型的文件路径。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 - -```go -cfg, err := ini.Load([]byte("raw data"), "filename") -``` - -或者从一个空白的文件开始: - -```go -cfg := ini.Empty() -``` - -当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。 - -```go -err := cfg.Append("other file", []byte("other raw data")) -``` - -### 操作分区(Section) - -获取指定分区: - -```go -section, err := cfg.GetSection("section name") -``` - -如果您想要获取默认分区,则可以用空字符串代替分区名: - -```go -section, err := cfg.GetSection("") -``` - -当您非常确定某个分区是存在的,可以使用以下简便方法: - -```go -section := cfg.Section("") -``` - -如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。 - -创建一个分区: - -```go -err := cfg.NewSection("new section") -``` - -获取所有分区对象或名称: - -```go -sections := cfg.Sections() -names := cfg.SectionStrings() -``` - -### 操作键(Key) - -获取某个分区下的键: - -```go -key, err := cfg.Section("").GetKey("key name") -``` - -和分区一样,您也可以直接获取键而忽略错误处理: - -```go -key := cfg.Section("").Key("key name") -``` - -创建一个新的键: - -```go -err := cfg.Section("").NewKey("name", "value") -``` - -获取分区下的所有键或键名: - -```go -keys := cfg.Section("").Keys() -names := cfg.Section("").KeyStrings() -``` - -获取分区下的所有键值对的克隆: - -```go -hash := cfg.GetSection("").KeysHash() -``` - -### 操作键值(Value) - -获取一个类型为字符串(string)的值: - -```go -val := cfg.Section("").Key("key name").String() -``` - -获取值的同时通过自定义函数进行处理验证: - -```go -val := cfg.Section("").Key("key name").Validate(func(in string) string { - if len(in) == 0 { - return "default" - } - return in -}) -``` - -获取其它类型的值: - -```go 
-// Rules for boolean values:
-// true  when the value is: 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On
-// false when the value is: 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off
-v, err = cfg.Section("").Key("BOOL").Bool()
-v, err = cfg.Section("").Key("FLOAT64").Float64()
-v, err = cfg.Section("").Key("INT").Int()
-v, err = cfg.Section("").Key("INT64").Int64()
-v, err = cfg.Section("").Key("UINT").Uint()
-v, err = cfg.Section("").Key("UINT64").Uint64()
-v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
-v, err = cfg.Section("").Key("TIME").Time() // RFC3339
-
-v = cfg.Section("").Key("BOOL").MustBool()
-v = cfg.Section("").Key("FLOAT64").MustFloat64()
-v = cfg.Section("").Key("INT").MustInt()
-v = cfg.Section("").Key("INT64").MustInt64()
-v = cfg.Section("").Key("UINT").MustUint()
-v = cfg.Section("").Key("UINT64").MustUint64()
-v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
-v = cfg.Section("").Key("TIME").MustTime() // RFC3339
-
-// Methods whose names start with Must accept an optional default value of the
-// same type; when the key does not exist or the conversion fails, that default
-// value is returned directly.
-// MustString, however, always requires a default value.
-
-v = cfg.Section("").Key("String").MustString("default")
-v = cfg.Section("").Key("BOOL").MustBool(true)
-v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
-v = cfg.Section("").Key("INT").MustInt(10)
-v = cfg.Section("").Key("INT64").MustInt64(99)
-v = cfg.Section("").Key("UINT").MustUint(3)
-v = cfg.Section("").Key("UINT64").MustUint64(6)
-v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
-v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
-```
-
-What if my value spans multiple lines?
-
-```ini
-[advance]
-ADDRESS = """404 road,
-NotFound, State, 5000
-Earth"""
-```
-
-Hmm? Piece of cake!
-
-```go
-cfg.Section("advance").Key("ADDRESS").String()
-
-/* --- start ---
-404 road,
-NotFound, State, 5000
-Earth
------- end --- */
-```
-
-Awesome! And what if one line is not enough and I want continuation lines?
-
-```ini
-[advance]
-two_lines = how about \
-    continuation lines?
-lots_of_lines = 1 \
-    2 \
-    3 \
-    4
-```
-
-Easy as pie!
-
-```go
-cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
-cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 -``` - -需要注意的是,值两侧的单引号会被自动剔除: - -```ini -foo = "some value" // foo: some value -bar = 'some value' // bar: some value -``` - -这就是全部了?哈哈,当然不是。 - -#### 操作键值的辅助方法 - -获取键值时设定候选值: - -```go -v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) -v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) -v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) -v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) -v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) -v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) -v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) -v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 -``` - -如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。 - -验证获取的值是否在指定范围内: - -```go -vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) -vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) -vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) -vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) -vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) -vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) -vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 -``` - -自动分割键值为切片(slice): - -```go -vals = cfg.Section("").Key("STRINGS").Strings(",") -vals = cfg.Section("").Key("FLOAT64S").Float64s(",") -vals = cfg.Section("").Key("INTS").Ints(",") -vals = cfg.Section("").Key("INT64S").Int64s(",") -vals = cfg.Section("").Key("UINTS").Uints(",") -vals = cfg.Section("").Key("UINT64S").Uint64s(",") -vals = cfg.Section("").Key("TIMES").Times(",") -``` - -### 保存配置 - -终于到了这个时刻,是时候保存一下配置了。 - -比较原始的做法是输出配置到某个文件: - -```go -// ... -err = cfg.SaveTo("my.ini") -err = cfg.SaveToIndent("my.ini", "\t") -``` - -另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中: - -```go -// ... -cfg.WriteTo(writer) -cfg.WriteToIndent(writer, "\t") -``` - -### 高级用法 - -#### 递归读取键值 - -在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。 - -```ini -NAME = ini - -[author] -NAME = Unknwon -GITHUB = https://github.com/%(NAME)s - -[package] -FULL_NAME = github.com/go-ini/%(NAME)s -``` - -```go -cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon -cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini -``` - -#### 读取父子分区 - -您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。 - -```ini -NAME = ini -VERSION = v1 -IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s - -[package] -CLONE_URL = https://%(IMPORT_PATH)s - -[package.sub] -``` - -```go -cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 -``` - -#### 读取自增键名 - -如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。 - -```ini -[features] --: Support read/write comments of keys and sections --: Support auto-increment of key names --: Support load multiple files to overwrite key values -``` - -```go -cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} -``` - -### 映射到结构 - -想要使用更加面向对象的方式玩转 INI 吗?好主意。 - -```ini -Name = Unknwon -age = 21 -Male = true -Born = 1993-01-01T20:17:05Z - -[Note] -Content = Hi is a good man! 
-Cities = HangZhou, Boston
-```
-
-```go
-type Note struct {
-    Content string
-    Cities  []string
-}
-
-type Person struct {
-    Name string
-    Age  int `ini:"age"`
-    Male bool
-    Born time.Time
-    Note
-    Created time.Time `ini:"-"`
-}
-
-func main() {
-    cfg, err := ini.Load("path/to/ini")
-    // ...
-    p := new(Person)
-    err = cfg.MapTo(p)
-    // ...
-
-    // Things can be this simple.
-    err = ini.MapTo(p, "path/to/ini")
-    // ...
-
-    // Hmm? Only need to map one section?
-    n := new(Note)
-    err = cfg.Section("Note").MapTo(n)
-    // ...
-}
-```
-
-How do you give struct fields default values? Easy: just assign the field before mapping. If the key is not found or the type is wrong, the value is left unchanged.
-
-```go
-// ...
-p := &Person{
-    Name: "Joe",
-}
-// ...
-```
-
-Working with INI like this is really cool! But what good is it if I can't get my original configuration file back?
-
-### Reflecting from structs
-
-Did I say you couldn't?
-
-```go
-type Embeded struct {
-    Dates  []time.Time `delim:"|"`
-    Places []string
-    None   []int
-}
-
-type Author struct {
-    Name      string `ini:"NAME"`
-    Male      bool
-    Age       int
-    GPA       float64
-    NeverMind string `ini:"-"`
-    *Embeded
-}
-
-func main() {
-    a := &Author{"Unknwon", true, 21, 2.8, "",
-        &Embeded{
-            []time.Time{time.Now(), time.Now()},
-            []string{"HangZhou", "Boston"},
-            []int{},
-        }}
-    cfg := ini.Empty()
-    err = ini.ReflectFrom(cfg, a)
-    // ...
-}
-```
-
-Look, a miracle happened.
-
-```ini
-NAME = Unknwon
-Male = true
-Age = 21
-GPA = 2.8
-
-[Embeded]
-Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
-Places = HangZhou,Boston
-None =
-```
-
-#### Name Mapper
-
-To save your time and simplify your code, this library supports name mappers of type [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper), which handle the mapping between struct field names and section and key names.
-
-There are currently two built-in mappers:
-
-- `AllCapsUnderscore`: converts field names to the `ALL_CAPS_UNDERSCORE` format before matching section and key names.
-- `TitleUnderscore`: converts field names to the `title_underscore` format before matching section and key names.
-
-Usage:
-
-```go
-type Info struct{
-    PackageName string
-}
-
-func main() {
-    err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini"))
-    // ...
-
-    cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
-    // ...
-    info := new(Info)
-    cfg.NameMapper = ini.AllCapsUnderscore
-    err = cfg.MapTo(info)
-    // ...
-}
-```
-
-The same rules apply when using the function `ini.ReflectFromWithMapper`.
-
-#### Other notes on mapping and reflecting
-
-Any embedded struct is treated as its own section by default, and no parent-child section relationship is created automatically:
-
-```go
-type Child struct {
-    Age string
-}
-
-type Parent struct {
-    Name string
-    Child
-}
-
-type Config struct {
-    City string
-    Parent
-}
-```
-
-Example configuration file:
-
-```ini
-City = Boston
-
-[Parent]
-Name = Unknwon
-
-[Child]
-Age = 21
-```
-
-Fine. But what if I want the embedded struct in the same section anyway? Alright, you win!
-
-```go
-type Child struct {
-    Age string
-}
-
-type Parent struct {
-    Name string
-    Child `ini:"Parent"`
-}
-
-type Config struct {
-    City string
-    Parent
-}
-```
-
-Example configuration file:
-
-```ini
-City = Boston
-
-[Parent]
-Name = Unknwon
-Age = 21
-```
-
-## Getting Help
-
-- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
-- [File An Issue](https://github.com/go-ini/ini/issues/new)
-
-## FAQs
-
-### What does the `BlockMode` field do?
-
-By default, this library uses locks around read and write operations to keep your data safe. If you are certain you will only read data through the library, you can set `cfg.BlockMode = false` to speed up read operations by about **50-70%**.
-
-### Why another INI library?
-
-Many people use my [goconfig](https://github.com/Unknwon/goconfig) library to work with INI files, but I wanted code in a more Go-like style. Besides, when you set `cfg.BlockMode = false`, this library gives roughly a **10-30%** performance boost.
-
-Making those changes required breaking the API, so starting a fresh repository was the safest approach. On top of that, this library is versioned directly through `gopkg.in`. (The real truth is that the import path got shorter.)
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/ini.go
deleted file mode 100644
index 1fee789a..00000000
--- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/ini.go
+++ /dev/null
@@ -1,1226 +0,0 @@
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-// Package ini provides INI file read and write functionality in Go.
-package ini
-
-import (
-    "bufio"
-    "bytes"
-    "errors"
-    "fmt"
-    "io"
-    "os"
-    "regexp"
-    "runtime"
-    "strconv"
-    "strings"
-    "sync"
-    "time"
-)
-
-const (
-    DEFAULT_SECTION = "DEFAULT"
-    // Maximum allowed depth when recursively substituting variable names.
-    _DEPTH_VALUES = 99
-
-    _VERSION = "1.6.0"
-)
-
-func Version() string {
-    return _VERSION
-}
-
-var (
-    LineBreak = "\n"
-
-    // Variable regexp pattern: %(variable)s
-    varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
-
-    // Write spaces around "=" to look better.
-    PrettyFormat = true
-)
-
-func init() {
-    if runtime.GOOS == "windows" {
-        LineBreak = "\r\n"
-    }
-}
-
-func inSlice(str string, s []string) bool {
-    for _, v := range s {
-        if str == v {
-            return true
-        }
-    }
-    return false
-}
-
-// dataSource is an interface that returns file content.
-type dataSource interface {
-    ReadCloser() (io.ReadCloser, error)
-}
-
-type sourceFile struct {
-    name string
-}
-
-func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
-    return os.Open(s.name)
-}
-
-type bytesReadCloser struct {
-    reader io.Reader
-}
-
-func (rc *bytesReadCloser) Read(p []byte) (n int, err error) {
-    return rc.reader.Read(p)
-}
-
-func (rc *bytesReadCloser) Close() error {
-    return nil
-}
-
-type sourceData struct {
-    data []byte
-}
-
-func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
-    return &bytesReadCloser{bytes.NewReader(s.data)}, nil
-}
-
-//  ____  __.
-// |    |/ _|____ ___.__.
-// |      <_/ __ <   |  |
-// |    |  \  ___/\___  |
-// |____|__ \___  > ____|
-//         \/   \/\/
-
-// Key represents a key under a section.
-type Key struct {
-    s          *Section
-    Comment    string
-    name       string
-    value      string
-    isAutoIncr bool
-}
-
-// Name returns name of key.
-func (k *Key) Name() string {
-    return k.name
-}
-
-// Value returns raw value of key for performance purpose.
-func (k *Key) Value() string {
-    return k.value
-}
-
-// String returns string representation of value.
-func (k *Key) String() string {
-    val := k.value
-    if strings.Index(val, "%") == -1 {
-        return val
-    }
-
-    for i := 0; i < _DEPTH_VALUES; i++ {
-        vr := varPattern.FindString(val)
-        if len(vr) == 0 {
-            break
-        }
-
-        // Take off leading '%(' and trailing ')s'.
- noption := strings.TrimLeft(vr, "%(") - noption = strings.TrimRight(noption, ")s") - - // Search in the same section. - nk, err := k.s.GetKey(noption) - if err != nil { - // Search again in default section. - nk, _ = k.s.f.Section("").GetKey(noption) - } - - // Substitute by new value and take off leading '%(' and trailing ')s'. - val = strings.Replace(val, vr, nk.value, -1) - } - return val -} - -// Validate accepts a validate function which can -// return modifed result as key value. -func (k *Key) Validate(fn func(string) string) string { - return fn(k.String()) -} - -// parseBool returns the boolean value represented by the string. -// -// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On, -// 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off. -// Any other value returns an error. -func parseBool(str string) (value bool, err error) { - switch str { - case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "ON", "on", "On": - return true, nil - case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "OFF", "off", "Off": - return false, nil - } - return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) -} - -// Bool returns bool type value. -func (k *Key) Bool() (bool, error) { - return parseBool(k.String()) -} - -// Float64 returns float64 type value. -func (k *Key) Float64() (float64, error) { - return strconv.ParseFloat(k.String(), 64) -} - -// Int returns int type value. -func (k *Key) Int() (int, error) { - return strconv.Atoi(k.String()) -} - -// Int64 returns int64 type value. -func (k *Key) Int64() (int64, error) { - return strconv.ParseInt(k.String(), 10, 64) -} - -// Uint returns uint type valued. -func (k *Key) Uint() (uint, error) { - u, e := strconv.ParseUint(k.String(), 10, 64) - return uint(u), e -} - -// Uint64 returns uint64 type value. -func (k *Key) Uint64() (uint64, error) { - return strconv.ParseUint(k.String(), 10, 64) -} - -// Duration returns time.Duration type value. -func (k *Key) Duration() (time.Duration, error) { - return time.ParseDuration(k.String()) -} - -// TimeFormat parses with given format and returns time.Time type value. -func (k *Key) TimeFormat(format string) (time.Time, error) { - return time.Parse(format, k.String()) -} - -// Time parses with RFC3339 format and returns time.Time type value. -func (k *Key) Time() (time.Time, error) { - return k.TimeFormat(time.RFC3339) -} - -// MustString returns default value if key value is empty. -func (k *Key) MustString(defaultVal string) string { - val := k.String() - if len(val) == 0 { - return defaultVal - } - return val -} - -// MustBool always returns value without error, -// it returns false if error occurs. -func (k *Key) MustBool(defaultVal ...bool) bool { - val, err := k.Bool() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustFloat64 always returns value without error, -// it returns 0.0 if error occurs. -func (k *Key) MustFloat64(defaultVal ...float64) float64 { - val, err := k.Float64() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustInt always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt(defaultVal ...int) int { - val, err := k.Int() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustInt64 always returns value without error, -// it returns 0 if error occurs. 
-func (k *Key) MustInt64(defaultVal ...int64) int64 { - val, err := k.Int64() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustUint always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint(defaultVal ...uint) uint { - val, err := k.Uint() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustUint64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint64(defaultVal ...uint64) uint64 { - val, err := k.Uint64() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustDuration always returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { - val, err := k.Duration() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustTimeFormat always parses with given format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { - val, err := k.TimeFormat(format) - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustTime always parses with RFC3339 format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTime(defaultVal ...time.Time) time.Time { - return k.MustTimeFormat(time.RFC3339, defaultVal...) -} - -// In always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) In(defaultVal string, candidates []string) string { - val := k.String() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InFloat64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { - val := k.MustFloat64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt(defaultVal int, candidates []int) int { - val := k.MustInt() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { - val := k.MustInt64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint(defaultVal uint, candidates []uint) uint { - val := k.MustUint() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. 
-func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { - val := k.MustUint64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTimeFormat always parses with given format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { - val := k.MustTimeFormat(format) - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTime always parses with RFC3339 format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { - return k.InTimeFormat(time.RFC3339, defaultVal, candidates) -} - -// RangeFloat64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { - val := k.MustFloat64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt(defaultVal, min, max int) int { - val := k.MustInt() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { - val := k.MustInt64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeTimeFormat checks if value with given format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { - val := k.MustTimeFormat(format) - if val.Unix() < min.Unix() || val.Unix() > max.Unix() { - return defaultVal - } - return val -} - -// RangeTime checks if value with RFC3339 format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { - return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) -} - -// Strings returns list of string devide by given delimiter. -func (k *Key) Strings(delim string) []string { - str := k.String() - if len(str) == 0 { - return []string{} - } - - vals := strings.Split(str, delim) - for i := range vals { - vals[i] = strings.TrimSpace(vals[i]) - } - return vals -} - -// Float64s returns list of float64 devide by given delimiter. -func (k *Key) Float64s(delim string) []float64 { - strs := k.Strings(delim) - vals := make([]float64, len(strs)) - for i := range strs { - vals[i], _ = strconv.ParseFloat(strs[i], 64) - } - return vals -} - -// Ints returns list of int devide by given delimiter. -func (k *Key) Ints(delim string) []int { - strs := k.Strings(delim) - vals := make([]int, len(strs)) - for i := range strs { - vals[i], _ = strconv.Atoi(strs[i]) - } - return vals -} - -// Int64s returns list of int64 devide by given delimiter. -func (k *Key) Int64s(delim string) []int64 { - strs := k.Strings(delim) - vals := make([]int64, len(strs)) - for i := range strs { - vals[i], _ = strconv.ParseInt(strs[i], 10, 64) - } - return vals -} - -// Uints returns list of uint devide by given delimiter. 
-func (k *Key) Uints(delim string) []uint { - strs := k.Strings(delim) - vals := make([]uint, len(strs)) - for i := range strs { - u, _ := strconv.ParseUint(strs[i], 10, 64) - vals[i] = uint(u) - } - return vals -} - -// Uint64s returns list of uint64 devide by given delimiter. -func (k *Key) Uint64s(delim string) []uint64 { - strs := k.Strings(delim) - vals := make([]uint64, len(strs)) - for i := range strs { - vals[i], _ = strconv.ParseUint(strs[i], 10, 64) - } - return vals -} - -// TimesFormat parses with given format and returns list of time.Time devide by given delimiter. -func (k *Key) TimesFormat(format, delim string) []time.Time { - strs := k.Strings(delim) - vals := make([]time.Time, len(strs)) - for i := range strs { - vals[i], _ = time.Parse(format, strs[i]) - } - return vals -} - -// Times parses with RFC3339 format and returns list of time.Time devide by given delimiter. -func (k *Key) Times(delim string) []time.Time { - return k.TimesFormat(time.RFC3339, delim) -} - -// SetValue changes key value. -func (k *Key) SetValue(v string) { - k.value = v -} - -// _________ __ .__ -// / _____/ ____ _____/ |_|__| ____ ____ -// \_____ \_/ __ \_/ ___\ __\ |/ _ \ / \ -// / \ ___/\ \___| | | ( <_> ) | \ -// /_______ /\___ >\___ >__| |__|\____/|___| / -// \/ \/ \/ \/ - -// Section represents a config section. -type Section struct { - f *File - Comment string - name string - keys map[string]*Key - keyList []string - keysHash map[string]string -} - -func newSection(f *File, name string) *Section { - return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)} -} - -// Name returns name of Section. -func (s *Section) Name() string { - return s.name -} - -// NewKey creates a new key to given section. -func (s *Section) NewKey(name, val string) (*Key, error) { - if len(name) == 0 { - return nil, errors.New("error creating new key: empty key name") - } - - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - if inSlice(name, s.keyList) { - s.keys[name].value = val - return s.keys[name], nil - } - - s.keyList = append(s.keyList, name) - s.keys[name] = &Key{s, "", name, val, false} - s.keysHash[name] = val - return s.keys[name], nil -} - -// GetKey returns key in section by given name. -func (s *Section) GetKey(name string) (*Key, error) { - // FIXME: change to section level lock? - if s.f.BlockMode { - s.f.lock.RLock() - } - key := s.keys[name] - if s.f.BlockMode { - s.f.lock.RUnlock() - } - - if key == nil { - // Check if it is a child-section. - sname := s.name - for { - if i := strings.LastIndex(sname, "."); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - return sec.GetKey(name) - } else { - break - } - } - return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) - } - return key, nil -} - -// Key assumes named Key exists in section and returns a zero-value when not. -func (s *Section) Key(name string) *Key { - key, err := s.GetKey(name) - if err != nil { - // It's OK here because the only possible error is empty key name, - // but if it's empty, this piece of code won't be executed. - key, _ = s.NewKey(name, "") - return key - } - return key -} - -// Keys returns list of keys of section. -func (s *Section) Keys() []*Key { - keys := make([]*Key, len(s.keyList)) - for i := range s.keyList { - keys[i] = s.Key(s.keyList[i]) - } - return keys -} - -// KeyStrings returns list of key names of section. 
-func (s *Section) KeyStrings() []string { - list := make([]string, len(s.keyList)) - copy(list, s.keyList) - return list -} - -// KeysHash returns keys hash consisting of names and values. -func (s *Section) KeysHash() map[string]string { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - hash := map[string]string{} - for key, value := range s.keysHash { - hash[key] = value - } - return hash -} - -// DeleteKey deletes a key from section. -func (s *Section) DeleteKey(name string) { - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - for i, k := range s.keyList { - if k == name { - s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) - delete(s.keys, name) - return - } - } -} - -// ___________.__.__ -// \_ _____/|__| | ____ -// | __) | | | _/ __ \ -// | \ | | |_\ ___/ -// \___ / |__|____/\___ > -// \/ \/ - -// File represents a combination of a or more INI file(s) in memory. -type File struct { - // Should make things safe, but sometimes doesn't matter. - BlockMode bool - // Make sure data is safe in multiple goroutines. - lock sync.RWMutex - - // Allow combination of multiple data sources. - dataSources []dataSource - // Actual data is stored here. - sections map[string]*Section - - // To keep data in order. - sectionList []string - - NameMapper -} - -// newFile initializes File object with given data sources. -func newFile(dataSources []dataSource) *File { - return &File{ - BlockMode: true, - dataSources: dataSources, - sections: make(map[string]*Section), - sectionList: make([]string, 0, 10), - } -} - -func parseDataSource(source interface{}) (dataSource, error) { - switch s := source.(type) { - case string: - return sourceFile{s}, nil - case []byte: - return &sourceData{s}, nil - default: - return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) - } -} - -// Load loads and parses from INI data sources. -// Arguments can be mixed of file name with string type, or raw data in []byte. -func Load(source interface{}, others ...interface{}) (_ *File, err error) { - sources := make([]dataSource, len(others)+1) - sources[0], err = parseDataSource(source) - if err != nil { - return nil, err - } - for i := range others { - sources[i+1], err = parseDataSource(others[i]) - if err != nil { - return nil, err - } - } - f := newFile(sources) - return f, f.Reload() -} - -// Empty returns an empty file object. -func Empty() *File { - // Ignore error here, we sure our data is good. - f, _ := Load([]byte("")) - return f -} - -// NewSection creates a new section. -func (f *File) NewSection(name string) (*Section, error) { - if len(name) == 0 { - return nil, errors.New("error creating new section: empty section name") - } - - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if inSlice(name, f.sectionList) { - return f.sections[name], nil - } - - f.sectionList = append(f.sectionList, name) - f.sections[name] = newSection(f, name) - return f.sections[name], nil -} - -// NewSections creates a list of sections. -func (f *File) NewSections(names ...string) (err error) { - for _, name := range names { - if _, err = f.NewSection(name); err != nil { - return err - } - } - return nil -} - -// GetSection returns section by given name. 
-func (f *File) GetSection(name string) (*Section, error) { - if len(name) == 0 { - name = DEFAULT_SECTION - } - - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - sec := f.sections[name] - if sec == nil { - return nil, fmt.Errorf("error when getting section: section '%s' not exists", name) - } - return sec, nil -} - -// Section assumes named section exists and returns a zero-value when not. -func (f *File) Section(name string) *Section { - sec, err := f.GetSection(name) - if err != nil { - // Note: It's OK here because the only possible error is empty section name, - // but if it's empty, this piece of code won't be executed. - sec, _ = f.NewSection(name) - return sec - } - return sec -} - -// Section returns list of Section. -func (f *File) Sections() []*Section { - sections := make([]*Section, len(f.sectionList)) - for i := range f.sectionList { - sections[i] = f.Section(f.sectionList[i]) - } - return sections -} - -// SectionStrings returns list of section names. -func (f *File) SectionStrings() []string { - list := make([]string, len(f.sectionList)) - copy(list, f.sectionList) - return list -} - -// DeleteSection deletes a section. -func (f *File) DeleteSection(name string) { - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if len(name) == 0 { - name = DEFAULT_SECTION - } - - for i, s := range f.sectionList { - if s == name { - f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) - delete(f.sections, name) - return - } - } -} - -func cutComment(str string) string { - i := strings.Index(str, "#") - if i == -1 { - return str - } - return str[:i] -} - -func checkMultipleLines(buf *bufio.Reader, line, val, valQuote string) (string, error) { - isEnd := false - for { - next, err := buf.ReadString('\n') - if err != nil { - if err != io.EOF { - return "", err - } - isEnd = true - } - pos := strings.LastIndex(next, valQuote) - if pos > -1 { - val += next[:pos] - break - } - val += next - if isEnd { - return "", fmt.Errorf("error parsing line: missing closing key quote from '%s' to '%s'", line, next) - } - } - return val, nil -} - -func checkContinuationLines(buf *bufio.Reader, val string) (string, bool, error) { - isEnd := false - for { - valLen := len(val) - if valLen == 0 || val[valLen-1] != '\\' { - break - } - val = val[:valLen-1] - - next, err := buf.ReadString('\n') - if err != nil { - if err != io.EOF { - return "", isEnd, err - } - isEnd = true - } - - next = strings.TrimSpace(next) - if len(next) == 0 { - break - } - val += next - } - return val, isEnd, nil -} - -// parse parses data through an io.Reader. -func (f *File) parse(reader io.Reader) error { - buf := bufio.NewReader(reader) - - // Handle BOM-UTF8. - // http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding - mask, err := buf.Peek(3) - if err == nil && len(mask) >= 3 && mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { - buf.Read(mask) - } - - count := 1 - comments := "" - isEnd := false - - section, err := f.NewSection(DEFAULT_SECTION) - if err != nil { - return err - } - - for { - line, err := buf.ReadString('\n') - line = strings.TrimSpace(line) - length := len(line) - - // Check error and ignore io.EOF just for a moment. - if err != nil { - if err != io.EOF { - return fmt.Errorf("error reading next line: %v", err) - } - // The last line of file could be an empty line. - if length == 0 { - break - } - isEnd = true - } - - // Skip empty lines. 
- if length == 0 { - continue - } - - switch { - case line[0] == '#' || line[0] == ';': // Comments. - if len(comments) == 0 { - comments = line - } else { - comments += LineBreak + line - } - continue - case line[0] == '[' && line[length-1] == ']': // New sction. - section, err = f.NewSection(strings.TrimSpace(line[1 : length-1])) - if err != nil { - return err - } - - if len(comments) > 0 { - section.Comment = comments - comments = "" - } - // Reset counter. - count = 1 - continue - } - - // Other possibilities. - var ( - i int - keyQuote string - kname string - valQuote string - val string - ) - - // Key name surrounded by quotes. - if line[0] == '"' { - if length > 6 && line[0:3] == `"""` { - keyQuote = `"""` - } else { - keyQuote = `"` - } - } else if line[0] == '`' { - keyQuote = "`" - } - if len(keyQuote) > 0 { - qLen := len(keyQuote) - pos := strings.Index(line[qLen:], keyQuote) - if pos == -1 { - return fmt.Errorf("error parsing line: missing closing key quote: %s", line) - } - pos = pos + qLen - i = strings.IndexAny(line[pos:], "=:") - if i < 0 { - return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line) - } else if i == pos { - return fmt.Errorf("error parsing line: key is empty: %s", line) - } - i = i + pos - kname = line[qLen:pos] // Just keep spaces inside quotes. - } else { - i = strings.IndexAny(line, "=:") - if i < 0 { - return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line) - } else if i == 0 { - return fmt.Errorf("error parsing line: key is empty: %s", line) - } - kname = strings.TrimSpace(line[0:i]) - } - - isAutoIncr := false - // Auto increment. - if kname == "-" { - isAutoIncr = true - kname = "#" + fmt.Sprint(count) - count++ - } - - lineRight := strings.TrimSpace(line[i+1:]) - lineRightLength := len(lineRight) - firstChar := "" - if lineRightLength >= 2 { - firstChar = lineRight[0:1] - } - if firstChar == "`" { - valQuote = "`" - } else if firstChar == `"` { - if lineRightLength >= 3 && lineRight[0:3] == `"""` { - valQuote = `"""` - } else { - valQuote = `"` - } - } else if firstChar == `'` { - valQuote = `'` - } - - if len(valQuote) > 0 { - qLen := len(valQuote) - pos := strings.LastIndex(lineRight[qLen:], valQuote) - // For multiple-line value check. - if pos == -1 { - if valQuote == `"` || valQuote == `'` { - return fmt.Errorf("error parsing line: single quote does not allow multiple-line value: %s", line) - } - - val = lineRight[qLen:] + "\n" - val, err = checkMultipleLines(buf, line, val, valQuote) - if err != nil { - return err - } - } else { - val = lineRight[qLen : pos+qLen] - } - } else { - val = strings.TrimSpace(cutComment(lineRight)) - val, isEnd, err = checkContinuationLines(buf, val) - if err != nil { - return err - } - } - - k, err := section.NewKey(kname, val) - if err != nil { - return err - } - k.isAutoIncr = isAutoIncr - if len(comments) > 0 { - k.Comment = comments - comments = "" - } - - if isEnd { - break - } - } - return nil -} - -func (f *File) reload(s dataSource) error { - r, err := s.ReadCloser() - if err != nil { - return err - } - defer r.Close() - - return f.parse(r) -} - -// Reload reloads and parses all data sources. -func (f *File) Reload() (err error) { - for _, s := range f.dataSources { - if err = f.reload(s); err != nil { - return err - } - } - return nil -} - -// Append appends one or more data sources and reloads automatically. 
-func (f *File) Append(source interface{}, others ...interface{}) error { - ds, err := parseDataSource(source) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - for _, s := range others { - ds, err = parseDataSource(s) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - } - return f.Reload() -} - -// WriteToIndent writes file content into io.Writer with given value indention. -func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { - equalSign := "=" - if PrettyFormat { - equalSign = " = " - } - - // Use buffer to make sure target is safe until finish encoding. - buf := bytes.NewBuffer(nil) - for i, sname := range f.sectionList { - sec := f.Section(sname) - if len(sec.Comment) > 0 { - if sec.Comment[0] != '#' && sec.Comment[0] != ';' { - sec.Comment = "; " + sec.Comment - } - if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil { - return 0, err - } - } - - if i > 0 { - if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil { - return 0, err - } - } else { - // Write nothing if default section is empty. - if len(sec.keyList) == 0 { - continue - } - } - - for _, kname := range sec.keyList { - key := sec.Key(kname) - if len(key.Comment) > 0 { - if len(indent) > 0 && sname != DEFAULT_SECTION { - buf.WriteString(indent) - } - if key.Comment[0] != '#' && key.Comment[0] != ';' { - key.Comment = "; " + key.Comment - } - if _, err = buf.WriteString(key.Comment + LineBreak); err != nil { - return 0, err - } - } - - if len(indent) > 0 && sname != DEFAULT_SECTION { - buf.WriteString(indent) - } - - switch { - case key.isAutoIncr: - kname = "-" - case strings.Contains(kname, "`") || strings.Contains(kname, `"`): - kname = `"""` + kname + `"""` - case strings.Contains(kname, `=`) || strings.Contains(kname, `:`): - kname = "`" + kname + "`" - } - - val := key.value - // In case key value contains "\n", "`" or "\"". - if strings.Contains(val, "\n") || strings.Contains(val, "`") || strings.Contains(val, `"`) || - strings.Contains(val, "#") { - val = `"""` + val + `"""` - } - if _, err = buf.WriteString(kname + equalSign + val + LineBreak); err != nil { - return 0, err - } - } - - // Put a line between sections. - if _, err = buf.WriteString(LineBreak); err != nil { - return 0, err - } - } - - return buf.WriteTo(w) -} - -// WriteTo writes file content into io.Writer. -func (f *File) WriteTo(w io.Writer) (int64, error) { - return f.WriteToIndent(w, "") -} - -// SaveToIndent writes content to file system with given value indention. -func (f *File) SaveToIndent(filename, indent string) error { - // Note: Because we are truncating with os.Create, - // so it's safer to save to a temporary file location and rename afte done. - tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp" - defer os.Remove(tmpPath) - - fw, err := os.Create(tmpPath) - if err != nil { - return err - } - - if _, err = f.WriteToIndent(fw, indent); err != nil { - fw.Close() - return err - } - fw.Close() - - // Remove old file and rename the new one. - os.Remove(filename) - return os.Rename(tmpPath, filename) -} - -// SaveTo writes content to file system. 
-func (f *File) SaveTo(filename string) error {
-    return f.SaveToIndent(filename, "")
-}
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/struct.go
deleted file mode 100644
index c1184371..00000000
--- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/struct.go
+++ /dev/null
@@ -1,350 +0,0 @@
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-import (
-    "bytes"
-    "errors"
-    "fmt"
-    "reflect"
-    "time"
-    "unicode"
-)
-
-// NameMapper represents an ini tag name mapper.
-type NameMapper func(string) string
-
-// Built-in name getters.
-var (
-    // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
-    AllCapsUnderscore NameMapper = func(raw string) string {
-        newstr := make([]rune, 0, len(raw))
-        for i, chr := range raw {
-            if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
-                if i > 0 {
-                    newstr = append(newstr, '_')
-                }
-            }
-            newstr = append(newstr, unicode.ToUpper(chr))
-        }
-        return string(newstr)
-    }
-    // TitleUnderscore converts to format title_underscore.
-    TitleUnderscore NameMapper = func(raw string) string {
-        newstr := make([]rune, 0, len(raw))
-        for i, chr := range raw {
-            if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
-                if i > 0 {
-                    newstr = append(newstr, '_')
-                }
-                chr -= ('A' - 'a')
-            }
-            newstr = append(newstr, chr)
-        }
-        return string(newstr)
-    }
-)
-
-func (s *Section) parseFieldName(raw, actual string) string {
-    if len(actual) > 0 {
-        return actual
-    }
-    if s.f.NameMapper != nil {
-        return s.f.NameMapper(raw)
-    }
-    return raw
-}
-
-func parseDelim(actual string) string {
-    if len(actual) > 0 {
-        return actual
-    }
-    return ","
-}
-
-var reflectTime = reflect.TypeOf(time.Now()).Kind()
-
-// setWithProperType sets proper value to field based on its type,
-// but it does not return an error for a failed parse,
-// because we want to use the default value that is already assigned to the struct.
-func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { - switch t.Kind() { - case reflect.String: - if len(key.String()) == 0 { - return nil - } - field.SetString(key.String()) - case reflect.Bool: - boolVal, err := key.Bool() - if err != nil { - return nil - } - field.SetBool(boolVal) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - durationVal, err := key.Duration() - if err == nil { - field.Set(reflect.ValueOf(durationVal)) - return nil - } - - intVal, err := key.Int64() - if err != nil { - return nil - } - field.SetInt(intVal) - // byte is an alias for uint8, so supporting uint8 breaks support for byte - case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: - durationVal, err := key.Duration() - if err == nil { - field.Set(reflect.ValueOf(durationVal)) - return nil - } - - uintVal, err := key.Uint64() - if err != nil { - return nil - } - field.SetUint(uintVal) - - case reflect.Float64: - floatVal, err := key.Float64() - if err != nil { - return nil - } - field.SetFloat(floatVal) - case reflectTime: - timeVal, err := key.Time() - if err != nil { - return nil - } - field.Set(reflect.ValueOf(timeVal)) - case reflect.Slice: - vals := key.Strings(delim) - numVals := len(vals) - if numVals == 0 { - return nil - } - - sliceOf := field.Type().Elem().Kind() - - var times []time.Time - if sliceOf == reflectTime { - times = key.Times(delim) - } - - slice := reflect.MakeSlice(field.Type(), numVals, numVals) - for i := 0; i < numVals; i++ { - switch sliceOf { - case reflectTime: - slice.Index(i).Set(reflect.ValueOf(times[i])) - default: - slice.Index(i).Set(reflect.ValueOf(vals[i])) - } - } - field.Set(slice) - default: - return fmt.Errorf("unsupported type '%s'", t) - } - return nil -} - -func (s *Section) mapTo(val reflect.Value) error { - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - typ := val.Type() - - for i := 0; i < typ.NumField(); i++ { - field := val.Field(i) - tpField := typ.Field(i) - - tag := tpField.Tag.Get("ini") - if tag == "-" { - continue - } - - fieldName := s.parseFieldName(tpField.Name, tag) - if len(fieldName) == 0 || !field.CanSet() { - continue - } - - isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous - isStruct := tpField.Type.Kind() == reflect.Struct - if isAnonymous { - field.Set(reflect.New(tpField.Type.Elem())) - } - - if isAnonymous || isStruct { - if sec, err := s.f.GetSection(fieldName); err == nil { - if err = sec.mapTo(field); err != nil { - return fmt.Errorf("error mapping field(%s): %v", fieldName, err) - } - continue - } - } - - if key, err := s.GetKey(fieldName); err == nil { - if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { - return fmt.Errorf("error mapping field(%s): %v", fieldName, err) - } - } - } - return nil -} - -// MapTo maps section to given struct. -func (s *Section) MapTo(v interface{}) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - val = val.Elem() - } else { - return errors.New("cannot map to non-pointer struct") - } - - return s.mapTo(val) -} - -// MapTo maps file to given struct. -func (f *File) MapTo(v interface{}) error { - return f.Section("").MapTo(v) -} - -// MapTo maps data sources to given struct with name mapper. -func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { - cfg, err := Load(source, others...) 
- if err != nil { - return err - } - cfg.NameMapper = mapper - return cfg.MapTo(v) -} - -// MapTo maps data sources to given struct. -func MapTo(v, source interface{}, others ...interface{}) error { - return MapToWithMapper(v, nil, source, others...) -} - -// reflectWithProperType does the opposite thing with setWithProperType. -func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { - switch t.Kind() { - case reflect.String: - key.SetValue(field.String()) - case reflect.Bool, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float64, - reflectTime: - key.SetValue(fmt.Sprint(field)) - case reflect.Slice: - vals := field.Slice(0, field.Len()) - if field.Len() == 0 { - return nil - } - - var buf bytes.Buffer - isTime := fmt.Sprint(field.Type()) == "[]time.Time" - for i := 0; i < field.Len(); i++ { - if isTime { - buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339)) - } else { - buf.WriteString(fmt.Sprint(vals.Index(i))) - } - buf.WriteString(delim) - } - key.SetValue(buf.String()[:buf.Len()-1]) - default: - return fmt.Errorf("unsupported type '%s'", t) - } - return nil -} - -func (s *Section) reflectFrom(val reflect.Value) error { - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - typ := val.Type() - - for i := 0; i < typ.NumField(); i++ { - field := val.Field(i) - tpField := typ.Field(i) - - tag := tpField.Tag.Get("ini") - if tag == "-" { - continue - } - - fieldName := s.parseFieldName(tpField.Name, tag) - if len(fieldName) == 0 || !field.CanSet() { - continue - } - - if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || - (tpField.Type.Kind() == reflect.Struct) { - // Note: The only error here is section doesn't exist. - sec, err := s.f.GetSection(fieldName) - if err != nil { - // Note: fieldName can never be empty here, ignore error. - sec, _ = s.f.NewSection(fieldName) - } - if err = sec.reflectFrom(field); err != nil { - return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) - } - continue - } - - // Note: Same reason as secion. - key, err := s.GetKey(fieldName) - if err != nil { - key, _ = s.NewKey(fieldName, "") - } - if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { - return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) - } - - } - return nil -} - -// ReflectFrom reflects secion from given struct. -func (s *Section) ReflectFrom(v interface{}) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - val = val.Elem() - } else { - return errors.New("cannot reflect from non-pointer struct") - } - - return s.reflectFrom(val) -} - -// ReflectFrom reflects file from given struct. -func (f *File) ReflectFrom(v interface{}) error { - return f.Section("").ReflectFrom(v) -} - -// ReflectFrom reflects data sources from given struct with name mapper. -func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { - cfg.NameMapper = mapper - return cfg.ReflectFrom(v) -} - -// ReflectFrom reflects data sources from given struct. 
-func ReflectFrom(cfg *File, v interface{}) error { - return ReflectFromWithMapper(cfg, v, nil) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.gitignore deleted file mode 100644 index 531fcc11..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -jpgo -jmespath-fuzz.zip -cpu.out -go-jmespath.test diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.travis.yml deleted file mode 100644 index 1f980775..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -sudo: false - -go: - - 1.4 - -install: go get -v -t ./... -script: make test diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/LICENSE deleted file mode 100644 index b03310a9..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2015 James Saryerwinnie - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/Makefile deleted file mode 100644 index a828d284..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/Makefile +++ /dev/null @@ -1,44 +0,0 @@ - -CMD = jpgo - -help: - @echo "Please use \`make ' where is one of" - @echo " test to run all the tests" - @echo " build to build the library and jp executable" - @echo " generate to run codegen" - - -generate: - go generate ./... - -build: - rm -f $(CMD) - go build ./... - rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... - mv cmd/$(CMD)/$(CMD) . - -test: - go test -v ./... - -check: - go vet ./... - @echo "golint ./..." - @lint=`golint ./...`; \ - lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ - echo "$$lint"; \ - if [ "$$lint" != "" ]; then exit 1; fi - -htmlc: - go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov - -buildfuzz: - go-fuzz-build github.com/jmespath/go-jmespath/fuzz - -fuzz: buildfuzz - go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata - -bench: - go test -bench . 
-cpuprofile cpu.out
-
-pprof-cpu:
-    go tool pprof ./go-jmespath.test ./cpu.out
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/README.md
deleted file mode 100644
index 187ef676..00000000
--- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# go-jmespath - A JMESPath implementation in Go
-
-[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath)
-
-
-
-See http://jmespath.org for more info.
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/api.go
deleted file mode 100644
index 9cfa988b..00000000
--- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/api.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package jmespath
-
-import "strconv"
-
-// JMESPath is the representation of a compiled JMESPath query. A JMESPath is
-// safe for concurrent use by multiple goroutines.
-type JMESPath struct {
-    ast  ASTNode
-    intr *treeInterpreter
-}
-
-// Compile parses a JMESPath expression and returns, if successful, a JMESPath
-// object that can be used to match against data.
-func Compile(expression string) (*JMESPath, error) {
-    parser := NewParser()
-    ast, err := parser.Parse(expression)
-    if err != nil {
-        return nil, err
-    }
-    jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
-    return jmespath, nil
-}
-
-// MustCompile is like Compile but panics if the expression cannot be parsed.
-// It simplifies safe initialization of global variables holding compiled
-// JMESPaths.
-func MustCompile(expression string) *JMESPath {
-    jmespath, err := Compile(expression)
-    if err != nil {
-        panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
-    }
-    return jmespath
-}
-
-// Search evaluates a JMESPath expression against input data and returns the result.
-func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
-    return jp.intr.Execute(jp.ast, data)
-}
-
-// Search evaluates a JMESPath expression against input data and returns the result.
-func Search(expression string, data interface{}) (interface{}, error) { - intr := newInterpreter() - parser := NewParser() - ast, err := parser.Parse(expression) - if err != nil { - return nil, err - } - return intr.Execute(ast, data) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go deleted file mode 100644 index 1cd2d239..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// generated by stringer -type astNodeType; DO NOT EDIT - -package jmespath - -import "fmt" - -const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" - -var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} - -func (i astNodeType) String() string { - if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { - return fmt.Sprintf("astNodeType(%d)", i) - } - return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/functions.go deleted file mode 100644 index 9b7cd89b..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/functions.go +++ /dev/null @@ -1,842 +0,0 @@ -package jmespath - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "unicode/utf8" -) - -type jpFunction func(arguments []interface{}) (interface{}, error) - -type jpType string - -const ( - jpUnknown jpType = "unknown" - jpNumber jpType = "number" - jpString jpType = "string" - jpArray jpType = "array" - jpObject jpType = "object" - jpArrayNumber jpType = "array[number]" - jpArrayString jpType = "array[string]" - jpExpref jpType = "expref" - jpAny jpType = "any" -) - -type functionEntry struct { - name string - arguments []argSpec - handler jpFunction - hasExpRef bool -} - -type argSpec struct { - types []jpType - variadic bool -} - -type byExprString struct { - intr *treeInterpreter - node ASTNode - items []interface{} - hasError bool -} - -func (a *byExprString) Len() int { - return len(a.items) -} -func (a *byExprString) Swap(i, j int) { - a.items[i], a.items[j] = a.items[j], a.items[i] -} -func (a *byExprString) Less(i, j int) bool { - first, err := a.intr.Execute(a.node, a.items[i]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - ith, ok := first.(string) - if !ok { - a.hasError = true - return true - } - second, err := a.intr.Execute(a.node, a.items[j]) - if err != nil { - a.hasError = true - // Return a dummy value. 
- return true - } - jth, ok := second.(string) - if !ok { - a.hasError = true - return true - } - return ith < jth -} - -type byExprFloat struct { - intr *treeInterpreter - node ASTNode - items []interface{} - hasError bool -} - -func (a *byExprFloat) Len() int { - return len(a.items) -} -func (a *byExprFloat) Swap(i, j int) { - a.items[i], a.items[j] = a.items[j], a.items[i] -} -func (a *byExprFloat) Less(i, j int) bool { - first, err := a.intr.Execute(a.node, a.items[i]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - ith, ok := first.(float64) - if !ok { - a.hasError = true - return true - } - second, err := a.intr.Execute(a.node, a.items[j]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - jth, ok := second.(float64) - if !ok { - a.hasError = true - return true - } - return ith < jth -} - -type functionCaller struct { - functionTable map[string]functionEntry -} - -func newFunctionCaller() *functionCaller { - caller := &functionCaller{} - caller.functionTable = map[string]functionEntry{ - "length": { - name: "length", - arguments: []argSpec{ - {types: []jpType{jpString, jpArray, jpObject}}, - }, - handler: jpfLength, - }, - "starts_with": { - name: "starts_with", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpString}}, - }, - handler: jpfStartsWith, - }, - "abs": { - name: "abs", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfAbs, - }, - "avg": { - name: "avg", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber}}, - }, - handler: jpfAvg, - }, - "ceil": { - name: "ceil", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfCeil, - }, - "contains": { - name: "contains", - arguments: []argSpec{ - {types: []jpType{jpArray, jpString}}, - {types: []jpType{jpAny}}, - }, - handler: jpfContains, - }, - "ends_with": { - name: "ends_with", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpString}}, - }, - handler: jpfEndsWith, - }, - "floor": { - name: "floor", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfFloor, - }, - "map": { - name: "map", - arguments: []argSpec{ - {types: []jpType{jpExpref}}, - {types: []jpType{jpArray}}, - }, - handler: jpfMap, - hasExpRef: true, - }, - "max": { - name: "max", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber, jpArrayString}}, - }, - handler: jpfMax, - }, - "merge": { - name: "merge", - arguments: []argSpec{ - {types: []jpType{jpObject}, variadic: true}, - }, - handler: jpfMerge, - }, - "max_by": { - name: "max_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfMaxBy, - hasExpRef: true, - }, - "sum": { - name: "sum", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber}}, - }, - handler: jpfSum, - }, - "min": { - name: "min", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber, jpArrayString}}, - }, - handler: jpfMin, - }, - "min_by": { - name: "min_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfMinBy, - hasExpRef: true, - }, - "type": { - name: "type", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfType, - }, - "keys": { - name: "keys", - arguments: []argSpec{ - {types: []jpType{jpObject}}, - }, - handler: jpfKeys, - }, - "values": { - name: "values", - arguments: []argSpec{ - {types: []jpType{jpObject}}, - }, - handler: jpfValues, - }, - "sort": { - name: "sort", - arguments: []argSpec{ - 
{types: []jpType{jpArrayString, jpArrayNumber}}, - }, - handler: jpfSort, - }, - "sort_by": { - name: "sort_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfSortBy, - hasExpRef: true, - }, - "join": { - name: "join", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpArrayString}}, - }, - handler: jpfJoin, - }, - "reverse": { - name: "reverse", - arguments: []argSpec{ - {types: []jpType{jpArray, jpString}}, - }, - handler: jpfReverse, - }, - "to_array": { - name: "to_array", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToArray, - }, - "to_string": { - name: "to_string", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToString, - }, - "to_number": { - name: "to_number", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToNumber, - }, - "not_null": { - name: "not_null", - arguments: []argSpec{ - {types: []jpType{jpAny}, variadic: true}, - }, - handler: jpfNotNull, - }, - } - return caller -} - -func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { - if len(e.arguments) == 0 { - return arguments, nil - } - if !e.arguments[len(e.arguments)-1].variadic { - if len(e.arguments) != len(arguments) { - return nil, errors.New("incorrect number of args") - } - for i, spec := range e.arguments { - userArg := arguments[i] - err := spec.typeCheck(userArg) - if err != nil { - return nil, err - } - } - return arguments, nil - } - if len(arguments) < len(e.arguments) { - return nil, errors.New("Invalid arity.") - } - return arguments, nil -} - -func (a *argSpec) typeCheck(arg interface{}) error { - for _, t := range a.types { - switch t { - case jpNumber: - if _, ok := arg.(float64); ok { - return nil - } - case jpString: - if _, ok := arg.(string); ok { - return nil - } - case jpArray: - if isSliceType(arg) { - return nil - } - case jpObject: - if _, ok := arg.(map[string]interface{}); ok { - return nil - } - case jpArrayNumber: - if _, ok := toArrayNum(arg); ok { - return nil - } - case jpArrayString: - if _, ok := toArrayStr(arg); ok { - return nil - } - case jpAny: - return nil - case jpExpref: - if _, ok := arg.(expRef); ok { - return nil - } - } - } - return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) -} - -func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { - entry, ok := f.functionTable[name] - if !ok { - return nil, errors.New("unknown function: " + name) - } - resolvedArgs, err := entry.resolveArgs(arguments) - if err != nil { - return nil, err - } - if entry.hasExpRef { - var extra []interface{} - extra = append(extra, intr) - resolvedArgs = append(extra, resolvedArgs...) 
- } - return entry.handler(resolvedArgs) -} - -func jpfAbs(arguments []interface{}) (interface{}, error) { - num := arguments[0].(float64) - return math.Abs(num), nil -} - -func jpfLength(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if c, ok := arg.(string); ok { - return float64(utf8.RuneCountInString(c)), nil - } else if isSliceType(arg) { - v := reflect.ValueOf(arg) - return float64(v.Len()), nil - } else if c, ok := arg.(map[string]interface{}); ok { - return float64(len(c)), nil - } - return nil, errors.New("could not compute length()") -} - -func jpfStartsWith(arguments []interface{}) (interface{}, error) { - search := arguments[0].(string) - prefix := arguments[1].(string) - return strings.HasPrefix(search, prefix), nil -} - -func jpfAvg(arguments []interface{}) (interface{}, error) { - // We've already type checked the value so we can safely use - // type assertions. - args := arguments[0].([]interface{}) - length := float64(len(args)) - numerator := 0.0 - for _, n := range args { - numerator += n.(float64) - } - return numerator / length, nil -} -func jpfCeil(arguments []interface{}) (interface{}, error) { - val := arguments[0].(float64) - return math.Ceil(val), nil -} -func jpfContains(arguments []interface{}) (interface{}, error) { - search := arguments[0] - el := arguments[1] - if searchStr, ok := search.(string); ok { - if elStr, ok := el.(string); ok { - return strings.Index(searchStr, elStr) != -1, nil - } - return false, nil - } - // Otherwise this is a generic contains for []interface{} - general := search.([]interface{}) - for _, item := range general { - if item == el { - return true, nil - } - } - return false, nil -} -func jpfEndsWith(arguments []interface{}) (interface{}, error) { - search := arguments[0].(string) - suffix := arguments[1].(string) - return strings.HasSuffix(search, suffix), nil -} -func jpfFloor(arguments []interface{}) (interface{}, error) { - val := arguments[0].(float64) - return math.Floor(val), nil -} -func jpfMap(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - exp := arguments[1].(expRef) - node := exp.ref - arr := arguments[2].([]interface{}) - mapped := make([]interface{}, 0, len(arr)) - for _, value := range arr { - current, err := intr.Execute(node, value) - if err != nil { - return nil, err - } - mapped = append(mapped, current) - } - return mapped, nil -} -func jpfMax(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item > best { - best = item - } - } - return best, nil - } - // Otherwise we're dealing with a max() of strings. 
- items, _ := toArrayStr(arguments[0]) - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item > best { - best = item - } - } - return best, nil -} -func jpfMerge(arguments []interface{}) (interface{}, error) { - final := make(map[string]interface{}) - for _, m := range arguments { - mapped := m.(map[string]interface{}) - for key, value := range mapped { - final[key] = value - } - } - return final, nil -} -func jpfMaxBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return nil, nil - } else if len(arr) == 1 { - return arr[0], nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - switch t := start.(type) { - case float64: - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(float64) - if !ok { - return nil, errors.New("invalid type, must be number") - } - if current > bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - case string: - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(string) - if !ok { - return nil, errors.New("invalid type, must be string") - } - if current > bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - default: - return nil, errors.New("invalid type, must be number or string") - } -} -func jpfSum(arguments []interface{}) (interface{}, error) { - items, _ := toArrayNum(arguments[0]) - sum := 0.0 - for _, item := range items { - sum += item - } - return sum, nil -} - -func jpfMin(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item < best { - best = item - } - } - return best, nil - } - items, _ := toArrayStr(arguments[0]) - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item < best { - best = item - } - } - return best, nil -} - -func jpfMinBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return nil, nil - } else if len(arr) == 1 { - return arr[0], nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - if t, ok := start.(float64); ok { - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(float64) - if !ok { - return nil, errors.New("invalid type, must be number") - } - if current < bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - } else if t, ok := start.(string); ok { - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(string) - if !ok { - return nil, errors.New("invalid type, must be string") - } - if current < 
bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - } else { - return nil, errors.New("invalid type, must be number or string") - } -} -func jpfType(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if _, ok := arg.(float64); ok { - return "number", nil - } - if _, ok := arg.(string); ok { - return "string", nil - } - if _, ok := arg.([]interface{}); ok { - return "array", nil - } - if _, ok := arg.(map[string]interface{}); ok { - return "object", nil - } - if arg == nil { - return "null", nil - } - if arg == true || arg == false { - return "boolean", nil - } - return nil, errors.New("unknown type") -} -func jpfKeys(arguments []interface{}) (interface{}, error) { - arg := arguments[0].(map[string]interface{}) - collected := make([]interface{}, 0, len(arg)) - for key := range arg { - collected = append(collected, key) - } - return collected, nil -} -func jpfValues(arguments []interface{}) (interface{}, error) { - arg := arguments[0].(map[string]interface{}) - collected := make([]interface{}, 0, len(arg)) - for _, value := range arg { - collected = append(collected, value) - } - return collected, nil -} -func jpfSort(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - d := sort.Float64Slice(items) - sort.Stable(d) - final := make([]interface{}, len(d)) - for i, val := range d { - final[i] = val - } - return final, nil - } - // Otherwise we're dealing with sort()'ing strings. - items, _ := toArrayStr(arguments[0]) - d := sort.StringSlice(items) - sort.Stable(d) - final := make([]interface{}, len(d)) - for i, val := range d { - final[i] = val - } - return final, nil -} -func jpfSortBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return arr, nil - } else if len(arr) == 1 { - return arr, nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - if _, ok := start.(float64); ok { - sortable := &byExprFloat{intr, node, arr, false} - sort.Stable(sortable) - if sortable.hasError { - return nil, errors.New("error in sort_by comparison") - } - return arr, nil - } else if _, ok := start.(string); ok { - sortable := &byExprString{intr, node, arr, false} - sort.Stable(sortable) - if sortable.hasError { - return nil, errors.New("error in sort_by comparison") - } - return arr, nil - } else { - return nil, errors.New("invalid type, must be number or string") - } -} -func jpfJoin(arguments []interface{}) (interface{}, error) { - sep := arguments[0].(string) - // We can't just do arguments[1].([]string), we have to - // manually convert each item to a string. 
- arrayStr := []string{} - for _, item := range arguments[1].([]interface{}) { - arrayStr = append(arrayStr, item.(string)) - } - return strings.Join(arrayStr, sep), nil -} -func jpfReverse(arguments []interface{}) (interface{}, error) { - if s, ok := arguments[0].(string); ok { - r := []rune(s) - for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { - r[i], r[j] = r[j], r[i] - } - return string(r), nil - } - items := arguments[0].([]interface{}) - length := len(items) - reversed := make([]interface{}, length) - for i, item := range items { - reversed[length-(i+1)] = item - } - return reversed, nil -} -func jpfToArray(arguments []interface{}) (interface{}, error) { - if _, ok := arguments[0].([]interface{}); ok { - return arguments[0], nil - } - return arguments[:1:1], nil -} -func jpfToString(arguments []interface{}) (interface{}, error) { - if v, ok := arguments[0].(string); ok { - return v, nil - } - result, err := json.Marshal(arguments[0]) - if err != nil { - return nil, err - } - return string(result), nil -} -func jpfToNumber(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if v, ok := arg.(float64); ok { - return v, nil - } - if v, ok := arg.(string); ok { - conv, err := strconv.ParseFloat(v, 64) - if err != nil { - return nil, nil - } - return conv, nil - } - if _, ok := arg.([]interface{}); ok { - return nil, nil - } - if _, ok := arg.(map[string]interface{}); ok { - return nil, nil - } - if arg == nil { - return nil, nil - } - if arg == true || arg == false { - return nil, nil - } - return nil, errors.New("unknown type") -} -func jpfNotNull(arguments []interface{}) (interface{}, error) { - for _, arg := range arguments { - if arg != nil { - return arg, nil - } - } - return nil, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/interpreter.go deleted file mode 100644 index 13c74604..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/interpreter.go +++ /dev/null @@ -1,418 +0,0 @@ -package jmespath - -import ( - "errors" - "reflect" - "unicode" - "unicode/utf8" -) - -/* This is a tree based interpreter. It walks the AST and directly - interprets the AST to search through a JSON document. -*/ - -type treeInterpreter struct { - fCall *functionCaller -} - -func newInterpreter() *treeInterpreter { - interpreter := treeInterpreter{} - interpreter.fCall = newFunctionCaller() - return &interpreter -} - -type expRef struct { - ref ASTNode -} - -// Execute takes an ASTNode and input data and interprets the AST directly. -// It will produce the result of applying the JMESPath expression associated -// with the ASTNode to the input data "value". 
-func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { - switch node.nodeType { - case ASTComparator: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - right, err := intr.Execute(node.children[1], value) - if err != nil { - return nil, err - } - switch node.value { - case tEQ: - return objsEqual(left, right), nil - case tNE: - return !objsEqual(left, right), nil - } - leftNum, ok := left.(float64) - if !ok { - return nil, nil - } - rightNum, ok := right.(float64) - if !ok { - return nil, nil - } - switch node.value { - case tGT: - return leftNum > rightNum, nil - case tGTE: - return leftNum >= rightNum, nil - case tLT: - return leftNum < rightNum, nil - case tLTE: - return leftNum <= rightNum, nil - } - case ASTExpRef: - return expRef{ref: node.children[0]}, nil - case ASTFunctionExpression: - resolvedArgs := []interface{}{} - for _, arg := range node.children { - current, err := intr.Execute(arg, value) - if err != nil { - return nil, err - } - resolvedArgs = append(resolvedArgs, current) - } - return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) - case ASTField: - if m, ok := value.(map[string]interface{}); ok { - key := node.value.(string) - return m[key], nil - } - return intr.fieldFromStruct(node.value.(string), value) - case ASTFilterProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - sliceType, ok := left.([]interface{}) - if !ok { - if isSliceType(left) { - return intr.filterProjectionWithReflection(node, left) - } - return nil, nil - } - compareNode := node.children[2] - collected := []interface{}{} - for _, element := range sliceType { - result, err := intr.Execute(compareNode, element) - if err != nil { - return nil, err - } - if !isFalse(result) { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - } - return collected, nil - case ASTFlatten: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - sliceType, ok := left.([]interface{}) - if !ok { - // If we can't type convert to []interface{}, there's - // a chance this could still work via reflection if we're - // dealing with user provided types. - if isSliceType(left) { - return intr.flattenWithReflection(left) - } - return nil, nil - } - flattened := []interface{}{} - for _, element := range sliceType { - if elementSlice, ok := element.([]interface{}); ok { - flattened = append(flattened, elementSlice...) - } else if isSliceType(element) { - reflectFlat := []interface{}{} - v := reflect.ValueOf(element) - for i := 0; i < v.Len(); i++ { - reflectFlat = append(reflectFlat, v.Index(i).Interface()) - } - flattened = append(flattened, reflectFlat...) - } else { - flattened = append(flattened, element) - } - } - return flattened, nil - case ASTIdentity, ASTCurrentNode: - return value, nil - case ASTIndex: - if sliceType, ok := value.([]interface{}); ok { - index := node.value.(int) - if index < 0 { - index += len(sliceType) - } - if index < len(sliceType) && index >= 0 { - return sliceType[index], nil - } - return nil, nil - } - // Otherwise try via reflection. 
- rv := reflect.ValueOf(value) - if rv.Kind() == reflect.Slice { - index := node.value.(int) - if index < 0 { - index += rv.Len() - } - if index < rv.Len() && index >= 0 { - v := rv.Index(index) - return v.Interface(), nil - } - } - return nil, nil - case ASTKeyValPair: - return intr.Execute(node.children[0], value) - case ASTLiteral: - return node.value, nil - case ASTMultiSelectHash: - if value == nil { - return nil, nil - } - collected := make(map[string]interface{}) - for _, child := range node.children { - current, err := intr.Execute(child, value) - if err != nil { - return nil, err - } - key := child.value.(string) - collected[key] = current - } - return collected, nil - case ASTMultiSelectList: - if value == nil { - return nil, nil - } - collected := []interface{}{} - for _, child := range node.children { - current, err := intr.Execute(child, value) - if err != nil { - return nil, err - } - collected = append(collected, current) - } - return collected, nil - case ASTOrExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - matched, err = intr.Execute(node.children[1], value) - if err != nil { - return nil, err - } - } - return matched, nil - case ASTAndExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - return matched, nil - } - return intr.Execute(node.children[1], value) - case ASTNotExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - return true, nil - } - return false, nil - case ASTPipe: - result := value - var err error - for _, child := range node.children { - result, err = intr.Execute(child, result) - if err != nil { - return nil, err - } - } - return result, nil - case ASTProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - sliceType, ok := left.([]interface{}) - if !ok { - if isSliceType(left) { - return intr.projectWithReflection(node, left) - } - return nil, nil - } - collected := []interface{}{} - var current interface{} - for _, element := range sliceType { - current, err = intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - return collected, nil - case ASTSubexpression, ASTIndexExpression: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - return intr.Execute(node.children[1], left) - case ASTSlice: - sliceType, ok := value.([]interface{}) - if !ok { - if isSliceType(value) { - return intr.sliceWithReflection(node, value) - } - return nil, nil - } - parts := node.value.([]*int) - sliceParams := make([]sliceParam, 3) - for i, part := range parts { - if part != nil { - sliceParams[i].Specified = true - sliceParams[i].N = *part - } - } - return slice(sliceType, sliceParams) - case ASTValueProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - mapType, ok := left.(map[string]interface{}) - if !ok { - return nil, nil - } - values := make([]interface{}, len(mapType)) - for _, value := range mapType { - values = append(values, value) - } - collected := []interface{}{} - for _, element := range values { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - return collected, 
nil - } - return nil, errors.New("Unknown AST node: " + node.nodeType.String()) -} - -func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { - rv := reflect.ValueOf(value) - first, n := utf8.DecodeRuneInString(key) - fieldName := string(unicode.ToUpper(first)) + key[n:] - if rv.Kind() == reflect.Struct { - v := rv.FieldByName(fieldName) - if !v.IsValid() { - return nil, nil - } - return v.Interface(), nil - } else if rv.Kind() == reflect.Ptr { - // Handle multiple levels of indirection? - if rv.IsNil() { - return nil, nil - } - rv = rv.Elem() - v := rv.FieldByName(fieldName) - if !v.IsValid() { - return nil, nil - } - return v.Interface(), nil - } - return nil, nil -} - -func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { - v := reflect.ValueOf(value) - flattened := []interface{}{} - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - if reflect.TypeOf(element).Kind() == reflect.Slice { - // Then insert the contents of the element - // slice into the flattened slice, - // i.e flattened = append(flattened, mySlice...) - elementV := reflect.ValueOf(element) - for j := 0; j < elementV.Len(); j++ { - flattened = append( - flattened, elementV.Index(j).Interface()) - } - } else { - flattened = append(flattened, element) - } - } - return flattened, nil -} - -func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { - v := reflect.ValueOf(value) - parts := node.value.([]*int) - sliceParams := make([]sliceParam, 3) - for i, part := range parts { - if part != nil { - sliceParams[i].Specified = true - sliceParams[i].N = *part - } - } - final := []interface{}{} - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - final = append(final, element) - } - return slice(final, sliceParams) -} - -func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { - compareNode := node.children[2] - collected := []interface{}{} - v := reflect.ValueOf(value) - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - result, err := intr.Execute(compareNode, element) - if err != nil { - return nil, err - } - if !isFalse(result) { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - } - return collected, nil -} - -func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { - collected := []interface{}{} - v := reflect.ValueOf(value) - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - result, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if result != nil { - collected = append(collected, result) - } - } - return collected, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/lexer.go deleted file mode 100644 index 817900c8..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/lexer.go +++ /dev/null @@ -1,420 +0,0 @@ -package jmespath - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" - "unicode/utf8" -) - -type token struct { - tokenType tokType - value string - position int - length 
int -} - -type tokType int - -const eof = -1 - -// Lexer contains information about the expression being tokenized. -type Lexer struct { - expression string // The expression provided by the user. - currentPos int // The current position in the string. - lastWidth int // The width of the current rune. This - buf bytes.Buffer // Internal buffer used for building up values. -} - -// SyntaxError is the main error used whenever a lexing or parsing error occurs. -type SyntaxError struct { - msg string // Error message displayed to user - Expression string // Expression that generated a SyntaxError - Offset int // The location in the string where the error occurred -} - -func (e SyntaxError) Error() string { - // In the future, it would be good to underline the specific - // location where the error occurred. - return "SyntaxError: " + e.msg -} - -// HighlightLocation will show where the syntax error occurred. -// It will place a "^" character on a line below the expression -// at the point where the syntax error occurred. -func (e SyntaxError) HighlightLocation() string { - return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" -} - -//go:generate stringer -type=tokType -const ( - tUnknown tokType = iota - tStar - tDot - tFilter - tFlatten - tLparen - tRparen - tLbracket - tRbracket - tLbrace - tRbrace - tOr - tPipe - tNumber - tUnquotedIdentifier - tQuotedIdentifier - tComma - tColon - tLT - tLTE - tGT - tGTE - tEQ - tNE - tJSONLiteral - tStringLiteral - tCurrent - tExpref - tAnd - tNot - tEOF -) - -var basicTokens = map[rune]tokType{ - '.': tDot, - '*': tStar, - ',': tComma, - ':': tColon, - '{': tLbrace, - '}': tRbrace, - ']': tRbracket, // tLbracket not included because it could be "[]" - '(': tLparen, - ')': tRparen, - '@': tCurrent, -} - -// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. -// When using this bitmask just be sure to shift the rune down 64 bits -// before checking against identifierStartBits. -const identifierStartBits uint64 = 576460745995190270 - -// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. -var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} - -var whiteSpace = map[rune]bool{ - ' ': true, '\t': true, '\n': true, '\r': true, -} - -func (t token) String() string { - return fmt.Sprintf("Token{%+v, %s, %d, %d}", - t.tokenType, t.value, t.position, t.length) -} - -// NewLexer creates a new JMESPath lexer. -func NewLexer() *Lexer { - lexer := Lexer{} - return &lexer -} - -func (lexer *Lexer) next() rune { - if lexer.currentPos >= len(lexer.expression) { - lexer.lastWidth = 0 - return eof - } - r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) - lexer.lastWidth = w - lexer.currentPos += w - return r -} - -func (lexer *Lexer) back() { - lexer.currentPos -= lexer.lastWidth -} - -func (lexer *Lexer) peek() rune { - t := lexer.next() - lexer.back() - return t -} - -// tokenize takes an expression and returns corresponding tokens. -func (lexer *Lexer) tokenize(expression string) ([]token, error) { - var tokens []token - lexer.expression = expression - lexer.currentPos = 0 - lexer.lastWidth = 0 -loop: - for { - r := lexer.next() - if identifierStartBits&(1<<(uint64(r)-64)) > 0 { - t := lexer.consumeUnquotedIdentifier() - tokens = append(tokens, t) - } else if val, ok := basicTokens[r]; ok { - // Basic single char token. 
- t := token{ - tokenType: val, - value: string(r), - position: lexer.currentPos - lexer.lastWidth, - length: 1, - } - tokens = append(tokens, t) - } else if r == '-' || (r >= '0' && r <= '9') { - t := lexer.consumeNumber() - tokens = append(tokens, t) - } else if r == '[' { - t := lexer.consumeLBracket() - tokens = append(tokens, t) - } else if r == '"' { - t, err := lexer.consumeQuotedIdentifier() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '\'' { - t, err := lexer.consumeRawStringLiteral() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '`' { - t, err := lexer.consumeLiteral() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '|' { - t := lexer.matchOrElse(r, '|', tOr, tPipe) - tokens = append(tokens, t) - } else if r == '<' { - t := lexer.matchOrElse(r, '=', tLTE, tLT) - tokens = append(tokens, t) - } else if r == '>' { - t := lexer.matchOrElse(r, '=', tGTE, tGT) - tokens = append(tokens, t) - } else if r == '!' { - t := lexer.matchOrElse(r, '=', tNE, tNot) - tokens = append(tokens, t) - } else if r == '=' { - t := lexer.matchOrElse(r, '=', tEQ, tUnknown) - tokens = append(tokens, t) - } else if r == '&' { - t := lexer.matchOrElse(r, '&', tAnd, tExpref) - tokens = append(tokens, t) - } else if r == eof { - break loop - } else if _, ok := whiteSpace[r]; ok { - // Ignore whitespace - } else { - return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) - } - } - tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) - return tokens, nil -} - -// Consume characters until the ending rune "r" is reached. -// If the end of the expression is reached before seeing the -// terminating rune "r", then an error is returned. -// If no error occurs then the matching substring is returned. -// The returned string will not include the ending rune. -func (lexer *Lexer) consumeUntil(end rune) (string, error) { - start := lexer.currentPos - current := lexer.next() - for current != end && current != eof { - if current == '\\' && lexer.peek() != eof { - lexer.next() - } - current = lexer.next() - } - if lexer.lastWidth == 0 { - // Then we hit an EOF so we never reached the closing - // delimiter. - return "", SyntaxError{ - msg: "Unclosed delimiter: " + string(end), - Expression: lexer.expression, - Offset: len(lexer.expression), - } - } - return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil -} - -func (lexer *Lexer) consumeLiteral() (token, error) { - start := lexer.currentPos - value, err := lexer.consumeUntil('`') - if err != nil { - return token{}, err - } - value = strings.Replace(value, "\\`", "`", -1) - return token{ - tokenType: tJSONLiteral, - value: value, - position: start, - length: len(value), - }, nil -} - -func (lexer *Lexer) consumeRawStringLiteral() (token, error) { - start := lexer.currentPos - currentIndex := start - current := lexer.next() - for current != '\'' && lexer.peek() != eof { - if current == '\\' && lexer.peek() == '\'' { - chunk := lexer.expression[currentIndex : lexer.currentPos-1] - lexer.buf.WriteString(chunk) - lexer.buf.WriteString("'") - lexer.next() - currentIndex = lexer.currentPos - } - current = lexer.next() - } - if lexer.lastWidth == 0 { - // Then we hit an EOF so we never reached the closing - // delimiter. 
- return token{}, SyntaxError{ - msg: "Unclosed delimiter: '", - Expression: lexer.expression, - Offset: len(lexer.expression), - } - } - if currentIndex < lexer.currentPos { - lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) - } - value := lexer.buf.String() - // Reset the buffer so it can be reused again. - lexer.buf.Reset() - return token{ - tokenType: tStringLiteral, - value: value, - position: start, - length: len(value), - }, nil -} - -func (lexer *Lexer) syntaxError(msg string) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: lexer.expression, - Offset: lexer.currentPos - 1, - } -} - -// Checks for a two char token, otherwise matches a single character -// token. This is used whenever a two char token overlaps a single -// char token, e.g. "||" -> tOr, "|" -> tPipe. -func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { - start := lexer.currentPos - lexer.lastWidth - nextRune := lexer.next() - var t token - if nextRune == second { - t = token{ - tokenType: matchedType, - value: string(first) + string(second), - position: start, - length: 2, - } - } else { - lexer.back() - t = token{ - tokenType: singleCharType, - value: string(first), - position: start, - length: 1, - } - } - return t -} - -func (lexer *Lexer) consumeLBracket() token { - // There are three options here: - // 1. A filter expression "[?" - // 2. A flatten operator "[]" - // 3. A bare lbracket "[" - start := lexer.currentPos - lexer.lastWidth - nextRune := lexer.next() - var t token - if nextRune == '?' { - t = token{ - tokenType: tFilter, - value: "[?", - position: start, - length: 2, - } - } else if nextRune == ']' { - t = token{ - tokenType: tFlatten, - value: "[]", - position: start, - length: 2, - } - } else { - t = token{ - tokenType: tLbracket, - value: "[", - position: start, - length: 1, - } - lexer.back() - } - return t -} - -func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { - start := lexer.currentPos - value, err := lexer.consumeUntil('"') - if err != nil { - return token{}, err - } - var decoded string - asJSON := []byte("\"" + value + "\"") - if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { - return token{}, err - } - return token{ - tokenType: tQuotedIdentifier, - value: decoded, - position: start - 1, - length: len(decoded), - }, nil -} - -func (lexer *Lexer) consumeUnquotedIdentifier() token { - // Consume runes until we reach the end of an unquoted - // identifier. - start := lexer.currentPos - lexer.lastWidth - for { - r := lexer.next() - if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { - lexer.back() - break - } - } - value := lexer.expression[start:lexer.currentPos] - return token{ - tokenType: tUnquotedIdentifier, - value: value, - position: start, - length: lexer.currentPos - start, - } -} - -func (lexer *Lexer) consumeNumber() token { - // Consume runes until we reach something that's not a number. 
- start := lexer.currentPos - lexer.lastWidth - for { - r := lexer.next() - if r < '0' || r > '9' { - lexer.back() - break - } - } - value := lexer.expression[start:lexer.currentPos] - return token{ - tokenType: tNumber, - value: value, - position: start, - length: lexer.currentPos - start, - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/parser.go deleted file mode 100644 index 1240a175..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/parser.go +++ /dev/null @@ -1,603 +0,0 @@ -package jmespath - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" -) - -type astNodeType int - -//go:generate stringer -type astNodeType -const ( - ASTEmpty astNodeType = iota - ASTComparator - ASTCurrentNode - ASTExpRef - ASTFunctionExpression - ASTField - ASTFilterProjection - ASTFlatten - ASTIdentity - ASTIndex - ASTIndexExpression - ASTKeyValPair - ASTLiteral - ASTMultiSelectHash - ASTMultiSelectList - ASTOrExpression - ASTAndExpression - ASTNotExpression - ASTPipe - ASTProjection - ASTSubexpression - ASTSlice - ASTValueProjection -) - -// ASTNode represents the abstract syntax tree of a JMESPath expression. -type ASTNode struct { - nodeType astNodeType - value interface{} - children []ASTNode -} - -func (node ASTNode) String() string { - return node.PrettyPrint(0) -} - -// PrettyPrint will pretty print the parsed AST. -// The AST is an implementation detail and this pretty print -// function is provided as a convenience method to help with -// debugging. You should not rely on its output as the internal -// structure of the AST may change at any time. -func (node ASTNode) PrettyPrint(indent int) string { - spaces := strings.Repeat(" ", indent) - output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) - nextIndent := indent + 2 - if node.value != nil { - if converted, ok := node.value.(fmt.Stringer); ok { - // Account for things like comparator nodes - // that are enums with a String() method. - output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) - } else { - output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) - } - } - lastIndex := len(node.children) - if lastIndex > 0 { - output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) - childIndent := nextIndent + 2 - for _, elem := range node.children { - output += elem.PrettyPrint(childIndent) - } - } - output += fmt.Sprintf("%s}\n", spaces) - return output -} - -var bindingPowers = map[tokType]int{ - tEOF: 0, - tUnquotedIdentifier: 0, - tQuotedIdentifier: 0, - tRbracket: 0, - tRparen: 0, - tComma: 0, - tRbrace: 0, - tNumber: 0, - tCurrent: 0, - tExpref: 0, - tColon: 0, - tPipe: 1, - tOr: 2, - tAnd: 3, - tEQ: 5, - tLT: 5, - tLTE: 5, - tGT: 5, - tGTE: 5, - tNE: 5, - tFlatten: 9, - tStar: 20, - tFilter: 21, - tDot: 40, - tNot: 45, - tLbrace: 50, - tLbracket: 55, - tLparen: 60, -} - -// Parser holds state about the current expression being parsed. -type Parser struct { - expression string - tokens []token - index int -} - -// NewParser creates a new JMESPath parser. -func NewParser() *Parser { - p := Parser{} - return &p -} - -// Parse will compile a JMESPath expression. 
-func (p *Parser) Parse(expression string) (ASTNode, error) { - lexer := NewLexer() - p.expression = expression - p.index = 0 - tokens, err := lexer.tokenize(expression) - if err != nil { - return ASTNode{}, err - } - p.tokens = tokens - parsed, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if p.current() != tEOF { - return ASTNode{}, p.syntaxError(fmt.Sprintf( - "Unexpected token at the end of the expression: %s", p.current())) - } - return parsed, nil -} - -func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { - var err error - leftToken := p.lookaheadToken(0) - p.advance() - leftNode, err := p.nud(leftToken) - if err != nil { - return ASTNode{}, err - } - currentToken := p.current() - for bindingPower < bindingPowers[currentToken] { - p.advance() - leftNode, err = p.led(currentToken, leftNode) - if err != nil { - return ASTNode{}, err - } - currentToken = p.current() - } - return leftNode, nil -} - -func (p *Parser) parseIndexExpression() (ASTNode, error) { - if p.lookahead(0) == tColon || p.lookahead(1) == tColon { - return p.parseSliceExpression() - } - indexStr := p.lookaheadToken(0).value - parsedInt, err := strconv.Atoi(indexStr) - if err != nil { - return ASTNode{}, err - } - indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} - p.advance() - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - return indexNode, nil -} - -func (p *Parser) parseSliceExpression() (ASTNode, error) { - parts := []*int{nil, nil, nil} - index := 0 - current := p.current() - for current != tRbracket && index < 3 { - if current == tColon { - index++ - p.advance() - } else if current == tNumber { - parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) - if err != nil { - return ASTNode{}, err - } - parts[index] = &parsedInt - p.advance() - } else { - return ASTNode{}, p.syntaxError( - "Expected tColon or tNumber" + ", received: " + p.current().String()) - } - current = p.current() - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTSlice, - value: parts, - }, nil -} - -func (p *Parser) match(tokenType tokType) error { - if p.current() == tokenType { - p.advance() - return nil - } - return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) -} - -func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { - switch tokenType { - case tDot: - if p.current() != tStar { - right, err := p.parseDotRHS(bindingPowers[tDot]) - return ASTNode{ - nodeType: ASTSubexpression, - children: []ASTNode{node, right}, - }, err - } - p.advance() - right, err := p.parseProjectionRHS(bindingPowers[tDot]) - return ASTNode{ - nodeType: ASTValueProjection, - children: []ASTNode{node, right}, - }, err - case tPipe: - right, err := p.parseExpression(bindingPowers[tPipe]) - return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err - case tOr: - right, err := p.parseExpression(bindingPowers[tOr]) - return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err - case tAnd: - right, err := p.parseExpression(bindingPowers[tAnd]) - return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err - case tLparen: - name := node.value - var args []ASTNode - for p.current() != tRparen { - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if p.current() == tComma { - if err := p.match(tComma); err != nil { - return ASTNode{}, err - } - } - args = append(args, expression) - } - 
if err := p.match(tRparen); err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTFunctionExpression, - value: name, - children: args, - }, nil - case tFilter: - return p.parseFilter(node) - case tFlatten: - left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} - right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{left, right}, - }, err - case tEQ, tNE, tGT, tGTE, tLT, tLTE: - right, err := p.parseExpression(bindingPowers[tokenType]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTComparator, - value: tokenType, - children: []ASTNode{node, right}, - }, nil - case tLbracket: - tokenType := p.current() - var right ASTNode - var err error - if tokenType == tNumber || tokenType == tColon { - right, err = p.parseIndexExpression() - if err != nil { - return ASTNode{}, err - } - return p.projectIfSlice(node, right) - } - // Otherwise this is a projection. - if err := p.match(tStar); err != nil { - return ASTNode{}, err - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - right, err = p.parseProjectionRHS(bindingPowers[tStar]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{node, right}, - }, nil - } - return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) -} - -func (p *Parser) nud(token token) (ASTNode, error) { - switch token.tokenType { - case tJSONLiteral: - var parsed interface{} - err := json.Unmarshal([]byte(token.value), &parsed) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTLiteral, value: parsed}, nil - case tStringLiteral: - return ASTNode{nodeType: ASTLiteral, value: token.value}, nil - case tUnquotedIdentifier: - return ASTNode{ - nodeType: ASTField, - value: token.value, - }, nil - case tQuotedIdentifier: - node := ASTNode{nodeType: ASTField, value: token.value} - if p.current() == tLparen { - return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) - } - return node, nil - case tStar: - left := ASTNode{nodeType: ASTIdentity} - var right ASTNode - var err error - if p.current() == tRbracket { - right = ASTNode{nodeType: ASTIdentity} - } else { - right, err = p.parseProjectionRHS(bindingPowers[tStar]) - } - return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err - case tFilter: - return p.parseFilter(ASTNode{nodeType: ASTIdentity}) - case tLbrace: - return p.parseMultiSelectHash() - case tFlatten: - left := ASTNode{ - nodeType: ASTFlatten, - children: []ASTNode{{nodeType: ASTIdentity}}, - } - right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil - case tLbracket: - tokenType := p.current() - //var right ASTNode - if tokenType == tNumber || tokenType == tColon { - right, err := p.parseIndexExpression() - if err != nil { - return ASTNode{}, nil - } - return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) - } else if tokenType == tStar && p.lookahead(1) == tRbracket { - p.advance() - p.advance() - right, err := p.parseProjectionRHS(bindingPowers[tStar]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{{nodeType: ASTIdentity}, right}, - }, nil - } else { - return p.parseMultiSelectList() - } - case tCurrent: - return ASTNode{nodeType: 
ASTCurrentNode}, nil - case tExpref: - expression, err := p.parseExpression(bindingPowers[tExpref]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil - case tNot: - expression, err := p.parseExpression(bindingPowers[tNot]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil - case tLparen: - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if err := p.match(tRparen); err != nil { - return ASTNode{}, err - } - return expression, nil - case tEOF: - return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) - } - - return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) -} - -func (p *Parser) parseMultiSelectList() (ASTNode, error) { - var expressions []ASTNode - for { - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - expressions = append(expressions, expression) - if p.current() == tRbracket { - break - } - err = p.match(tComma) - if err != nil { - return ASTNode{}, err - } - } - err := p.match(tRbracket) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTMultiSelectList, - children: expressions, - }, nil -} - -func (p *Parser) parseMultiSelectHash() (ASTNode, error) { - var children []ASTNode - for { - keyToken := p.lookaheadToken(0) - if err := p.match(tUnquotedIdentifier); err != nil { - if err := p.match(tQuotedIdentifier); err != nil { - return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") - } - } - keyName := keyToken.value - err := p.match(tColon) - if err != nil { - return ASTNode{}, err - } - value, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - node := ASTNode{ - nodeType: ASTKeyValPair, - value: keyName, - children: []ASTNode{value}, - } - children = append(children, node) - if p.current() == tComma { - err := p.match(tComma) - if err != nil { - return ASTNode{}, nil - } - } else if p.current() == tRbrace { - err := p.match(tRbrace) - if err != nil { - return ASTNode{}, nil - } - break - } - } - return ASTNode{ - nodeType: ASTMultiSelectHash, - children: children, - }, nil -} - -func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { - indexExpr := ASTNode{ - nodeType: ASTIndexExpression, - children: []ASTNode{left, right}, - } - if right.nodeType == ASTSlice { - right, err := p.parseProjectionRHS(bindingPowers[tStar]) - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{indexExpr, right}, - }, err - } - return indexExpr, nil -} -func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { - var right, condition ASTNode - var err error - condition, err = p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - if p.current() == tFlatten { - right = ASTNode{nodeType: ASTIdentity} - } else { - right, err = p.parseProjectionRHS(bindingPowers[tFilter]) - if err != nil { - return ASTNode{}, err - } - } - - return ASTNode{ - nodeType: ASTFilterProjection, - children: []ASTNode{node, right, condition}, - }, nil -} - -func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { - lookahead := p.current() - if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { - return p.parseExpression(bindingPower) - } else if lookahead == tLbracket { - if err := p.match(tLbracket); err != nil 
{ - return ASTNode{}, err - } - return p.parseMultiSelectList() - } else if lookahead == tLbrace { - if err := p.match(tLbrace); err != nil { - return ASTNode{}, err - } - return p.parseMultiSelectHash() - } - return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") -} - -func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { - current := p.current() - if bindingPowers[current] < 10 { - return ASTNode{nodeType: ASTIdentity}, nil - } else if current == tLbracket { - return p.parseExpression(bindingPower) - } else if current == tFilter { - return p.parseExpression(bindingPower) - } else if current == tDot { - err := p.match(tDot) - if err != nil { - return ASTNode{}, err - } - return p.parseDotRHS(bindingPower) - } else { - return ASTNode{}, p.syntaxError("Error") - } -} - -func (p *Parser) lookahead(number int) tokType { - return p.lookaheadToken(number).tokenType -} - -func (p *Parser) current() tokType { - return p.lookahead(0) -} - -func (p *Parser) lookaheadToken(number int) token { - return p.tokens[p.index+number] -} - -func (p *Parser) advance() { - p.index++ -} - -func tokensOneOf(elements []tokType, token tokType) bool { - for _, elem := range elements { - if elem == token { - return true - } - } - return false -} - -func (p *Parser) syntaxError(msg string) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: p.expression, - Offset: p.lookaheadToken(0).position, - } -} - -// Create a SyntaxError based on the provided token. -// This differs from syntaxError() which creates a SyntaxError -// based on the current lookahead token. -func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: p.expression, - Offset: t.position, - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/toktype_string.go deleted file mode 100644 index dae79cbd..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/toktype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// generated by stringer -type=tokType; DO NOT EDIT - -package jmespath - -import "fmt" - -const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" - -var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} - -func (i tokType) String() string { - if i < 0 || i >= tokType(len(_tokType_index)-1) { - return fmt.Sprintf("tokType(%d)", i) - } - return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/util.go deleted file mode 100644 index ddc1b7d7..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/util.go +++ /dev/null @@ -1,185 +0,0 @@ -package jmespath - -import ( - "errors" - "reflect" -) - -// IsFalse determines if an object is false based on the JMESPath spec. 
-// JMESPath defines false values to be any of: -// - An empty string, array, or hash. -// - The boolean value false. -// - nil -func isFalse(value interface{}) bool { - switch v := value.(type) { - case bool: - return !v - case []interface{}: - return len(v) == 0 - case map[string]interface{}: - return len(v) == 0 - case string: - return len(v) == 0 - case nil: - return true - } - // Try the reflection cases before returning false. - rv := reflect.ValueOf(value) - switch rv.Kind() { - case reflect.Struct: - // A struct type will never be false, even if - // all of its values are the zero type. - return false - case reflect.Slice, reflect.Map: - return rv.Len() == 0 - case reflect.Ptr: - if rv.IsNil() { - return true - } - // If it's a pointer type, we'll try to deref the pointer - // and evaluate the pointer value for isFalse. - element := rv.Elem() - return isFalse(element.Interface()) - } - return false -} - -// ObjsEqual is a generic object equality check. -// It will take two arbitrary objects and recursively determine -// if they are equal. -func objsEqual(left interface{}, right interface{}) bool { - return reflect.DeepEqual(left, right) -} - -// SliceParam refers to a single part of a slice. -// A slice consists of a start, a stop, and a step, similar to -// python slices. -type sliceParam struct { - N int - Specified bool -} - -// Slice supports [start:stop:step] style slicing that's supported in JMESPath. -func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { - computed, err := computeSliceParams(len(slice), parts) - if err != nil { - return nil, err - } - start, stop, step := computed[0], computed[1], computed[2] - result := []interface{}{} - if step > 0 { - for i := start; i < stop; i += step { - result = append(result, slice[i]) - } - } else { - for i := start; i > stop; i += step { - result = append(result, slice[i]) - } - } - return result, nil -} - -func computeSliceParams(length int, parts []sliceParam) ([]int, error) { - var start, stop, step int - if !parts[2].Specified { - step = 1 - } else if parts[2].N == 0 { - return nil, errors.New("Invalid slice, step cannot be 0") - } else { - step = parts[2].N - } - var stepValueNegative bool - if step < 0 { - stepValueNegative = true - } else { - stepValueNegative = false - } - - if !parts[0].Specified { - if stepValueNegative { - start = length - 1 - } else { - start = 0 - } - } else { - start = capSlice(length, parts[0].N, step) - } - - if !parts[1].Specified { - if stepValueNegative { - stop = -1 - } else { - stop = length - } - } else { - stop = capSlice(length, parts[1].N, step) - } - return []int{start, stop, step}, nil -} - -func capSlice(length int, actual int, step int) int { - if actual < 0 { - actual += length - if actual < 0 { - if step < 0 { - actual = -1 - } else { - actual = 0 - } - } - } else if actual >= length { - if step < 0 { - actual = length - 1 - } else { - actual = length - } - } - return actual -} - -// ToArrayNum converts an empty interface type to a slice of float64. -// If any element in the array cannot be converted, then nil is returned -// along with a second value of false. -func toArrayNum(data interface{}) ([]float64, bool) { - // Is there a better way to do this with reflect? 
- if d, ok := data.([]interface{}); ok { - result := make([]float64, len(d)) - for i, el := range d { - item, ok := el.(float64) - if !ok { - return nil, false - } - result[i] = item - } - return result, true - } - return nil, false -} - -// ToArrayStr converts an empty interface type to a slice of strings. -// If any element in the array cannot be converted, then nil is returned -// along with a second value of false. If the input data could be entirely -// converted, then the converted data, along with a second value of true, -// will be returned. -func toArrayStr(data interface{}) ([]string, bool) { - // Is there a better way to do this with reflect? - if d, ok := data.([]interface{}); ok { - result := make([]string, len(d)) - for i, el := range d { - item, ok := el.(string) - if !ok { - return nil, false - } - result[i] = item - } - return result, true - } - return nil, false -} - -func isSliceType(v interface{}) bool { - if v == nil { - return false - } - return reflect.TypeOf(v).Kind() == reflect.Slice -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/.travis.yml deleted file mode 100644 index a77773b8..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.1 - - 1.2 - - 1.3 - - tip - -install: - - go get github.com/bugsnag/panicwrap - - go get github.com/bugsnag/osext - - go get github.com/bitly/go-simplejson - - go get github.com/revel/revel diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/LICENSE.txt b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/LICENSE.txt deleted file mode 100644 index 3cb0ec0f..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/LICENSE.txt +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2014 Bugsnag - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/README.md b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/README.md deleted file mode 100644 index b5432293..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/README.md +++ /dev/null @@ -1,489 +0,0 @@ -Bugsnag Notifier for Golang -=========================== - -The Bugsnag Notifier for Golang gives you instant notification of panics, or -unexpected errors, in your golang app. Any unhandled panics will trigger a -notification to be sent to your Bugsnag project. - -[Bugsnag](http://bugsnag.com) captures errors in real-time from your web, -mobile and desktop applications, helping you to understand and resolve them -as fast as possible. [Create a free account](http://bugsnag.com) to start -capturing exceptions from your applications. - -## How to Install - -1. Download the code - - ```shell - go get github.com/bugsnag/bugsnag-go - ``` - -### Using with net/http apps - -For a golang app based on [net/http](https://godoc.org/net/http), integrating -Bugsnag takes two steps. You should also use these instructions if you're using -the [gorilla toolkit](http://www.gorillatoolkit.org/), or the -[pat](https://github.com/bmizerany/pat/) muxer. - -1. Configure bugsnag at the start of your `main()` function: - - ```go - import "github.com/bugsnag/bugsnag-go" - - func main() { - bugsnag.Configure(bugsnag.Configuration{ - APIKey: "YOUR_API_KEY_HERE", - ReleaseStage: "production", - // more configuration options - }) - - // rest of your program. - } - ``` - -2. Wrap your server in a [bugsnag.Handler](https://godoc.org/github.com/bugsnag/bugsnag-go/#Handler) - - ```go - // a. If you're using the builtin http mux, you can just pass - // bugsnag.Handler(nil) to http.ListenAndServer - http.ListenAndServe(":8080", bugsnag.Handler(nil)) - - // b. If you're creating a server manually yourself, you can set - // its handlers the same way - srv := http.Server{ - Handler: bugsnag.Handler(nil) - } - - // c. If you're not using the builtin http mux, wrap your own handler - // (though make sure that it doesn't already catch panics) - http.ListenAndServe(":8080", bugsnag.Handler(handler)) - ``` - -### Using with Revel apps - -There are two steps to get panic handling in [revel](https://revel.github.io) apps. - -1. Add the `bugsnagrevel.Filter` immediately after the `revel.PanicFilter` in `app/init.go`: - - ```go - - import "github.com/bugsnag/bugsnag-go/revel" - - revel.Filters = []revel.Filter{ - revel.PanicFilter, - bugsnagrevel.Filter, - // ... - } - ``` - -2. Set bugsnag.apikey in the top section of `conf/app.conf`. - - ``` - module.static=github.com/revel/revel/modules/static - - bugsnag.apikey=YOUR_API_KEY_HERE - - [dev] - ``` - -### Using with Google App Engine - -1. Configure bugsnag at the start of your `init()` function: - - ```go - import "github.com/bugsnag/bugsnag-go" - - func init() { - bugsnag.Configure(bugsnag.Configuration{ - APIKey: "YOUR_API_KEY_HERE", - }) - - // ... - } - ``` - -2. Wrap *every* http.Handler or http.HandlerFunc with Bugsnag: - - ```go - // a. If you're using HandlerFuncs - http.HandleFunc("/", bugsnag.HandlerFunc( - func (w http.ResponseWriter, r *http.Request) { - // ... - })) - - // b. If you're using Handlers - http.Handle("/", bugsnag.Handler(myHttpHandler)) - ``` - -3. 
In order to use Bugsnag, you must provide the current -[`appengine.Context`](https://developers.google.com/appengine/docs/go/reference#Context), or -current `*http.Request` as rawData. The easiest way to do this is to create a new notifier. - - ```go - c := appengine.NewContext(r) - notifier := bugsnag.New(c) - - if err != nil { - notifier.Notify(err) - } - - go func () { - defer notifier.Recover() - - // ... - }() - ``` - - -## Notifying Bugsnag manually - -Bugsnag will automatically handle any panics that crash your program and notify -you of them. If you've integrated with `revel` or `net/http`, then you'll also -be notified of any panics() that happen while processing a request. - -Sometimes however it's useful to manually notify Bugsnag of a problem. To do this, -call [`bugsnag.Notify()`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Notify) - -```go -if err != nil { - bugsnag.Notify(err) -} -``` - -### Manual panic handling - -To avoid a panic in a goroutine from crashing your entire app, you can use -[`bugsnag.Recover()`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Recover) -to stop a panic from unwinding the stack any further. When `Recover()` is hit, -it will send any current panic to Bugsnag and then stop panicking. This is -most useful at the start of a goroutine: - -```go -go func() { - defer bugsnag.Recover() - - // ... -}() -``` - -Alternatively you can use -[`bugsnag.AutoNotify()`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Recover) -to notify bugsnag of a panic while letting the program continue to panic. This -is useful if you're using a Framework that already has some handling of panics -and you are retrofitting bugsnag support. - -```go -defer bugsnag.AutoNotify() -``` - -## Sending Custom Data - -Most functions in the Bugsnag API, including `bugsnag.Notify()`, -`bugsnag.Recover()`, `bugsnag.AutoNotify()`, and `bugsnag.Handler()` let you -attach data to the notifications that they send. To do this you pass in rawData, -which can be any of the supported types listed here. To add support for more -types of rawData see [OnBeforeNotify](#custom-data-with-onbeforenotify). - -### Custom MetaData - -Custom metaData appears as tabs on Bugsnag.com. You can set it by passing -a [`bugsnag.MetaData`](https://godoc.org/github.com/bugsnag/bugsnag-go/#MetaData) -object as rawData. - -```go -bugsnag.Notify(err, - bugsnag.MetaData{ - "Account": { - "Name": Account.Name, - "Paying": Account.Plan.Premium, - }, - }) -``` - -### Request data - -Bugsnag can extract interesting data from -[`*http.Request`](https://godoc.org/net/http/#Request) objects, and -[`*revel.Controller`](https://godoc.org/github.com/revel/revel/#Controller) -objects. These are automatically passed in when handling panics, and you can -pass them yourself. - -```go -func (w http.ResponseWriter, r *http.Request) { - bugsnag.Notify(err, r) -} -``` - -### User data - -User data is searchable, and the `Id` powers the count of users affected. You -can set which user an error affects by passing a -[`bugsnag.User`](https://godoc.org/github.com/bugsnag/bugsnag-go/#User) object as -rawData. - -```go -bugsnag.Notify(err, - bugsnag.User{Id: "1234", Name: "Conrad", Email: "me@cirw.in"}) -``` - -### Context - -The context shows up prominently in the list view so that you can get an idea -of where a problem occurred. You can set it by passing a -[`bugsnag.Context`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Context) -object as rawData. 
- -```go -bugsnag.Notify(err, bugsnag.Context{"backgroundJob"}) -``` - -### Severity - -Bugsnag supports three severities, `SeverityError`, `SeverityWarning`, and `SeverityInfo`. -You can set the severity of an error by passing one of these objects as rawData. - -```go -bugsnag.Notify(err, bugsnag.SeverityInfo) -``` - -## Configuration - -You must call `bugsnag.Configure()` at the start of your program to use Bugsnag, you pass it -a [`bugsnag.Configuration`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Configuration) object -containing any of the following values. - -### APIKey - -The Bugsnag API key can be found on your [Bugsnag dashboard](https://bugsnag.com) under "Settings". - -```go -bugsnag.Configure(bugsnag.Configuration{ - APIKey: "YOUR_API_KEY_HERE", -}) -``` - -### Endpoint - -The Bugsnag endpoint defaults to `https://notify.bugsnag.com/`. If you're using Bugsnag enterprise, -you should set this to the endpoint of your local instance. - -```go -bugsnag.Configure(bugsnag.Configuration{ - Endpoint: "http://bugsnag.internal:49000/", -}) -``` - -### ReleaseStage - -The ReleaseStage tracks where your app is deployed. You should set this to `production`, `staging`, -`development` or similar as appropriate. - -```go -bugsnag.Configure(bugsnag.Configuration{ - ReleaseStage: "development", -}) -``` - -### NotifyReleaseStages - -The list of ReleaseStages to notify in. By default Bugsnag will notify you in all release stages, but -you can use this to silence development errors. - -```go -bugsnag.Configure(bugsnag.Configuration{ - NotifyReleaseStages: []string{"production", "staging"}, -}) -``` - -### AppVersion - -If you use a versioning scheme for deploys of your app, Bugsnag can use the `AppVersion` to only -re-open errors if they occur in later version of the app. - -```go -bugsnag.Configure(bugsnag.Configuration{ - AppVersion: "1.2.3", -}) -``` - -### Hostname - -The hostname is used to track where exceptions are coming from in the Bugsnag dashboard. The -default value is obtained from `os.Hostname()` so you won't often need to change this. - -```go -bugsnag.Configure(bugsnag.Configuration{ - Hostname: "go1", -}) -``` - -### ProjectPackages - -In order to determine where a crash happens Bugsnag needs to know which packages you consider to -be part of your app (as opposed to a library). By default this is set to `[]string{"main*"}`. Strings -are matched to package names using [`filepath.Match`](http://godoc.org/path/filepath#Match). - -```go -bugsnag.Configure(bugsnag.Configuration{ - ProjectPackages: []string{"main", "github.com/domain/myapp/*"}, -} -``` - -### ParamsFilters - -Sometimes sensitive data is accidentally included in Bugsnag MetaData. You can remove it by -setting `ParamsFilters`. Any key in the `MetaData` that includes any string in the filters -will be redacted. The default is `[]string{"password", "secret"}`, which prevents fields like -`password`, `password_confirmation` and `secret_answer` from being sent. - -```go -bugsnag.Configure(bugsnag.Configuration{ - ParamsFilters: []string{"password", "secret"}, -} -``` - -### Logger - -The Logger to write to in case of an error inside Bugsnag. This defaults to the global logger. - -```go -bugsnag.Configure(bugsnag.Configuration{ - Logger: app.Logger, -} -``` - -### PanicHandler - -The first time Bugsnag is configured, it wraps the running program in a panic -handler using [panicwrap](http://godoc.org/github.com/ConradIrwin/panicwrap). This -forks a sub-process which monitors unhandled panics. 
To prevent this, set -`PanicHandler` to `func() {}` the first time you call -`bugsnag.Configure`. This will prevent bugsnag from being able to notify you about -unhandled panics. - -```go -bugsnag.Configure(bugsnag.Configuration{ - PanicHandler: func() {}, -}) -``` - -### Synchronous - -Bugsnag usually starts a new goroutine before sending notifications. This means -that notifications can be lost if you do a bugsnag.Notify and then immediately -os.Exit. To avoid this problem, set Bugsnag to Synchronous (or just `panic()` -instead ;). - -```go -bugsnag.Configure(bugsnag.Configuration{ - Synchronous: true -}) -``` - -Or just for one error: - -```go -bugsnag.Notify(err, bugsnag.Configuration{Synchronous: true}) -``` - -### Transport - -The transport configures how Bugsnag makes http requests. By default we use -[`http.DefaultTransport`](http://godoc.org/net/http#RoundTripper) which handles -HTTP proxies automatically using the `$HTTP_PROXY` environment variable. - -```go -bugsnag.Configure(bugsnag.Configuration{ - Transport: http.DefaultTransport, -}) -``` - -## Custom data with OnBeforeNotify - -While it's nice that you can pass `MetaData` directly into `bugsnag.Notify`, -`bugsnag.AutoNotify`, and `bugsnag.Recover`, this can be a bit cumbersome and -inefficient — you're constructing the meta-data whether or not it will actually -be used. A better idea is to pass raw data in to these functions, and add an -`OnBeforeNotify` filter that converts them into `MetaData`. - -For example, lets say our system processes jobs: - -```go -type Job struct{ - Retry bool - UserId string - UserEmail string - Name string - Params map[string]string -} -``` - -You can pass a job directly into Bugsnag.notify: - -```go -bugsnag.Notify(err, job) -``` - -And then add a filter to extract information from that job and attach it to the -Bugsnag event: - -```go -bugsnag.OnBeforeNotify( - func(event *bugsnag.Event, config *bugsnag.Configuration) error { - - // Search all the RawData for any *Job pointers that we're passed in - // to bugsnag.Notify() and friends. - for _, datum := range event.RawData { - if job, ok := datum.(*Job); ok { - // don't notify bugsnag about errors in retries - if job.Retry { - return fmt.Errorf("not notifying about retried jobs") - } - - // add the job as a tab on Bugsnag.com - event.MetaData.AddStruct("Job", job) - - // set the user correctly - event.User = &User{Id: job.UserId, Email: job.UserEmail} - } - } - - // continue notifying as normal - return nil - }) -``` - -## Advanced Usage - -If you want to have multiple different configurations around in one program, -you can use `bugsnag.New()` to create multiple independent instances of -Bugsnag. You can use these without calling `bugsnag.Configure()`, but bear in -mind that until you call `bugsnag.Configure()` unhandled panics will not be -sent to bugsnag. - -```go -notifier := bugsnag.New(bugsnag.Configuration{ - APIKey: "YOUR_OTHER_API_KEY", -}) -``` - -In fact any place that lets you pass in `rawData` also allows you to pass in -configuration. 
For example to send http errors to one bugsnag project, you -could do: - -```go -bugsnag.Handler(nil, bugsnag.Configuration{APIKey: "YOUR_OTHER_API_KEY"}) -``` - -### GroupingHash - -If you need to override Bugsnag's grouping algorithm, you can set the -`GroupingHash` in an `OnBeforeNotify`: - -```go -bugsnag.OnBeforeNotify( - func (event *bugsnag.Event, config *bugsnag.Configuration) error { - event.GroupingHash = calculateGroupingHash(event) - return nil - }) -``` diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/appengine.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/appengine.go deleted file mode 100644 index 73aa2d77..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/appengine.go +++ /dev/null @@ -1,76 +0,0 @@ -// +build appengine - -package bugsnag - -import ( - "appengine" - "appengine/urlfetch" - "appengine/user" - "fmt" - "log" - "net/http" -) - -func defaultPanicHandler() {} - -func init() { - OnBeforeNotify(appengineMiddleware) -} - -func appengineMiddleware(event *Event, config *Configuration) (err error) { - var c appengine.Context - - for _, datum := range event.RawData { - if r, ok := datum.(*http.Request); ok { - c = appengine.NewContext(r) - break - } else if context, ok := datum.(appengine.Context); ok { - c = context - break - } - } - - if c == nil { - return fmt.Errorf("No appengine context given") - } - - // You can only use the builtin http library if you pay for appengine, - // so we use the appengine urlfetch service instead. - config.Transport = &urlfetch.Transport{ - Context: c, - } - - // Anything written to stderr/stdout is discarded, so lets log to the request. - config.Logger = log.New(appengineWriter{c}, config.Logger.Prefix(), config.Logger.Flags()) - - // Set the releaseStage appropriately - if config.ReleaseStage == "" { - if appengine.IsDevAppServer() { - config.ReleaseStage = "development" - } else { - config.ReleaseStage = "production" - } - } - - if event.User == nil { - u := user.Current(c) - if u != nil { - event.User = &User{ - Id: u.ID, - Email: u.Email, - } - } - } - - return nil -} - -// Convert an appengine.Context into an io.Writer so we can create a log.Logger. -type appengineWriter struct { - appengine.Context -} - -func (c appengineWriter) Write(b []byte) (int, error) { - c.Warningf(string(b)) - return len(b), nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/bugsnag.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/bugsnag.go deleted file mode 100644 index acd0fed3..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/bugsnag.go +++ /dev/null @@ -1,131 +0,0 @@ -package bugsnag - -import ( - "github.com/bugsnag/bugsnag-go/errors" - "log" - "net/http" - "os" - "sync" - - // Fixes a bug with SHA-384 intermediate certs on some platforms. - // - https://github.com/bugsnag/bugsnag-go/issues/9 - _ "crypto/sha512" -) - -// The current version of bugsnag-go. -const VERSION = "1.0.2" - -var once sync.Once -var middleware middlewareStack - -// The configuration for the default bugsnag notifier. -var Config Configuration - -var defaultNotifier = Notifier{&Config, nil} - -// Configure Bugsnag. The only required setting is the APIKey, which can be -// obtained by clicking on "Settings" in your Bugsnag dashboard. 
This function -// is also responsible for installing the global panic handler, so it should be -// called as early as possible in your initialization process. -func Configure(config Configuration) { - Config.update(&config) - once.Do(Config.PanicHandler) -} - -// Notify sends an error to Bugsnag along with the current stack trace. The -// rawData is used to send extra information along with the error. For example -// you can pass the current http.Request to Bugsnag to see information about it -// in the dashboard, or set the severity of the notification. -func Notify(err error, rawData ...interface{}) error { - return defaultNotifier.Notify(errors.New(err, 1), rawData...) -} - -// AutoNotify logs a panic on a goroutine and then repanics. -// It should only be used in places that have existing panic handlers further -// up the stack. See bugsnag.Recover(). The rawData is used to send extra -// information along with any panics that are handled this way. -// Usage: defer bugsnag.AutoNotify() -func AutoNotify(rawData ...interface{}) { - if err := recover(); err != nil { - rawData = defaultNotifier.addDefaultSeverity(rawData, SeverityError) - defaultNotifier.Notify(errors.New(err, 2), rawData...) - panic(err) - } -} - -// Recover logs a panic on a goroutine and then recovers. -// The rawData is used to send extra information along with -// any panics that are handled this way -// Usage: defer bugsnag.Recover() -func Recover(rawData ...interface{}) { - if err := recover(); err != nil { - rawData = defaultNotifier.addDefaultSeverity(rawData, SeverityWarning) - defaultNotifier.Notify(errors.New(err, 2), rawData...) - } -} - -// OnBeforeNotify adds a callback to be run before a notification is sent to -// Bugsnag. It can be used to modify the event or its MetaData. Changes made -// to the configuration are local to notifying about this event. To prevent the -// event from being sent to Bugsnag return an error, this error will be -// returned from bugsnag.Notify() and the event will not be sent. -func OnBeforeNotify(callback func(event *Event, config *Configuration) error) { - middleware.OnBeforeNotify(callback) -} - -// Handler creates an http Handler that notifies Bugsnag any panics that -// happen. It then repanics so that the default http Server panic handler can -// handle the panic too. The rawData is used to send extra information along -// with any panics that are handled this way. -func Handler(h http.Handler, rawData ...interface{}) http.Handler { - notifier := New(rawData...) - if h == nil { - h = http.DefaultServeMux - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer notifier.AutoNotify(r) - h.ServeHTTP(w, r) - }) -} - -// HandlerFunc creates an http HandlerFunc that notifies Bugsnag about any -// panics that happen. It then repanics so that the default http Server panic -// handler can handle the panic too. The rawData is used to send extra -// information along with any panics that are handled this way. If you have -// already wrapped your http server using bugsnag.Handler() you don't also need -// to wrap each HandlerFunc. -func HandlerFunc(h http.HandlerFunc, rawData ...interface{}) http.HandlerFunc { - notifier := New(rawData...) 
- - return func(w http.ResponseWriter, r *http.Request) { - defer notifier.AutoNotify(r) - h(w, r) - } -} - -func init() { - // Set up builtin middlewarez - OnBeforeNotify(httpRequestMiddleware) - - // Default configuration - Config.update(&Configuration{ - APIKey: "", - Endpoint: "https://notify.bugsnag.com/", - Hostname: "", - AppVersion: "", - ReleaseStage: "", - ParamsFilters: []string{"password", "secret"}, - // * for app-engine - ProjectPackages: []string{"main*"}, - NotifyReleaseStages: nil, - Logger: log.New(os.Stdout, log.Prefix(), log.Flags()), - PanicHandler: defaultPanicHandler, - Transport: http.DefaultTransport, - }) - - hostname, err := os.Hostname() - if err == nil { - Config.Hostname = hostname - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/configuration.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/configuration.go deleted file mode 100644 index 7ff26e56..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/configuration.go +++ /dev/null @@ -1,159 +0,0 @@ -package bugsnag - -import ( - "log" - "net/http" - "path/filepath" - "strings" -) - -// Configuration sets up and customizes communication with the Bugsnag API. -type Configuration struct { - // Your Bugsnag API key, e.g. "c9d60ae4c7e70c4b6c4ebd3e8056d2b8". You can - // find this by clicking Settings on https://bugsnag.com/. - APIKey string - // The Endpoint to notify about crashes. This defaults to - // "https://notify.bugsnag.com/", if you're using Bugsnag Enterprise then - // set it to your internal Bugsnag endpoint. - Endpoint string - - // The current release stage. This defaults to "production" and is used to - // filter errors in the Bugsnag dashboard. - ReleaseStage string - // The currently running version of the app. This is used to filter errors - // in the Bugsnag dasboard. If you set this then Bugsnag will only re-open - // resolved errors if they happen in different app versions. - AppVersion string - // The hostname of the current server. This defaults to the return value of - // os.Hostname() and is graphed in the Bugsnag dashboard. - Hostname string - - // The Release stages to notify in. If you set this then bugsnag-go will - // only send notifications to Bugsnag if the ReleaseStage is listed here. - NotifyReleaseStages []string - - // packages that are part of your app. Bugsnag uses this to determine how - // to group errors and how to display them on your dashboard. You should - // include any packages that are part of your app, and exclude libraries - // and helpers. You can list wildcards here, and they'll be expanded using - // filepath.Glob. The default value is []string{"main*"} - ProjectPackages []string - - // Any meta-data that matches these filters will be marked as [REDACTED] - // before sending a Notification to Bugsnag. It defaults to - // []string{"password", "secret"} so that request parameters like password, - // password_confirmation and auth_secret will not be sent to Bugsnag. - ParamsFilters []string - - // The PanicHandler is used by Bugsnag to catch unhandled panics in your - // application. The default panicHandler uses mitchellh's panicwrap library, - // and you can disable this feature by passing an empty: func() {} - PanicHandler func() - - // The logger that Bugsnag should log to. Uses the same defaults as go's - // builtin logging package. bugsnag-go logs whenever it notifies Bugsnag - // of an error, and when any error occurs inside the library itself. 
- Logger *log.Logger - // The http Transport to use, defaults to the default http Transport. This - // can be configured if you are in an environment like Google App Engine - // that has stringent conditions on making http requests. - Transport http.RoundTripper - // Whether bugsnag should notify synchronously. This defaults to false which - // causes bugsnag-go to spawn a new goroutine for each notification. - Synchronous bool - // TODO: remember to update the update() function when modifying this struct -} - -func (config *Configuration) update(other *Configuration) *Configuration { - if other.APIKey != "" { - config.APIKey = other.APIKey - } - if other.Endpoint != "" { - config.Endpoint = other.Endpoint - } - if other.Hostname != "" { - config.Hostname = other.Hostname - } - if other.AppVersion != "" { - config.AppVersion = other.AppVersion - } - if other.ReleaseStage != "" { - config.ReleaseStage = other.ReleaseStage - } - if other.ParamsFilters != nil { - config.ParamsFilters = other.ParamsFilters - } - if other.ProjectPackages != nil { - config.ProjectPackages = other.ProjectPackages - } - if other.Logger != nil { - config.Logger = other.Logger - } - if other.NotifyReleaseStages != nil { - config.NotifyReleaseStages = other.NotifyReleaseStages - } - if other.PanicHandler != nil { - config.PanicHandler = other.PanicHandler - } - if other.Transport != nil { - config.Transport = other.Transport - } - if other.Synchronous { - config.Synchronous = true - } - - return config -} - -func (config *Configuration) merge(other *Configuration) *Configuration { - return config.clone().update(other) -} - -func (config *Configuration) clone() *Configuration { - clone := *config - return &clone -} - -func (config *Configuration) isProjectPackage(pkg string) bool { - for _, p := range config.ProjectPackages { - if match, _ := filepath.Match(p, pkg); match { - return true - } - } - return false -} - -func (config *Configuration) stripProjectPackages(file string) string { - for _, p := range config.ProjectPackages { - if len(p) > 2 && p[len(p)-2] == '/' && p[len(p)-1] == '*' { - p = p[:len(p)-1] - } else { - p = p + "/" - } - if strings.HasPrefix(file, p) { - return strings.TrimPrefix(file, p) - } - } - - return file -} - -func (config *Configuration) log(fmt string, args ...interface{}) { - if config != nil && config.Logger != nil { - config.Logger.Printf(fmt, args...) - } else { - log.Printf(fmt, args...) - } -} - -func (config *Configuration) notifyInReleaseStage() bool { - if config.NotifyReleaseStages == nil { - return true - } - for _, r := range config.NotifyReleaseStages { - if r == config.ReleaseStage { - return true - } - } - return false -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/doc.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/doc.go deleted file mode 100644 index 827e03b8..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/doc.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Package bugsnag captures errors in real-time and reports them to Bugsnag (http://bugsnag.com). - -Using bugsnag-go is a three-step process. - -1. As early as possible in your program configure the notifier with your APIKey. This sets up -handling of panics that would otherwise crash your app. - - func init() { - bugsnag.Configure(bugsnag.Configuration{ - APIKey: "YOUR_API_KEY_HERE", - }) - } - -2. Add bugsnag to places that already catch panics. 
For example you should add it to the HTTP server -when you call ListenAndServer: - - http.ListenAndServe(":8080", bugsnag.Handler(nil)) - -If that's not possible, for example because you're using Google App Engine, you can also wrap each -HTTP handler manually: - - http.HandleFunc("/" bugsnag.HandlerFunc(func (w http.ResponseWriter, r *http.Request) { - ... - }) - -3. To notify Bugsnag of an error that is not a panic, pass it to bugsnag.Notify. This will also -log the error message using the configured Logger. - - if err != nil { - bugsnag.Notify(err) - } - -For detailed integration instructions see https://bugsnag.com/docs/notifiers/go. - -Configuration - -The only required configuration is the Bugsnag API key which can be obtained by clicking "Settings" -on the top of https://bugsnag.com/ after signing up. We also recommend you set the ReleaseStage -and AppVersion if these make sense for your deployment workflow. - -RawData - -If you need to attach extra data to Bugsnag notifications you can do that using -the rawData mechanism. Most of the functions that send errors to Bugsnag allow -you to pass in any number of interface{} values as rawData. The rawData can -consist of the Severity, Context, User or MetaData types listed below, and -there is also builtin support for *http.Requests. - - bugsnag.Notify(err, bugsnag.SeverityError) - -If you want to add custom tabs to your bugsnag dashboard you can pass any value in as rawData, -and then process it into the event's metadata using a bugsnag.OnBeforeNotify() hook. - - bugsnag.Notify(err, account) - - bugsnag.OnBeforeNotify(func (e *bugsnag.Event, c *bugsnag.Configuration) { - for datum := range e.RawData { - if account, ok := datum.(Account); ok { - e.MetaData.Add("account", "name", account.Name) - e.MetaData.Add("account", "url", account.URL) - } - } - }) - -If necessary you can pass Configuration in as rawData, or modify the Configuration object passed -into OnBeforeNotify hooks. Configuration passed in this way only affects the current notification. -*/ -package bugsnag diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/README.md b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/README.md deleted file mode 100644 index 8d8e097a..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/README.md +++ /dev/null @@ -1,6 +0,0 @@ -Adds stacktraces to errors in golang. - -This was made to help build the Bugsnag notifier but can be used standalone if -you like to have stacktraces on errors. - -See [Godoc](https://godoc.org/github.com/bugsnag/bugsnag-go/errors) for the API docs. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/error.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/error.go deleted file mode 100644 index 0081c0a8..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/error.go +++ /dev/null @@ -1,90 +0,0 @@ -// Package errors provides errors that have stack-traces. -package errors - -import ( - "bytes" - "fmt" - "reflect" - "runtime" -) - -// The maximum number of stackframes on any error. -var MaxStackDepth = 50 - -// Error is an error with an attached stacktrace. It can be used -// wherever the builtin error interface is expected. -type Error struct { - Err error - stack []uintptr - frames []StackFrame -} - -// New makes an Error from the given value. 
If that value is already an -// error then it will be used directly, if not, it will be passed to -// fmt.Errorf("%v"). The skip parameter indicates how far up the stack -// to start the stacktrace. 0 is from the current call, 1 from its caller, etc. -func New(e interface{}, skip int) *Error { - var err error - - switch e := e.(type) { - case *Error: - return e - case error: - err = e - default: - err = fmt.Errorf("%v", e) - } - - stack := make([]uintptr, MaxStackDepth) - length := runtime.Callers(2+skip, stack[:]) - return &Error{ - Err: err, - stack: stack[:length], - } -} - -// Errorf creates a new error with the given message. You can use it -// as a drop-in replacement for fmt.Errorf() to provide descriptive -// errors in return values. -func Errorf(format string, a ...interface{}) *Error { - return New(fmt.Errorf(format, a...), 1) -} - -// Error returns the underlying error's message. -func (err *Error) Error() string { - return err.Err.Error() -} - -// Stack returns the callstack formatted the same way that go does -// in runtime/debug.Stack() -func (err *Error) Stack() []byte { - buf := bytes.Buffer{} - - for _, frame := range err.StackFrames() { - buf.WriteString(frame.String()) - } - - return buf.Bytes() -} - -// StackFrames returns an array of frames containing information about the -// stack. -func (err *Error) StackFrames() []StackFrame { - if err.frames == nil { - err.frames = make([]StackFrame, len(err.stack)) - - for i, pc := range err.stack { - err.frames[i] = NewStackFrame(pc) - } - } - - return err.frames -} - -// TypeName returns the type this error. e.g. *errors.stringError. -func (err *Error) TypeName() string { - if _, ok := err.Err.(uncaughtPanic); ok { - return "panic" - } - return reflect.TypeOf(err.Err).String() -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/parse_panic.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/parse_panic.go deleted file mode 100644 index cc37052d..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/parse_panic.go +++ /dev/null @@ -1,127 +0,0 @@ -package errors - -import ( - "strconv" - "strings" -) - -type uncaughtPanic struct{ message string } - -func (p uncaughtPanic) Error() string { - return p.message -} - -// ParsePanic allows you to get an error object from the output of a go program -// that panicked. This is particularly useful with https://github.com/mitchellh/panicwrap. 
-func ParsePanic(text string) (*Error, error) { - lines := strings.Split(text, "\n") - - state := "start" - - var message string - var stack []StackFrame - - for i := 0; i < len(lines); i++ { - line := lines[i] - - if state == "start" { - if strings.HasPrefix(line, "panic: ") { - message = strings.TrimPrefix(line, "panic: ") - state = "seek" - } else { - return nil, Errorf("bugsnag.panicParser: Invalid line (no prefix): %s", line) - } - - } else if state == "seek" { - if strings.HasPrefix(line, "goroutine ") && strings.HasSuffix(line, "[running]:") { - state = "parsing" - } - - } else if state == "parsing" { - if line == "" { - state = "done" - break - } - createdBy := false - if strings.HasPrefix(line, "created by ") { - line = strings.TrimPrefix(line, "created by ") - createdBy = true - } - - i++ - - if i >= len(lines) { - return nil, Errorf("bugsnag.panicParser: Invalid line (unpaired): %s", line) - } - - frame, err := parsePanicFrame(line, lines[i], createdBy) - if err != nil { - return nil, err - } - - stack = append(stack, *frame) - if createdBy { - state = "done" - break - } - } - } - - if state == "done" || state == "parsing" { - return &Error{Err: uncaughtPanic{message}, frames: stack}, nil - } - return nil, Errorf("could not parse panic: %v", text) -} - -// The lines we're passing look like this: -// -// main.(*foo).destruct(0xc208067e98) -// /0/go/src/github.com/bugsnag/bugsnag-go/pan/main.go:22 +0x151 -func parsePanicFrame(name string, line string, createdBy bool) (*StackFrame, error) { - idx := strings.LastIndex(name, "(") - if idx == -1 && !createdBy { - return nil, Errorf("bugsnag.panicParser: Invalid line (no call): %s", name) - } - if idx != -1 { - name = name[:idx] - } - pkg := "" - - if lastslash := strings.LastIndex(name, "/"); lastslash >= 0 { - pkg += name[:lastslash] + "/" - name = name[lastslash+1:] - } - if period := strings.Index(name, "."); period >= 0 { - pkg += name[:period] - name = name[period+1:] - } - - name = strings.Replace(name, "·", ".", -1) - - if !strings.HasPrefix(line, "\t") { - return nil, Errorf("bugsnag.panicParser: Invalid line (no tab): %s", line) - } - - idx = strings.LastIndex(line, ":") - if idx == -1 { - return nil, Errorf("bugsnag.panicParser: Invalid line (no line number): %s", line) - } - file := line[1:idx] - - number := line[idx+1:] - if idx = strings.Index(number, " +"); idx > -1 { - number = number[:idx] - } - - lno, err := strconv.ParseInt(number, 10, 32) - if err != nil { - return nil, Errorf("bugsnag.panicParser: Invalid line (bad line number): %s", line) - } - - return &StackFrame{ - File: file, - LineNumber: int(lno), - Package: pkg, - Name: name, - }, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/stackframe.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/stackframe.go deleted file mode 100644 index 4edadbc5..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/errors/stackframe.go +++ /dev/null @@ -1,97 +0,0 @@ -package errors - -import ( - "bytes" - "fmt" - "io/ioutil" - "runtime" - "strings" -) - -// A StackFrame contains all necessary information about to generate a line -// in a callstack. -type StackFrame struct { - File string - LineNumber int - Name string - Package string - ProgramCounter uintptr -} - -// NewStackFrame popoulates a stack frame object from the program counter. 
-func NewStackFrame(pc uintptr) (frame StackFrame) { - - frame = StackFrame{ProgramCounter: pc} - if frame.Func() == nil { - return - } - frame.Package, frame.Name = packageAndName(frame.Func()) - - // pc -1 because the program counters we use are usually return addresses, - // and we want to show the line that corresponds to the function call - frame.File, frame.LineNumber = frame.Func().FileLine(pc - 1) - return - -} - -// Func returns the function that this stackframe corresponds to -func (frame *StackFrame) Func() *runtime.Func { - if frame.ProgramCounter == 0 { - return nil - } - return runtime.FuncForPC(frame.ProgramCounter) -} - -// String returns the stackframe formatted in the same way as go does -// in runtime/debug.Stack() -func (frame *StackFrame) String() string { - str := fmt.Sprintf("%s:%d (0x%x)\n", frame.File, frame.LineNumber, frame.ProgramCounter) - - source, err := frame.SourceLine() - if err != nil { - return str - } - - return str + fmt.Sprintf("\t%s: %s\n", frame.Name, source) -} - -// SourceLine gets the line of code (from File and Line) of the original source if possible -func (frame *StackFrame) SourceLine() (string, error) { - data, err := ioutil.ReadFile(frame.File) - - if err != nil { - return "", err - } - - lines := bytes.Split(data, []byte{'\n'}) - if frame.LineNumber <= 0 || frame.LineNumber >= len(lines) { - return "???", nil - } - // -1 because line-numbers are 1 based, but our array is 0 based - return string(bytes.Trim(lines[frame.LineNumber-1], " \t")), nil -} - -func packageAndName(fn *runtime.Func) (string, string) { - name := fn.Name() - pkg := "" - - // The name includes the path name to the package, which is unnecessary - // since the file name is already included. Plus, it has center dots. - // That is, we see - // runtime/debug.*T·ptrmethod - // and want - // *T.ptrmethod - // Since the package path might contains dots (e.g. code.google.com/...), - // we first remove the path prefix if there is one. - if lastslash := strings.LastIndex(name, "/"); lastslash >= 0 { - pkg += name[:lastslash] + "/" - name = name[lastslash+1:] - } - if period := strings.Index(name, "."); period >= 0 { - pkg += name[:period] - name = name[period+1:] - } - - name = strings.Replace(name, "·", ".", -1) - return pkg, name -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/event.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/event.go deleted file mode 100644 index 1586ef3f..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/event.go +++ /dev/null @@ -1,134 +0,0 @@ -package bugsnag - -import ( - "strings" - - "github.com/bugsnag/bugsnag-go/errors" -) - -// Context is the context of the error in Bugsnag. -// This can be passed to Notify, Recover or AutoNotify as rawData. -type Context struct { - String string -} - -// User represents the searchable user-data on Bugsnag. The Id is also used -// to determine the number of users affected by a bug. This can be -// passed to Notify, Recover or AutoNotify as rawData. -type User struct { - Id string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - Email string `json:"email,omitempty"` -} - -// Sets the severity of the error on Bugsnag. These values can be -// passed to Notify, Recover or AutoNotify as rawData. 
-var ( - SeverityError = severity{"error"} - SeverityWarning = severity{"warning"} - SeverityInfo = severity{"info"} -) - -// The severity tag type, private so that people can only use Error,Warning,Info -type severity struct { - String string -} - -// The form of stacktrace that Bugsnag expects -type stackFrame struct { - Method string `json:"method"` - File string `json:"file"` - LineNumber int `json:"lineNumber"` - InProject bool `json:"inProject,omitempty"` -} - -// Event represents a payload of data that gets sent to Bugsnag. -// This is passed to each OnBeforeNotify hook. -type Event struct { - - // The original error that caused this event, not sent to Bugsnag. - Error *errors.Error - - // The rawData affecting this error, not sent to Bugsnag. - RawData []interface{} - - // The error class to be sent to Bugsnag. This defaults to the type name of the Error, for - // example *error.String - ErrorClass string - // The error message to be sent to Bugsnag. This defaults to the return value of Error.Error() - Message string - // The stacktrrace of the error to be sent to Bugsnag. - Stacktrace []stackFrame - - // The context to be sent to Bugsnag. This should be set to the part of the app that was running, - // e.g. for http requests, set it to the path. - Context string - // The severity of the error. Can be SeverityError, SeverityWarning or SeverityInfo. - Severity severity - // The grouping hash is used to override Bugsnag's grouping. Set this if you'd like all errors with - // the same grouping hash to group together in the dashboard. - GroupingHash string - - // User data to send to Bugsnag. This is searchable on the dashboard. - User *User - // Other MetaData to send to Bugsnag. Appears as a set of tabbed tables in the dashboard. - MetaData MetaData -} - -func newEvent(err *errors.Error, rawData []interface{}, notifier *Notifier) (*Event, *Configuration) { - - config := notifier.Config - event := &Event{ - Error: err, - RawData: append(notifier.RawData, rawData...), - - ErrorClass: err.TypeName(), - Message: err.Error(), - Stacktrace: make([]stackFrame, len(err.StackFrames())), - - Severity: SeverityWarning, - - MetaData: make(MetaData), - } - - for _, datum := range event.RawData { - switch datum := datum.(type) { - case severity: - event.Severity = datum - - case Context: - event.Context = datum.String - - case Configuration: - config = config.merge(&datum) - - case MetaData: - event.MetaData.Update(datum) - - case User: - event.User = &datum - } - } - - for i, frame := range err.StackFrames() { - file := frame.File - inProject := config.isProjectPackage(frame.Package) - - // remove $GOROOT and $GOHOME from other frames - if idx := strings.Index(file, frame.Package); idx > -1 { - file = file[idx:] - } - if inProject { - file = config.stripProjectPackages(file) - } - - event.Stacktrace[i] = stackFrame{ - Method: frame.Name, - File: file, - LineNumber: frame.LineNumber, - InProject: inProject, - } - } - - return event, config -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/json_tags.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/json_tags.go deleted file mode 100644 index 45be38fa..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/json_tags.go +++ /dev/null @@ -1,43 +0,0 @@ -// The code is stripped from: -// http://golang.org/src/pkg/encoding/json/tags.go?m=text - -package bugsnag - -import ( - "strings" -) - -// tagOptions is the string following a comma in a struct 
field's "json" -// tag, or the empty string. It does not include the leading comma. -type tagOptions string - -// parseTag splits a struct field's json tag into its name and -// comma-separated options. -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. -func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/metadata.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/metadata.go deleted file mode 100644 index ffe64e21..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/metadata.go +++ /dev/null @@ -1,185 +0,0 @@ -package bugsnag - -import ( - "fmt" - "reflect" - "strings" -) - -// MetaData is added to the Bugsnag dashboard in tabs. Each tab is -// a map of strings -> values. You can pass MetaData to Notify, Recover -// and AutoNotify as rawData. -type MetaData map[string]map[string]interface{} - -// Update the meta-data with more information. Tabs are merged together such -// that unique keys from both sides are preserved, and duplicate keys end up -// with the provided values. -func (meta MetaData) Update(other MetaData) { - for name, tab := range other { - - if meta[name] == nil { - meta[name] = make(map[string]interface{}) - } - - for key, value := range tab { - meta[name][key] = value - } - } -} - -// Add creates a tab of Bugsnag meta-data. -// If the tab doesn't yet exist it will be created. -// If the key already exists, it will be overwritten. -func (meta MetaData) Add(tab string, key string, value interface{}) { - if meta[tab] == nil { - meta[tab] = make(map[string]interface{}) - } - - meta[tab][key] = value -} - -// AddStruct creates a tab of Bugsnag meta-data. -// The struct will be converted to an Object using the -// reflect library so any private fields will not be exported. -// As a safety measure, if you pass a non-struct the value will be -// sent to Bugsnag under the "Extra data" tab. -func (meta MetaData) AddStruct(tab string, obj interface{}) { - val := sanitizer{}.Sanitize(obj) - content, ok := val.(map[string]interface{}) - if ok { - meta[tab] = content - } else { - // Wasn't a struct - meta.Add("Extra data", tab, obj) - } - -} - -// Remove any values from meta-data that have keys matching the filters, -// and any that are recursive data-structures -func (meta MetaData) sanitize(filters []string) interface{} { - return sanitizer{ - Filters: filters, - Seen: make([]interface{}, 0), - }.Sanitize(meta) - -} - -// The sanitizer is used to remove filtered params and recursion from meta-data. -type sanitizer struct { - Filters []string - Seen []interface{} -} - -func (s sanitizer) Sanitize(data interface{}) interface{} { - for _, s := range s.Seen { - // TODO: we don't need deep equal here, just type-ignoring equality - if reflect.DeepEqual(data, s) { - return "[RECURSION]" - } - } - - // Sanitizers are passed by value, so we can modify s and it only affects - // s.Seen for nested calls. 
- s.Seen = append(s.Seen, data) - - t := reflect.TypeOf(data) - v := reflect.ValueOf(data) - - switch t.Kind() { - case reflect.Bool, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, - reflect.Float32, reflect.Float64: - return data - - case reflect.String: - return data - - case reflect.Interface, reflect.Ptr: - return s.Sanitize(v.Elem().Interface()) - - case reflect.Array, reflect.Slice: - ret := make([]interface{}, v.Len()) - for i := 0; i < v.Len(); i++ { - ret[i] = s.Sanitize(v.Index(i).Interface()) - } - return ret - - case reflect.Map: - return s.sanitizeMap(v) - - case reflect.Struct: - return s.sanitizeStruct(v, t) - - // Things JSON can't serialize: - // case t.Chan, t.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer: - default: - return "[" + t.String() + "]" - - } - -} - -func (s sanitizer) sanitizeMap(v reflect.Value) interface{} { - ret := make(map[string]interface{}) - - for _, key := range v.MapKeys() { - val := s.Sanitize(v.MapIndex(key).Interface()) - newKey := fmt.Sprintf("%v", key.Interface()) - - if s.shouldRedact(newKey) { - val = "[REDACTED]" - } - - ret[newKey] = val - } - - return ret -} - -func (s sanitizer) sanitizeStruct(v reflect.Value, t reflect.Type) interface{} { - ret := make(map[string]interface{}) - - for i := 0; i < v.NumField(); i++ { - - val := v.Field(i) - // Don't export private fields - if !val.CanInterface() { - continue - } - - name := t.Field(i).Name - var opts tagOptions - - // Parse JSON tags. Supports name and "omitempty" - if jsonTag := t.Field(i).Tag.Get("json"); len(jsonTag) != 0 { - name, opts = parseTag(jsonTag) - } - - if s.shouldRedact(name) { - ret[name] = "[REDACTED]" - } else { - sanitized := s.Sanitize(val.Interface()) - if str, ok := sanitized.(string); ok { - if !(opts.Contains("omitempty") && len(str) == 0) { - ret[name] = str - } - } else { - ret[name] = sanitized - } - - } - } - - return ret -} - -func (s sanitizer) shouldRedact(key string) bool { - for _, filter := range s.Filters { - if strings.Contains(strings.ToLower(filter), strings.ToLower(key)) { - return true - } - } - return false -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/middleware.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/middleware.go deleted file mode 100644 index 266d5e46..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/middleware.go +++ /dev/null @@ -1,96 +0,0 @@ -package bugsnag - -import ( - "net/http" - "strings" -) - -type ( - beforeFunc func(*Event, *Configuration) error - - // MiddlewareStacks keep middleware in the correct order. They are - // called in reverse order, so if you add a new middleware it will - // be called before all existing middleware. - middlewareStack struct { - before []beforeFunc - } -) - -// AddMiddleware adds a new middleware to the outside of the existing ones, -// when the middlewareStack is Run it will be run before all middleware that -// have been added before. -func (stack *middlewareStack) OnBeforeNotify(middleware beforeFunc) { - stack.before = append(stack.before, middleware) -} - -// Run causes all the middleware to be run. If they all permit it the next callback -// will be called with all the middleware on the stack. 
-func (stack *middlewareStack) Run(event *Event, config *Configuration, next func() error) error { - // run all the before filters in reverse order - for i := range stack.before { - before := stack.before[len(stack.before)-i-1] - - err := stack.runBeforeFilter(before, event, config) - if err != nil { - return err - } - } - - return next() -} - -func (stack *middlewareStack) runBeforeFilter(f beforeFunc, event *Event, config *Configuration) error { - defer func() { - if err := recover(); err != nil { - config.log("bugsnag/middleware: unexpected panic: %v", err) - } - }() - - return f(event, config) -} - -// catchMiddlewarePanic is used to log any panics that happen inside Middleware, -// we wouldn't want to not notify Bugsnag in this case. -func catchMiddlewarePanic(event *Event, config *Configuration, next func() error) { -} - -// httpRequestMiddleware is added OnBeforeNotify by default. It takes information -// from an http.Request passed in as rawData, and adds it to the Event. You can -// use this as a template for writing your own Middleware. -func httpRequestMiddleware(event *Event, config *Configuration) error { - for _, datum := range event.RawData { - if request, ok := datum.(*http.Request); ok { - proto := "http://" - if request.TLS != nil { - proto = "https://" - } - - event.MetaData.Update(MetaData{ - "Request": { - "RemoteAddr": request.RemoteAddr, - "Method": request.Method, - "Url": proto + request.Host + request.RequestURI, - "Params": request.URL.Query(), - }, - }) - - // Add headers as a separate tab. - event.MetaData.AddStruct("Headers", request.Header) - - // Default context to Path - if event.Context == "" { - event.Context = request.URL.Path - } - - // Default user.id to IP so that users-affected works. - if event.User == nil { - ip := request.RemoteAddr - if idx := strings.LastIndex(ip, ":"); idx != -1 { - ip = ip[:idx] - } - event.User = &User{Id: ip} - } - } - } - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/notifier.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/notifier.go deleted file mode 100644 index 6b108178..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/notifier.go +++ /dev/null @@ -1,95 +0,0 @@ -package bugsnag - -import ( - "fmt" - - "github.com/bugsnag/bugsnag-go/errors" -) - -// Notifier sends errors to Bugsnag. -type Notifier struct { - Config *Configuration - RawData []interface{} -} - -// New creates a new notifier. -// You can pass an instance of bugsnag.Configuration in rawData to change the configuration. -// Other values of rawData will be passed to Notify. -func New(rawData ...interface{}) *Notifier { - config := Config.clone() - for i, datum := range rawData { - if c, ok := datum.(Configuration); ok { - config.update(&c) - rawData[i] = nil - } - } - - return &Notifier{ - Config: config, - RawData: rawData, - } -} - -// Notify sends an error to Bugsnag. Any rawData you pass here will be sent to -// Bugsnag after being converted to JSON. e.g. bugsnag.SeverityError, bugsnag.Context, -// or bugsnag.MetaData. -func (notifier *Notifier) Notify(err error, rawData ...interface{}) (e error) { - event, config := newEvent(errors.New(err, 1), rawData, notifier) - - // Never block, start throwing away errors if we have too many. 
- e = middleware.Run(event, config, func() error { - config.log("notifying bugsnag: %s", event.Message) - if config.notifyInReleaseStage() { - if config.Synchronous { - return (&payload{event, config}).deliver() - } - go (&payload{event, config}).deliver() - return nil - } - return fmt.Errorf("not notifying in %s", config.ReleaseStage) - }) - - if e != nil { - config.log("bugsnag.Notify: %v", e) - } - return e -} - -// AutoNotify notifies Bugsnag of any panics, then repanics. -// It sends along any rawData that gets passed in. -// Usage: defer AutoNotify() -func (notifier *Notifier) AutoNotify(rawData ...interface{}) { - if err := recover(); err != nil { - rawData = notifier.addDefaultSeverity(rawData, SeverityError) - notifier.Notify(errors.New(err, 2), rawData...) - panic(err) - } -} - -// Recover logs any panics, then recovers. -// It sends along any rawData that gets passed in. -// Usage: defer Recover() -func (notifier *Notifier) Recover(rawData ...interface{}) { - if err := recover(); err != nil { - rawData = notifier.addDefaultSeverity(rawData, SeverityWarning) - notifier.Notify(errors.New(err, 2), rawData...) - } -} - -func (notifier *Notifier) dontPanic() { - if err := recover(); err != nil { - notifier.Config.log("bugsnag/notifier.Notify: panic! %s", err) - } -} - -// Add a severity to raw data only if the default is not set. -func (notifier *Notifier) addDefaultSeverity(rawData []interface{}, s severity) []interface{} { - - for _, datum := range append(notifier.RawData, rawData...) { - if _, ok := datum.(severity); ok { - return rawData - } - } - - return append(rawData, s) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/panicwrap.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/panicwrap.go deleted file mode 100644 index 14fb9fa8..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/panicwrap.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !appengine - -package bugsnag - -import ( - "github.com/bugsnag/panicwrap" - "github.com/bugsnag/bugsnag-go/errors" -) - -// NOTE: this function does not return when you call it, instead it -// re-exec()s the current process with panic monitoring. 
-func defaultPanicHandler() { - defer defaultNotifier.dontPanic() - - err := panicwrap.BasicMonitor(func(output string) { - toNotify, err := errors.ParsePanic(output) - - if err != nil { - defaultNotifier.Config.log("bugsnag.handleUncaughtPanic: %v", err) - } - Notify(toNotify, SeverityError, Configuration{Synchronous: true}) - }) - - if err != nil { - defaultNotifier.Config.log("bugsnag.handleUncaughtPanic: %v", err) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/payload.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/payload.go deleted file mode 100644 index a516a5d2..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/bugsnag-go/payload.go +++ /dev/null @@ -1,96 +0,0 @@ -package bugsnag - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" -) - -type payload struct { - *Event - *Configuration -} - -type hash map[string]interface{} - -func (p *payload) deliver() error { - - if len(p.APIKey) != 32 { - return fmt.Errorf("bugsnag/payload.deliver: invalid api key") - } - - buf, err := json.Marshal(p) - - if err != nil { - return fmt.Errorf("bugsnag/payload.deliver: %v", err) - } - - client := http.Client{ - Transport: p.Transport, - } - - resp, err := client.Post(p.Endpoint, "application/json", bytes.NewBuffer(buf)) - - if err != nil { - return fmt.Errorf("bugsnag/payload.deliver: %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - return fmt.Errorf("bugsnag/payload.deliver: Got HTTP %s\n", resp.Status) - } - - return nil -} - -func (p *payload) MarshalJSON() ([]byte, error) { - - data := hash{ - "apiKey": p.APIKey, - - "notifier": hash{ - "name": "Bugsnag Go", - "url": "https://github.com/bugsnag/bugsnag-go", - "version": VERSION, - }, - - "events": []hash{ - { - "payloadVersion": "2", - "exceptions": []hash{ - { - "errorClass": p.ErrorClass, - "message": p.Message, - "stacktrace": p.Stacktrace, - }, - }, - "severity": p.Severity.String, - "app": hash{ - "releaseStage": p.ReleaseStage, - }, - "user": p.User, - "metaData": p.MetaData.sanitize(p.ParamsFilters), - }, - }, - } - - event := data["events"].([]hash)[0] - - if p.Context != "" { - event["context"] = p.Context - } - if p.GroupingHash != "" { - event["groupingHash"] = p.GroupingHash - } - if p.Hostname != "" { - event["device"] = hash{ - "hostname": p.Hostname, - } - } - if p.AppVersion != "" { - event["app"].(hash)["version"] = p.AppVersion - } - return json.Marshal(data) - -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/LICENSE deleted file mode 100644 index 18527a28..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2012 Daniel Theophanes - -This software is provided 'as-is', without any express or implied -warranty. In no event will the authors be held liable for any damages -arising from the use of this software. - -Permission is granted to anyone to use this software for any purpose, -including commercial applications, and to alter it and redistribute it -freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - - 2. 
Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - - 3. This notice may not be removed or altered from any source - distribution. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext.go deleted file mode 100644 index 37efbb22..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Extensions to the standard "os" package. -package osext - -import "path/filepath" - -// Executable returns an absolute path that can be used to -// re-invoke the current program. -// It may not be valid after the current program exits. -func Executable() (string, error) { - p, err := executable() - return filepath.Clean(p), err -} - -// ExecutableFolder returns the same path as Executable, excluding the -// executable name; only the folder path is returned. -func ExecutableFolder() (string, error) { - p, err := Executable() - if err != nil { - return "", err - } - folder, _ := filepath.Split(p) - return folder, nil -} - -// Deprecated: use Executable instead. -func GetExePath() (exePath string, err error) { - return Executable() -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_plan9.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_plan9.go deleted file mode 100644 index e88c1e09..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_plan9.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package osext - -import ( - "os" - "strconv" - "syscall" -) - -func executable() (string, error) { - f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text") - if err != nil { - return "", err - } - defer f.Close() - return syscall.Fd2path(int(f.Fd())) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_procfs.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_procfs.go deleted file mode 100644 index 546fec91..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_procfs.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux netbsd openbsd - -package osext - -import ( - "errors" - "os" - "runtime" -) - -func executable() (string, error) { - switch runtime.GOOS { - case "linux": - return os.Readlink("/proc/self/exe") - case "netbsd": - return os.Readlink("/proc/curproc/exe") - case "openbsd": - return os.Readlink("/proc/curproc/file") - } - return "", errors.New("ExecPath not implemented for " + runtime.GOOS) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_sysctl.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_sysctl.go deleted file mode 100644 index d7646462..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_sysctl.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin freebsd - -package osext - -import ( - "os" - "runtime" - "syscall" - "unsafe" -) - -var startUpcwd, getwdError = os.Getwd() - -func executable() (string, error) { - var mib [4]int32 - switch runtime.GOOS { - case "freebsd": - mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1} - case "darwin": - mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1} - } - - n := uintptr(0) - // get length - _, _, err := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0) - if err != 0 { - return "", err - } - if n == 0 { // shouldn't happen - return "", nil - } - buf := make([]byte, n) - _, _, err = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0) - if err != 0 { - return "", err - } - if n == 0 { // shouldn't happen - return "", nil - } - for i, v := range buf { - if v == 0 { - buf = buf[:i] - break - } - } - if buf[0] != '/' { - if getwdError != nil { - return string(buf), getwdError - } else { - if buf[0] == '.' { - buf = buf[1:] - } - if startUpcwd[len(startUpcwd)-1] != '/' { - return startUpcwd + "/" + string(buf), nil - } - return startUpcwd + string(buf), nil - } - } - return string(buf), nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_windows.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_windows.go deleted file mode 100644 index 72d282cf..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/osext/osext_windows.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package osext - -import ( - "syscall" - "unicode/utf16" - "unsafe" -) - -var ( - kernel = syscall.MustLoadDLL("kernel32.dll") - getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW") -) - -// GetModuleFileName() with hModule = NULL -func executable() (exePath string, err error) { - return getModuleFileName() -} - -func getModuleFileName() (string, error) { - var n uint32 - b := make([]uint16, syscall.MAX_PATH) - size := uint32(len(b)) - - r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size)) - n = uint32(r0) - if n == 0 { - return "", e1 - } - return string(utf16.Decode(b[0:n])), nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/LICENSE deleted file mode 100644 index f9c841a5..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/README.md b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/README.md deleted file mode 100644 index d0a59675..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/README.md +++ /dev/null @@ -1,101 +0,0 @@ -# panicwrap - -panicwrap is a Go library that re-executes a Go binary and monitors stderr -output from the binary for a panic. When it finds a panic, it executes a -user-defined handler function. Stdout, stderr, stdin, signals, and exit -codes continue to work as normal, making the existence of panicwrap mostly -invisible to the end user until a panic actually occurs. - -Since a panic is truly a bug in the program meant to crash the runtime, -globally catching panics within Go applications is not supposed to be possible. -Despite this, it is often useful to have a way to know when panics occur. -panicwrap allows you to do something with these panics, such as writing them -to a file, so that you can track when panics occur. - -panicwrap is ***not a panic recovery system***. Panics indicate serious -problems with your application and _should_ crash the runtime. panicwrap -is just meant as a way to monitor for panics. If you still think this is -the worst idea ever, read the section below on why.
- -## Features - -* **SIMPLE!** -* Works with all Go applications on all platforms Go supports -* Custom behavior when a panic occurs -* Stdout, stderr, stdin, exit codes, and signals continue to work as - expected. - -## Usage - -Using panicwrap is simple. It behaves a lot like `fork`, if you know -how that works. A basic example is shown below. - -Because it would be sad to panic while capturing a panic, it is recommended -that the handler functions for panicwrap remain relatively simple and well -tested. panicwrap itself contains many tests. - -```go -package main - -import ( - "fmt" - "github.com/mitchellh/panicwrap" - "os" -) - -func main() { - exitStatus, err := panicwrap.BasicWrap(panicHandler) - if err != nil { - // Something went wrong setting up the panic wrapper. Unlikely, - // but possible. - panic(err) - } - - // If exitStatus >= 0, then we're the parent process and the panicwrap - // re-executed ourselves and completed. Just exit with the proper status. - if exitStatus >= 0 { - os.Exit(exitStatus) - } - - // Otherwise, exitStatus < 0 means we're the child. Continue executing as - // normal... - - // Let's say we panic - panic("oh shucks") -} - -func panicHandler(output string) { - // output contains the full output (including stack traces) of the - // panic. Put it in a file or something. - fmt.Printf("The child panicked:\n\n%s\n", output) - os.Exit(1) -} -``` - -## How Does it Work? - -panicwrap works by re-executing the running program (retaining arguments, -environment variables, etc.) and monitoring the stderr of the program. -Since Go always outputs panics in a predictable way with a predictable -exit code, panicwrap is able to reliably detect panics and allow the parent -process to handle them. - -## WHY?! Panics should CRASH! - -Yes, panics _should_ crash. They are 100% always indicative of bugs. -However, in some cases, such as user-facing programs (programs like -[Packer](http://github.com/mitchellh/packer) or -[Docker](http://github.com/dotcloud/docker)), it is up to the user to -report such panics. This is unreliable, at best, and it would be better if the -program could have a way to automatically report panics. panicwrap provides -a way to do this. - -For backend applications, it is easier to detect crashes (since the application -exits). However, it is still nice sometimes to more intelligently log -panics in some way. For example, at [HashiCorp](http://www.hashicorp.com), -we use panicwrap to log panics to timestamped files with some additional -data (configuration settings at the time, environment variables, etc.) - -The goal of panicwrap is _not_ to hide panics. It is instead to provide -a clean mechanism for handling them before bubbling them up to the user -and ultimately crashing.
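The fork of panicwrap vendored (and deleted) here adds `BasicMonitor`, which differs from the upstream `BasicWrap` example in the README above. A minimal sketch of driving that API, assuming only the `BasicMonitor(f HandlerFunc) error` signature visible in the deleted `panicwrap.go` later in this diff; the handler body is illustrative:

```go
package main

import (
	"fmt"
	"os"

	"github.com/bugsnag/panicwrap"
)

func main() {
	// Per the deleted doc comment: in one process BasicMonitor never
	// returns (it watches the other process's stderr for panics); in
	// the process that runs the application it returns nil once panic
	// monitoring is enabled, or an error if setup failed.
	if err := panicwrap.BasicMonitor(func(output string) {
		// output is the full panic text, including the stack trace.
		fmt.Fprintf(os.Stderr, "monitored process panicked:\n%s\n", output)
	}); err != nil {
		panic(err)
	}

	// Application code continues here under panic monitoring.
}
```

On Windows the deleted fork forces `Monitor` off, so the same call falls back to the re-exec `wrap` path and the parent exits with the child's status.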
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/dup2.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/dup2.go deleted file mode 100644 index de523c83..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/dup2.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build darwin dragonfly freebsd linux,!arm64 netbsd openbsd - -package panicwrap - -import ( - "syscall" -) - -func dup2(oldfd, newfd int) error { - return syscall.Dup2(oldfd, newfd) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/dup3.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/dup3.go deleted file mode 100644 index 9721b36c..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/dup3.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build linux,arm64 - -package panicwrap - -import ( - "syscall" -) - -func dup2(oldfd, newfd int) error { - return syscall.Dup3(oldfd, newfd, 0) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/monitor.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/monitor.go deleted file mode 100644 index 72b418a2..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/monitor.go +++ /dev/null @@ -1,62 +0,0 @@ -// +build !windows - -package panicwrap - -import ( - "github.com/bugsnag/osext" - "os" - "os/exec" -) - -func monitor(c *WrapConfig) (int, error) { - - // If we're the child process, absorb panics. - if Wrapped(c) { - panicCh := make(chan string) - - go trackPanic(os.Stdin, os.Stderr, c.DetectDuration, panicCh) - - // Wait on the panic data - panicTxt := <-panicCh - if panicTxt != "" { - if !c.HidePanic { - os.Stderr.Write([]byte(panicTxt)) - } - - c.Handler(panicTxt) - } - - os.Exit(0) - } - - exePath, err := osext.Executable() - if err != nil { - return -1, err - } - cmd := exec.Command(exePath, os.Args[1:]...) - - read, write, err := os.Pipe() - if err != nil { - return -1, err - } - - cmd.Stdin = read - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.Env = append(os.Environ(), c.CookieKey+"="+c.CookieValue) - - if err != nil { - return -1, err - } - err = cmd.Start() - if err != nil { - return -1, err - } - - err = dup2(int(write.Fd()), int(os.Stderr.Fd())) - if err != nil { - return -1, err - } - - return -1, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/monitor_windows.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/monitor_windows.go deleted file mode 100644 index d07a6921..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/monitor_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package panicwrap - -import "fmt" - -func monitor(c *WrapConfig) (int, error) { - return -1, fmt.Errorf("Monitor is not supported on windows") -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/panicwrap.go b/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/panicwrap.go deleted file mode 100644 index f9ea3e3e..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/bugsnag/panicwrap/panicwrap.go +++ /dev/null @@ -1,339 +0,0 @@ -// The panicwrap package provides functions for capturing and handling -// panics in your application. It does this by re-executing the running -// application and monitoring stderr for any panics. 
At the same time, -// stdout/stderr/etc. are set to the same values so that data is shuttled -// through properly, making the existence of panicwrap mostly transparent. -// -// Panics are only detected when the subprocess exits with a non-zero -// exit status, since this is the only time panics are real. Otherwise, -// "panic-like" output is ignored. -package panicwrap - -import ( - "bytes" - "errors" - "github.com/bugsnag/osext" - "io" - "os" - "os/exec" - "os/signal" - "runtime" - "syscall" - "time" -) - -const ( - DEFAULT_COOKIE_KEY = "cccf35992f8f3cd8d1d28f0109dd953e26664531" - DEFAULT_COOKIE_VAL = "7c28215aca87789f95b406b8dd91aa5198406750" -) - -// HandlerFunc is the type called when a panic is detected. -type HandlerFunc func(string) - -// WrapConfig is the configuration for panicwrap when wrapping an existing -// binary. To get started, in general, you only need the BasicWrap function -// that will set this up for you. However, for more customizability, -// WrapConfig and Wrap can be used. -type WrapConfig struct { - // Handler is the function called when a panic occurs. - Handler HandlerFunc - - // The cookie key and value are used within environment variables - // to tell the child process that it is already executing so that - // wrap doesn't re-wrap itself. - CookieKey string - CookieValue string - - // If true, the panic will not be mirrored to the configured writer - // and will instead ONLY go to the handler. This lets you effectively - // hide panics from the end user. This is not recommended because if - // your handler fails, the panic is effectively lost. - HidePanic bool - - // If true, panicwrap will boot a monitor sub-process and let the parent - // run the app. This mode is useful for processes run under supervisors - // like runit as signals get sent to the correct codebase. This is not - // supported when GOOS=windows, and ignores c.Writer and c.Stdout. - Monitor bool - - // The amount of time that a process must exit within after detecting - // a panic header for panicwrap to assume it is a panic. Defaults to - // 300 milliseconds. - DetectDuration time.Duration - - // The writer to send stderr to. If this is nil, then it defaults - // to os.Stderr. - Writer io.Writer - - // The writer to send stdout to. If this is nil, then it defaults to - // os.Stdout. - Stdout io.Writer -} - -// BasicWrap calls Wrap with the given handler function, using defaults -// for everything else. See Wrap and WrapConfig for more information on -// functionality and return values. -func BasicWrap(f HandlerFunc) (int, error) { - return Wrap(&WrapConfig{ - Handler: f, - }) -} - -// BasicMonitor calls Wrap with Monitor set to true on supported platforms. -// It forks your program and runs it again from the start. In one process -// BasicMonitor never returns; it just listens on stderr of the other process -// and calls your handler when a panic is seen. In the other, it either returns -// nil to indicate that panic monitoring is enabled, or an error to indicate -// that something else went wrong. -func BasicMonitor(f HandlerFunc) error { - exitStatus, err := Wrap(&WrapConfig{ - Handler: f, - Monitor: runtime.GOOS != "windows", - }) - - if err != nil { - return err - } - - if exitStatus >= 0 { - os.Exit(exitStatus) - } - - return nil -} - -// Wrap wraps the current executable in a handler to catch panics. It -// returns an error if there was an error during the wrapping process. -// If the error is nil, then the int result indicates the exit status of the -// child process.
If the exit status is -1, then this is the child process, -// and execution should continue as normal. Otherwise, this is the parent -// process and the child successfully ran already, and you should exit the -// process with the returned exit status. -// -// This function should be called very very early in your program's execution. -// Ideally, this runs as the first line of code of main. -// -// Once this is called, the given WrapConfig shouldn't be modified or used -// any further. -func Wrap(c *WrapConfig) (int, error) { - if c.Handler == nil { - return -1, errors.New("Handler must be set") - } - - if c.DetectDuration == 0 { - c.DetectDuration = 300 * time.Millisecond - } - - if c.Writer == nil { - c.Writer = os.Stderr - } - - if c.Monitor { - return monitor(c) - } else { - return wrap(c) - } -} - -func wrap(c *WrapConfig) (int, error) { - - // If we're already wrapped, exit out. - if Wrapped(c) { - return -1, nil - } - - // Get the path to our current executable - exePath, err := osext.Executable() - if err != nil { - return -1, err - } - - // Pipe the stderr so we can read all the data as we look for panics - stderr_r, stderr_w := io.Pipe() - - // doneCh is closed when we're done, signaling any other goroutines - // to end immediately. - doneCh := make(chan struct{}) - - // panicCh is the channel on which the panic text will actually be - // sent. - panicCh := make(chan string) - - // On close, make sure to finish off the copying of data to stderr - defer func() { - defer close(doneCh) - stderr_w.Close() - <-panicCh - }() - - // Start the goroutine that will watch stderr for any panics - go trackPanic(stderr_r, c.Writer, c.DetectDuration, panicCh) - - // Create the writer for stdout that we're going to use - var stdout_w io.Writer = os.Stdout - if c.Stdout != nil { - stdout_w = c.Stdout - } - - // Build a subcommand to re-execute ourselves. We make sure to - // set the environment variable to include our cookie. We also - // set stdin/stdout to match the config. Finally, we pipe stderr - // through ourselves in order to watch for panics. - cmd := exec.Command(exePath, os.Args[1:]...) - cmd.Env = append(os.Environ(), c.CookieKey+"="+c.CookieValue) - cmd.Stdin = os.Stdin - cmd.Stdout = stdout_w - cmd.Stderr = stderr_w - if err := cmd.Start(); err != nil { - return 1, err - } - - // Listen to signals and capture them forever. We allow the child - // process to handle them in some way. The channel is buffered so that - // signal.Notify does not drop a signal sent before we are ready. - sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, os.Interrupt) - go func() { - defer signal.Stop(sigCh) - for { - select { - case <-doneCh: - return - case <-sigCh: - } - } - }() - - if err := cmd.Wait(); err != nil { - exitErr, ok := err.(*exec.ExitError) - if !ok { - // This is some other kind of subprocessing error. - return 1, err - } - - exitStatus := 1 - if status, ok := exitErr.Sys().(syscall.WaitStatus); ok { - exitStatus = status.ExitStatus() - } - - // Close the writer end so that the tracker goroutine ends at some point - stderr_w.Close() - - // Wait on the panic data - panicTxt := <-panicCh - if panicTxt != "" { - if !c.HidePanic { - c.Writer.Write([]byte(panicTxt)) - } - - c.Handler(panicTxt) - } - - return exitStatus, nil - } - - return 0, nil -} - -// Wrapped checks if we're already wrapped according to the configuration -// given. -// -// Wrapped is very cheap and can be used early to short-circuit some pre-wrap -// logic your application may have.
-func Wrapped(c *WrapConfig) bool { - if c.CookieKey == "" { - c.CookieKey = DEFAULT_COOKIE_KEY - } - - if c.CookieValue == "" { - c.CookieValue = DEFAULT_COOKIE_VAL - } - - // If the cookie key/value match our environment, then we are the - // child, so just exit now and tell the caller that we're the child - return os.Getenv(c.CookieKey) == c.CookieValue -} - -// trackPanic monitors the given reader for a panic. If a panic is detected, -// it is sent on the result channel. The channel is closed once tracking -// is complete. -func trackPanic(r io.Reader, w io.Writer, dur time.Duration, result chan<- string) { - defer close(result) - - var panicTimer <-chan time.Time - panicBuf := new(bytes.Buffer) - panicHeader := []byte("panic:") - - tempBuf := make([]byte, 2048) - for { - var buf []byte - var n int - - if panicTimer == nil && panicBuf.Len() > 0 { - // We're not tracking a panic but the buffer length is - // greater than 0. We need to clear out that buffer, but - // look for another panic along the way. - - // First, remove the previous panic header so we don't loop - w.Write(panicBuf.Next(len(panicHeader))) - - // Next, assume that this is our new buffer to inspect - n = panicBuf.Len() - buf = make([]byte, n) - copy(buf, panicBuf.Bytes()) - panicBuf.Reset() - } else { - var err error - buf = tempBuf - n, err = r.Read(buf) - if n <= 0 && err == io.EOF { - if panicBuf.Len() > 0 { - // We were tracking a panic, assume it was a panic - // and return that as the result. - result <- panicBuf.String() - } - - return - } - } - - if panicTimer != nil { - // We're tracking what we think is a panic right now. - // If the timer ended, then it is not a panic. - isPanic := true - select { - case <-panicTimer: - isPanic = false - default: - } - - // No matter what, buffer the text some more. - panicBuf.Write(buf[0:n]) - - if !isPanic { - // It isn't a panic, stop tracking. Clean-up will happen - // on the next iteration. - panicTimer = nil - } - - continue - } - - flushIdx := n - idx := bytes.Index(buf[0:n], panicHeader) - if idx >= 0 { - flushIdx = idx - } - - // Flush to stderr what isn't a panic - w.Write(buf[0:flushIdx]) - - if idx < 0 { - // Not a panic so just continue along - continue - } - - // We have a panic header. Buffer what we assume is a panic so far. - panicBuf.Write(buf[idx:n]) - panicTimer = time.After(dur) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/LICENSE.txt b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/LICENSE.txt deleted file mode 100644 index 91829713..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/LICENSE.txt +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity.
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015-2015 Li Yi (denverdino@gmail.com). - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
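The hunks that follow remove the vendored aliyungo `common` client. For orientation, a hypothetical sketch of how that client's `Init`/`Invoke` API (deleted below) was typically driven; the endpoint, API version, action name, and argument/response types here are illustrative assumptions, not values taken from this diff:

```go
package main

import (
	"log"

	"github.com/denverdino/aliyungo/common"
)

// DescribeRegionsArgs and DescribeRegionsResponse are illustrative types;
// real callers define structs matching the action they invoke.
type DescribeRegionsArgs struct{}

type DescribeRegionsResponse struct {
	common.Response // embeds RequestId
}

func main() {
	client := &common.Client{}
	// Init takes the service endpoint, API version, and access keys
	// (the endpoint and version below are assumed ECS-style values).
	client.Init("https://ecs.aliyuncs.com", "2014-05-26", "<accessKeyId>", "<accessKeySecret>")
	client.SetDebug(true)

	var resp DescribeRegionsResponse
	// Invoke signs the query, issues the HTTP request, and unmarshals
	// the JSON body into resp (or returns a common.Error on 4xx/5xx).
	if err := client.Invoke("DescribeRegions", &DescribeRegionsArgs{}, &resp); err != nil {
		log.Fatal(err)
	}
	log.Printf("request id: %s", resp.RequestId)
}
```

`Invoke` computes the `Signature` query parameter via `util.CreateSignatureForRequest`, so callers never touch request signing directly.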
 diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/client.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/client.go deleted file mode 100644 index 4ed0a06f..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/client.go +++ /dev/null @@ -1,145 +0,0 @@ -package common - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "log" - "net/http" - "net/url" - "time" - - "github.com/denverdino/aliyungo/util" -) - -// A Client represents a client of ECS services -type Client struct { - AccessKeyId string //Access Key Id - AccessKeySecret string //Access Key Secret - debug bool - httpClient *http.Client - endpoint string - version string -} - -// Init initializes an instance of the ECS client -func (client *Client) Init(endpoint, version, accessKeyId, accessKeySecret string) { - client.AccessKeyId = accessKeyId - client.AccessKeySecret = accessKeySecret + "&" - client.debug = false - client.httpClient = &http.Client{} - client.endpoint = endpoint - client.version = version -} - -// SetEndpoint sets custom endpoint -func (client *Client) SetEndpoint(endpoint string) { - client.endpoint = endpoint -} - -// SetVersion sets custom version -func (client *Client) SetVersion(version string) { - client.version = version -} - -// SetAccessKeyId sets new AccessKeyId -func (client *Client) SetAccessKeyId(id string) { - client.AccessKeyId = id -} - -// SetAccessKeySecret sets new AccessKeySecret -func (client *Client) SetAccessKeySecret(secret string) { - client.AccessKeySecret = secret + "&" -} - -// SetDebug sets debug mode to log the request/response message -func (client *Client) SetDebug(debug bool) { - client.debug = debug -} - -// Invoke sends the raw HTTP request for ECS services -func (client *Client) Invoke(action string, args interface{}, response interface{}) error { - - request := Request{} - request.init(client.version, action, client.AccessKeyId) - - query := util.ConvertToQueryValues(request) - util.SetQueryValues(args, &query) - - // Sign request - signature := util.CreateSignatureForRequest(ECSRequestMethod, &query, client.AccessKeySecret) - - // Generate the request URL - requestURL := client.endpoint + "?" 
+ query.Encode() + "&Signature=" + url.QueryEscape(signature) - - httpReq, err := http.NewRequest(ECSRequestMethod, requestURL, nil) - if err != nil { - return GetClientError(err) - } - - // TODO move to util and add build val flag - httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version) - - t0 := time.Now() - httpResp, err := client.httpClient.Do(httpReq) - t1 := time.Now() - if err != nil { - return GetClientError(err) - } - statusCode := httpResp.StatusCode - - if client.debug { - log.Printf("Invoke %s %s %d (%v)", ECSRequestMethod, requestURL, statusCode, t1.Sub(t0)) - } - - defer httpResp.Body.Close() - body, err := ioutil.ReadAll(httpResp.Body) - - if err != nil { - return GetClientError(err) - } - - if client.debug { - var prettyJSON bytes.Buffer - err = json.Indent(&prettyJSON, body, "", " ") - log.Println(prettyJSON.String()) - } - - if statusCode >= 400 && statusCode <= 599 { - errorResponse := ErrorResponse{} - err = json.Unmarshal(body, &errorResponse) - ecsError := &Error{ - ErrorResponse: errorResponse, - StatusCode: statusCode, - } - return ecsError - } - - err = json.Unmarshal(body, response) - //log.Printf("%++v", response) - if err != nil { - return GetClientError(err) - } - - return nil -} - -// GenerateClientToken generates the Client Token with random string -func (client *Client) GenerateClientToken() string { - return util.CreateRandomString() -} - -func GetClientErrorFromString(str string) error { - return &Error{ - ErrorResponse: ErrorResponse{ - Code: "AliyunGoClientFailure", - Message: str, - }, - StatusCode: -1, - } -} - -func GetClientError(err error) error { - return GetClientErrorFromString(err.Error()) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/regions.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/regions.go deleted file mode 100644 index 9ceda465..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/regions.go +++ /dev/null @@ -1,19 +0,0 @@ -package common - -// Region represents ECS region -type Region string - -// Constants of region definition -const ( - Hangzhou = Region("cn-hangzhou") - Qingdao = Region("cn-qingdao") - Beijing = Region("cn-beijing") - Hongkong = Region("cn-hongkong") - Shenzhen = Region("cn-shenzhen") - USWest1 = Region("us-west-1") - USEast1 = Region("us-east-1") - APSouthEast1 = Region("ap-southeast-1") - Shanghai = Region("cn-shanghai") -) - -var ValidRegions = []Region{Hangzhou, Qingdao, Beijing, Shenzhen, Hongkong, Shanghai, USWest1, USEast1, APSouthEast1} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/request.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/request.go deleted file mode 100644 index 2a883f19..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/request.go +++ /dev/null @@ -1,101 +0,0 @@ -package common - -import ( - "fmt" - "log" - "time" - - "github.com/denverdino/aliyungo/util" -) - -// Constants for Aliyun API requests -const ( - SignatureVersion = "1.0" - SignatureMethod = "HMAC-SHA1" - JSONResponseFormat = "JSON" - XMLResponseFormat = "XML" - ECSRequestMethod = "GET" -) - -type Request struct { - Format string - Version string - AccessKeyId string - Signature string - SignatureMethod string - Timestamp util.ISO6801Time - SignatureVersion string - SignatureNonce string - ResourceOwnerAccount string - Action string -} - -func 
(request *Request) init(version string, action string, AccessKeyId string) { - request.Format = JSONResponseFormat - request.Timestamp = util.NewISO6801Time(time.Now().UTC()) - request.Version = version - request.SignatureVersion = SignatureVersion - request.SignatureMethod = SignatureMethod - request.SignatureNonce = util.CreateRandomString() - request.Action = action - request.AccessKeyId = AccessKeyId -} - -type Response struct { - RequestId string -} - -type ErrorResponse struct { - Response - HostId string - Code string - Message string -} - -// An Error represents a custom error for Aliyun API failure response -type Error struct { - ErrorResponse - StatusCode int //Status Code of HTTP Response -} - -func (e *Error) Error() string { - return fmt.Sprintf("Aliyun API Error: RequestId: %s Status Code: %d Code: %s Message: %s", e.RequestId, e.StatusCode, e.Code, e.Message) -} - -type Pagination struct { - PageNumber int - PageSize int -} - -func (p *Pagination) SetPageSize(size int) { - p.PageSize = size -} - -func (p *Pagination) Validate() { - if p.PageNumber < 0 { - log.Printf("Invalid PageNumber: %d", p.PageNumber) - p.PageNumber = 1 - } - if p.PageSize < 0 { - log.Printf("Invalid PageSize: %d", p.PageSize) - p.PageSize = 10 - } else if p.PageSize > 50 { - log.Printf("Invalid PageSize: %d", p.PageSize) - p.PageSize = 50 - } -} - -// A PaginationResult represents a response with pagination information -type PaginationResult struct { - TotalCount int - PageNumber int - PageSize int -} - -// NextPage gets the next page of the result set -func (r *PaginationResult) NextPage() *Pagination { - if r.PageNumber*r.PageSize >= r.TotalCount { - return nil - } - return &Pagination{PageNumber: r.PageNumber + 1, PageSize: r.PageSize} -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/types.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/types.go deleted file mode 100644 index c562aedf..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/types.go +++ /dev/null @@ -1,15 +0,0 @@ -package common - -type InternetChargeType string - -const ( - PayByBandwidth = InternetChargeType("PayByBandwidth") - PayByTraffic = InternetChargeType("PayByTraffic") -) - -type InstanceChargeType string - -const ( - PrePaid = InstanceChargeType("PrePaid") - PostPaid = InstanceChargeType("PostPaid") -) diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/version.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/version.go deleted file mode 100644 index 7cb3d3af..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/common/version.go +++ /dev/null @@ -1,3 +0,0 @@ -package common - -const Version = "0.1" diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/client.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/client.go deleted file mode 100644 index bc1bd6b0..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/client.go +++ /dev/null @@ -1,1394 +0,0 @@ -package oss - -import ( - "bytes" - "crypto/hmac" - "crypto/md5" - "crypto/sha1" - "encoding/base64" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "log" - "mime" - "net" - "net/http" - "net/http/httputil" - "net/url" - "os" - "path" - "strconv" - "strings" - "time" - - "github.com/denverdino/aliyungo/common"
- "github.com/denverdino/aliyungo/util" -) - -const DefaultContentType = "application/octet-stream" - -// The Client type encapsulates operations with an OSS region. -type Client struct { - AccessKeyId string - AccessKeySecret string - SecurityToken string - Region Region - Internal bool - Secure bool - ConnectTimeout time.Duration - - endpoint string - debug bool -} - -// The Bucket type encapsulates operations with an bucket. -type Bucket struct { - *Client - Name string -} - -// The Owner type represents the owner of the object in an bucket. -type Owner struct { - ID string - DisplayName string -} - -// Options struct -// -type Options struct { - ServerSideEncryption bool - Meta map[string][]string - ContentEncoding string - CacheControl string - ContentMD5 string - ContentDisposition string - //Range string - //Expires int -} - -type CopyOptions struct { - Headers http.Header - CopySourceOptions string - MetadataDirective string - //ContentType string -} - -// CopyObjectResult is the output from a Copy request -type CopyObjectResult struct { - ETag string - LastModified string -} - -var attempts = util.AttemptStrategy{ - Min: 5, - Total: 5 * time.Second, - Delay: 200 * time.Millisecond, -} - -// NewOSSClient creates a new OSS. - -func NewOSSClientForAssumeRole(region Region, internal bool, accessKeyId string, accessKeySecret string, securityToken string, secure bool) *Client { - return &Client{ - AccessKeyId: accessKeyId, - AccessKeySecret: accessKeySecret, - SecurityToken: securityToken, - Region: region, - Internal: internal, - debug: false, - Secure: secure, - } -} - -func NewOSSClient(region Region, internal bool, accessKeyId string, accessKeySecret string, secure bool) *Client { - return &Client{ - AccessKeyId: accessKeyId, - AccessKeySecret: accessKeySecret, - Region: region, - Internal: internal, - debug: false, - Secure: secure, - } -} - -// SetDebug sets debug mode to log the request/response message -func (client *Client) SetDebug(debug bool) { - client.debug = debug -} - -// Bucket returns a Bucket with the given name. -func (client *Client) Bucket(name string) *Bucket { - name = strings.ToLower(name) - return &Bucket{ - Client: client, - Name: name, - } -} - -type BucketInfo struct { - Name string - CreationDate string - ExtranetEndpoint string - IntranetEndpoint string - Location string - Grant string `xml:"AccessControlList>Grant"` -} - -type GetServiceResp struct { - Owner Owner - Buckets []BucketInfo `xml:">Bucket"` -} - -type GetBucketInfoResp struct { - Bucket BucketInfo -} - -// GetService gets a list of all buckets owned by an account. -func (client *Client) GetService() (*GetServiceResp, error) { - bucket := client.Bucket("") - - r, err := bucket.Get("") - if err != nil { - return nil, err - } - - // Parse the XML response. - var resp GetServiceResp - if err = xml.Unmarshal(r, &resp); err != nil { - return nil, err - } - - return &resp, nil -} - -type ACL string - -const ( - Private = ACL("private") - PublicRead = ACL("public-read") - PublicReadWrite = ACL("public-read-write") - AuthenticatedRead = ACL("authenticated-read") - BucketOwnerRead = ACL("bucket-owner-read") - BucketOwnerFull = ACL("bucket-owner-full-control") -) - -var createBucketConfiguration = ` - %s -` - -// locationConstraint returns an io.Reader specifying a LocationConstraint if -// required for the region. 
-func (client *Client) locationConstraint() io.Reader { - constraint := fmt.Sprintf(createBucketConfiguration, client.Region) - return strings.NewReader(constraint) -} - -// SetEndpoint overrides the default endpoint -func (client *Client) SetEndpoint(endpoint string) { - // TODO check endpoint - client.endpoint = endpoint -} - -// Info queries basic information about the bucket -// -// You can read doc at https://help.aliyun.com/document_detail/31968.html -func (b *Bucket) Info() (BucketInfo, error) { - params := make(url.Values) - params.Set("bucketInfo", "") - r, err := b.GetWithParams("/", params) - - if err != nil { - return BucketInfo{}, err - } - - // Parse the XML response. - var resp GetBucketInfoResp - if err = xml.Unmarshal(r, &resp); err != nil { - return BucketInfo{}, err - } - - return resp.Bucket, nil -} - -// PutBucket creates a new bucket. -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&PutBucket -func (b *Bucket) PutBucket(perm ACL) error { - headers := make(http.Header) - if perm != "" { - headers.Set("x-oss-acl", string(perm)) - } - req := &request{ - method: "PUT", - bucket: b.Name, - path: "/", - headers: headers, - payload: b.Client.locationConstraint(), - } - return b.Client.query(req, nil) -} - -// DelBucket removes an existing bucket. All objects in the bucket must -// be removed before the bucket itself can be removed. -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&DeleteBucket -func (b *Bucket) DelBucket() (err error) { - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "DELETE", - bucket: b.Name, - path: "/", - } - - err = b.Client.query(req, nil) - if !shouldRetry(err) { - break - } - } - return err -} - -// Get retrieves an object from a bucket. -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&GetObject -func (b *Bucket) Get(path string) (data []byte, err error) { - body, err := b.GetReader(path) - if err != nil { - return nil, err - } - data, err = ioutil.ReadAll(body) - body.Close() - return data, err -} - -// GetReader retrieves an object from a bucket, -// returning the body of the HTTP response. -// It is the caller's responsibility to call Close on rc when -// finished reading. -func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) { - resp, err := b.GetResponse(path) - if resp != nil { - return resp.Body, err - } - return nil, err -} - -// GetResponse retrieves an object from a bucket, -// returning the HTTP response. -// It is the caller's responsibility to call Close on rc when -// finished reading. -func (b *Bucket) GetResponse(path string) (resp *http.Response, err error) { - return b.GetResponseWithHeaders(path, make(http.Header)) -} - -// GetResponseWithHeaders retrieves an object from a bucket. -// It accepts custom headers to be sent as the second parameter, -// returning the body of the HTTP response. -// It is the caller's responsibility to call Close on rc when -// finished reading. -func (b *Bucket) GetResponseWithHeaders(path string, headers http.Header) (resp *http.Response, err error) { - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - bucket: b.Name, - path: path, - headers: headers, - } - err = b.Client.prepare(req) - if err != nil { - return nil, err - } - - resp, err := b.Client.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - return resp, nil - } - panic("unreachable") -} - -// GetWithParams retrieves an object from a bucket using the given query parameters.
-func (b *Bucket) GetWithParams(path string, params url.Values) (data []byte, err error) { - resp, err := b.GetResponseWithParamsAndHeaders(path, params, nil) - if err != nil { - return nil, err - } - data, err = ioutil.ReadAll(resp.Body) - resp.Body.Close() - return data, err -} - -func (b *Bucket) GetResponseWithParamsAndHeaders(path string, params url.Values, headers http.Header) (resp *http.Response, err error) { - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - bucket: b.Name, - path: path, - params: params, - headers: headers, - } - err = b.Client.prepare(req) - if err != nil { - return nil, err - } - - resp, err := b.Client.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - return resp, nil - } - panic("unreachable") -} - -// Exists checks whether or not an object exists in a bucket using a HEAD request. -func (b *Bucket) Exists(path string) (exists bool, err error) { - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "HEAD", - bucket: b.Name, - path: path, - } - err = b.Client.prepare(req) - if err != nil { - return - } - - resp, err := b.Client.run(req, nil) - - if shouldRetry(err) && attempt.HasNext() { - continue - } - - if err != nil { - // We can treat a 403 or 404 as non-existence - if e, ok := err.(*Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) { - return false, nil - } - return false, err - } - - if resp.StatusCode/100 == 2 { - exists = true - } - if resp.Body != nil { - resp.Body.Close() - } - return exists, err - } - return false, fmt.Errorf("OSS Currently Unreachable") -} - -// Head HEADs an object in the bucket and returns the response. -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&HeadObject -func (b *Bucket) Head(path string, headers http.Header) (*http.Response, error) { - - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "HEAD", - bucket: b.Name, - path: path, - headers: headers, - } - err := b.Client.prepare(req) - if err != nil { - return nil, err - } - - resp, err := b.Client.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - return resp, err - } - return nil, fmt.Errorf("OSS Currently Unreachable") -} - -// Put inserts an object into the bucket. -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&PutObject -func (b *Bucket) Put(path string, data []byte, contType string, perm ACL, options Options) error { - body := bytes.NewBuffer(data) - return b.PutReader(path, body, int64(len(data)), contType, perm, options) -} - -// PutCopy puts a copy of an object given by the key path into bucket b using b.Path as the target key -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&CopyObject -func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source string) (*CopyObjectResult, error) { - headers := make(http.Header) - - headers.Set("x-oss-acl", string(perm)) - headers.Set("x-oss-copy-source", source) - - options.addHeaders(headers) - req := &request{ - method: "PUT", - bucket: b.Name, - path: path, - headers: headers, - timeout: 5 * time.Minute, - } - resp := &CopyObjectResult{} - err := b.Client.query(req, resp) - if err != nil { - return resp, err - } - return resp, nil -} - -// PutReader inserts an object into the bucket by consuming data -// from r until EOF.
-func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL, options Options) error { - headers := make(http.Header) - headers.Set("Content-Length", strconv.FormatInt(length, 10)) - headers.Set("Content-Type", contType) - headers.Set("x-oss-acl", string(perm)) - - options.addHeaders(headers) - req := &request{ - method: "PUT", - bucket: b.Name, - path: path, - headers: headers, - payload: r, - } - return b.Client.query(req, nil) -} - -// PutFile creates/updates object with file -func (b *Bucket) PutFile(path string, file *os.File, perm ACL, options Options) error { - var contentType string - if dotPos := strings.LastIndex(file.Name(), "."); dotPos == -1 { - contentType = DefaultContentType - } else { - if mimeType := mime.TypeByExtension(file.Name()[dotPos:]); mimeType == "" { - contentType = DefaultContentType - } else { - contentType = mimeType - } - } - stats, err := file.Stat() - if err != nil { - log.Printf("Unable to read file %s stats.\n", file.Name()) - return err - } - - return b.PutReader(path, file, stats.Size(), contentType, perm, options) -} - -// addHeaders adds o's specified fields to headers -func (o Options) addHeaders(headers http.Header) { - if o.ServerSideEncryption { - headers.Set("x-oss-server-side-encryption", "AES256") - } - if len(o.ContentEncoding) != 0 { - headers.Set("Content-Encoding", o.ContentEncoding) - } - if len(o.CacheControl) != 0 { - headers.Set("Cache-Control", o.CacheControl) - } - if len(o.ContentMD5) != 0 { - headers.Set("Content-MD5", o.ContentMD5) - } - if len(o.ContentDisposition) != 0 { - headers.Set("Content-Disposition", o.ContentDisposition) - } - - for k, v := range o.Meta { - for _, mv := range v { - headers.Add("x-oss-meta-"+k, mv) - } - } -} - -// addHeaders adds o's specified fields to headers -func (o CopyOptions) addHeaders(headers http.Header) { - if len(o.MetadataDirective) != 0 { - headers.Set("x-oss-metadata-directive", o.MetadataDirective) - } - if len(o.CopySourceOptions) != 0 { - headers.Set("x-oss-copy-source-range", o.CopySourceOptions) - } - if o.Headers != nil { - for k, v := range o.Headers { - newSlice := make([]string, len(v)) - copy(newSlice, v) - headers[k] = newSlice - } - } -} - -func makeXMLBuffer(doc []byte) *bytes.Buffer { - buf := new(bytes.Buffer) - buf.WriteString(xml.Header) - buf.Write(doc) - return buf -} - -type IndexDocument struct { - Suffix string `xml:"Suffix"` -} - -type ErrorDocument struct { - Key string `xml:"Key"` -} - -type RoutingRule struct { - ConditionKeyPrefixEquals string `xml:"Condition>KeyPrefixEquals"` - RedirectReplaceKeyPrefixWith string `xml:"Redirect>ReplaceKeyPrefixWith,omitempty"` - RedirectReplaceKeyWith string `xml:"Redirect>ReplaceKeyWith,omitempty"` -} - -type RedirectAllRequestsTo struct { - HostName string `xml:"HostName"` - Protocol string `xml:"Protocol,omitempty"` -} - -type WebsiteConfiguration struct { - XMLName xml.Name `xml:"http://doc.oss-cn-hangzhou.aliyuncs.com WebsiteConfiguration"` - IndexDocument *IndexDocument `xml:"IndexDocument,omitempty"` - ErrorDocument *ErrorDocument `xml:"ErrorDocument,omitempty"` - RoutingRules *[]RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"` - RedirectAllRequestsTo *RedirectAllRequestsTo `xml:"RedirectAllRequestsTo,omitempty"` -} - -// PutBucketWebsite configures a bucket as a website. 
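For orientation, a sketch of the XML these website-configuration types marshal to, using only the types defined above. PutBucketWebsite, which uploads this document under the website subresource, follows:

    cfg := WebsiteConfiguration{
        IndexDocument: &IndexDocument{Suffix: "index.html"},
        ErrorDocument: &ErrorDocument{Key: "error.html"},
    }
    doc, _ := xml.Marshal(cfg)
    fmt.Println(string(doc))
    // Output (wrapped for readability; Marshal emits a single line, with the
    // namespace coming from the XMLName tag above):
    // <WebsiteConfiguration xmlns="http://doc.oss-cn-hangzhou.aliyuncs.com">
    //   <IndexDocument><Suffix>index.html</Suffix></IndexDocument>
    //   <ErrorDocument><Key>error.html</Key></ErrorDocument>
    // </WebsiteConfiguration>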
-// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&PutBucketWebsite -func (b *Bucket) PutBucketWebsite(configuration WebsiteConfiguration) error { - doc, err := xml.Marshal(configuration) - if err != nil { - return err - } - - buf := makeXMLBuffer(doc) - - return b.PutBucketSubresource("website", buf, int64(buf.Len())) -} - -func (b *Bucket) PutBucketSubresource(subresource string, r io.Reader, length int64) error { - headers := make(http.Header) - headers.Set("Content-Length", strconv.FormatInt(length, 10)) - - req := &request{ - path: "/", - method: "PUT", - bucket: b.Name, - headers: headers, - payload: r, - params: url.Values{subresource: {""}}, - } - - return b.Client.query(req, nil) -} - -// Del removes an object from the bucket. -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&DeleteObject -func (b *Bucket) Del(path string) error { - req := &request{ - method: "DELETE", - bucket: b.Name, - path: path, - } - return b.Client.query(req, nil) -} - -type Delete struct { - Quiet bool `xml:"Quiet,omitempty"` - Objects []Object `xml:"Object"` -} - -type Object struct { - Key string `xml:"Key"` - VersionId string `xml:"VersionId,omitempty"` -} - -// DelMulti removes up to 1000 objects from the bucket. -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&DeleteMultipleObjects -func (b *Bucket) DelMulti(objects Delete) error { - doc, err := xml.Marshal(objects) - if err != nil { - return err - } - - buf := makeXMLBuffer(doc) - digest := md5.New() - size, err := digest.Write(buf.Bytes()) - if err != nil { - return err - } - - headers := make(http.Header) - headers.Set("Content-Length", strconv.FormatInt(int64(size), 10)) - headers.Set("Content-MD5", base64.StdEncoding.EncodeToString(digest.Sum(nil))) - headers.Set("Content-Type", "text/xml") - - req := &request{ - path: "/", - method: "POST", - params: url.Values{"delete": {""}}, - bucket: b.Name, - headers: headers, - payload: buf, - } - - return b.Client.query(req, nil) -} - -// The ListResp type holds the results of a List bucket operation. -type ListResp struct { - Name string - Prefix string - Delimiter string - Marker string - MaxKeys int - // IsTruncated is true if the results have been truncated because - // there are more keys and prefixes than can fit in MaxKeys. - // N.B. this is the opposite sense to that documented (incorrectly) in - // http://goo.gl/YjQTc - IsTruncated bool - Contents []Key - CommonPrefixes []string `xml:">Prefix"` - // if IsTruncated is true, pass NextMarker as marker argument to List() - // to get the next set of keys - NextMarker string -} - -// The Key type represents an item stored in an bucket. -type Key struct { - Key string - LastModified string - Type string - Size int64 - // ETag gives the hex-encoded MD5 sum of the contents, - // surrounded with double-quotes. - ETag string - StorageClass string - Owner Owner -} - -// List returns information about objects in an bucket. -// -// The prefix parameter limits the response to keys that begin with the -// specified prefix. -// -// The delim parameter causes the response to group all of the keys that -// share a common prefix up to the next delimiter in a single entry within -// the CommonPrefixes field. You can use delimiters to separate a bucket -// into different groupings of keys, similar to how folders would work. -// -// The marker parameter specifies the key to start with when listing objects -// in a bucket. 
-// OSS lists objects in alphabetical order and
-// will return keys alphabetically greater than the marker.
-//
-// The max parameter specifies how many keys + common prefixes to return in
-// the response, at most 1000. The default is 100.
-//
-// For example, given these keys in a bucket:
-//
-//     index.html
-//     index2.html
-//     photos/2006/January/sample.jpg
-//     photos/2006/February/sample2.jpg
-//     photos/2006/February/sample3.jpg
-//     photos/2006/February/sample4.jpg
-//
-// Listing this bucket with delimiter set to "/" would yield the
-// following result:
-//
-//     &ListResp{
-//         Name:      "sample-bucket",
-//         MaxKeys:   1000,
-//         Delimiter: "/",
-//         Contents: []Key{
-//             {Key: "index.html"},
-//             {Key: "index2.html"},
-//         },
-//         CommonPrefixes: []string{
-//             "photos/",
-//         },
-//     }
-//
-// Listing the same bucket with delimiter set to "/" and prefix set to
-// "photos/2006/" would yield the following result:
-//
-//     &ListResp{
-//         Name:      "sample-bucket",
-//         MaxKeys:   1000,
-//         Delimiter: "/",
-//         Prefix:    "photos/2006/",
-//         CommonPrefixes: []string{
-//             "photos/2006/February/",
-//             "photos/2006/January/",
-//         },
-//     }
-//
-//
-// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&GetBucket
-func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) {
-    params := make(url.Values)
-    params.Set("prefix", prefix)
-    params.Set("delimiter", delim)
-    params.Set("marker", marker)
-    if max != 0 {
-        params.Set("max-keys", strconv.FormatInt(int64(max), 10))
-    }
-    result = &ListResp{}
-    for attempt := attempts.Start(); attempt.Next(); {
-        req := &request{
-            bucket: b.Name,
-            params: params,
-        }
-        err = b.Client.query(req, result)
-        if !shouldRetry(err) {
-            break
-        }
-    }
-    if err != nil {
-        return nil, err
-    }
-    // if NextMarker is not returned, it should be set to the name of the last key,
-    // so let's do it so that each caller doesn't have to
-    if result.IsTruncated && result.NextMarker == "" {
-        n := len(result.Contents)
-        if n > 0 {
-            result.NextMarker = result.Contents[n-1].Key
-        }
-    }
-    return result, nil
-}
-
-type GetLocationResp struct {
-    Location string `xml:",innerxml"`
-}
-
-func (b *Bucket) Location() (string, error) {
-    params := make(url.Values)
-    params.Set("location", "")
-    r, err := b.GetWithParams("/", params)
-
-    if err != nil {
-        return "", err
-    }
-
-    // Parse the XML response.
-    var resp GetLocationResp
-    if err = xml.Unmarshal(r, &resp); err != nil {
-        return "", err
-    }
-
-    if resp.Location == "" {
-        return string(Hangzhou), nil
-    }
-    return resp.Location, nil
-}
-
-func (b *Bucket) Path(path string) string {
-    if !strings.HasPrefix(path, "/") {
-        path = "/" + path
-    }
-    return "/" + b.Name + path
-}
-
-// URL returns a non-signed URL that allows retrieving the
-// object at path. It only works if the object is publicly
-// readable (see SignedURL).
-func (b *Bucket) URL(path string) string {
-    req := &request{
-        bucket: b.Name,
-        path:   path,
-    }
-    err := b.Client.prepare(req)
-    if err != nil {
-        panic(err)
-    }
-    u, err := req.url()
-    if err != nil {
-        panic(err)
-    }
-    u.RawQuery = ""
-    return u.String()
-}
-
-// SignedURL returns a signed URL that allows anyone holding the URL
-// to retrieve the object at path. The signature is valid until expires.
-func (b *Bucket) SignedURL(path string, expires time.Time) string {
-    return b.SignedURLWithArgs(path, expires, nil, nil)
-}
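The marker/IsTruncated contract above implies the usual pagination loop; a caller-side sketch, reusing the bucket value from the earlier sketches (the signed-URL helpers continue below):

    // Walk every key under "photos/", up to 1000 per request.
    marker := ""
    for {
        resp, err := bucket.List("photos/", "", marker, 1000)
        if err != nil {
            log.Fatal(err)
        }
        for _, k := range resp.Contents {
            fmt.Println(k.Key, k.Size)
        }
        if !resp.IsTruncated {
            break
        }
        // Safe even when OSS omits NextMarker: List falls back to the
        // last key's name, as shown above.
        marker = resp.NextMarker
    }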
-// SignedURLWithArgs returns a signed URL that allows anyone holding the URL
-// to retrieve the object at path. The signature is valid until expires.
-func (b *Bucket) SignedURLWithArgs(path string, expires time.Time, params url.Values, headers http.Header) string {
-    return b.SignedURLWithMethod("GET", path, expires, params, headers)
-}
-
-// SignedURLWithMethod returns a signed URL that allows anyone holding the URL
-// to either retrieve the object at path or make a HEAD request against it. The signature is valid until expires.
-func (b *Bucket) SignedURLWithMethod(method, path string, expires time.Time, params url.Values, headers http.Header) string {
-    var uv = url.Values{}
-
-    if params != nil {
-        uv = params
-    }
-
-    uv.Set("Expires", strconv.FormatInt(expires.Unix(), 10))
-    uv.Set("OSSAccessKeyId", b.AccessKeyId)
-
-    req := &request{
-        method:  method,
-        bucket:  b.Name,
-        path:    path,
-        params:  uv,
-        headers: headers,
-    }
-    err := b.Client.prepare(req)
-    if err != nil {
-        panic(err)
-    }
-    u, err := req.url()
-    if err != nil {
-        panic(err)
-    }
-
-    return u.String()
-}
-
-// UploadSignedURL returns a signed URL that allows anyone holding the URL
-// to upload the object at path. The signature is valid until expires.
-// contentType is a string like image/png.
-// name is the resource name in OSS terminology like images/ali.png [obviously excluding the bucket name itself]
-func (b *Bucket) UploadSignedURL(name, method, contentType string, expires time.Time) string {
-    // TODO TESTING
-    expireDate := expires.Unix()
-    if method != "POST" {
-        method = "PUT"
-    }
-
-    tokenData := ""
-
-    stringToSign := method + "\n\n" + contentType + "\n" + strconv.FormatInt(expireDate, 10) + "\n" + tokenData + "/" + path.Join(b.Name, name)
-    secretKey := b.AccessKeySecret
-    accessId := b.AccessKeyId
-    mac := hmac.New(sha1.New, []byte(secretKey))
-    mac.Write([]byte(stringToSign))
-    macsum := mac.Sum(nil)
-    signature := base64.StdEncoding.EncodeToString(macsum)
-    signature = strings.TrimSpace(signature)
-
-    signedurl, err := url.Parse(b.Region.GetEndpoint(b.Internal, b.Name, b.Secure))
-    if err != nil {
-        log.Println("ERROR signing url for OSS upload", err)
-        return ""
-    }
-    signedurl.Path = name
-    params := url.Values{}
-    params.Add("OSSAccessKeyId", accessId)
-    params.Add("Expires", strconv.FormatInt(expireDate, 10))
-    params.Add("Signature", signature)
-
-    signedurl.RawQuery = params.Encode()
-    return signedurl.String()
-}
-
-// PostFormArgsEx returns the action and input fields needed to allow anonymous
-// uploads to a bucket within the expiration limit.
-// Additional conditions can be specified with conds.
-func (b *Bucket) PostFormArgsEx(path string, expires time.Time, redirect string, conds []string) (action string, fields map[string]string) {
-    conditions := []string{}
-    fields = map[string]string{
-        "AWSAccessKeyId": b.AccessKeyId,
-        "key":            path,
-    }
-
-    if conds != nil {
-        conditions = append(conditions, conds...)
-    }
-
-    conditions = append(conditions, fmt.Sprintf("{\"key\": \"%s\"}", path))
-    conditions = append(conditions, fmt.Sprintf("{\"bucket\": \"%s\"}", b.Name))
-    if redirect != "" {
-        conditions = append(conditions, fmt.Sprintf("{\"success_action_redirect\": \"%s\"}", redirect))
-        fields["success_action_redirect"] = redirect
-    }
-
-    vExpiration := expires.Format("2006-01-02T15:04:05Z")
-    vConditions := strings.Join(conditions, ",")
-    policy := fmt.Sprintf("{\"expiration\": \"%s\", \"conditions\": [%s]}", vExpiration, vConditions)
-    policy64 := base64.StdEncoding.EncodeToString([]byte(policy))
-    fields["policy"] = policy64
-
-    signer := hmac.New(sha1.New, []byte(b.AccessKeySecret))
-    signer.Write([]byte(policy64))
-    fields["signature"] = base64.StdEncoding.EncodeToString(signer.Sum(nil))
-
-    action = fmt.Sprintf("%s/%s/", b.Client.Region, b.Name)
-    return
-}
-
-// PostFormArgs returns the action and input fields needed to allow anonymous
-// uploads to a bucket within the expiration limit.
-func (b *Bucket) PostFormArgs(path string, expires time.Time, redirect string) (action string, fields map[string]string) {
-    return b.PostFormArgsEx(path, expires, redirect, nil)
-}
-
-type request struct {
-    method   string
-    bucket   string
-    path     string
-    params   url.Values
-    headers  http.Header
-    baseurl  string
-    payload  io.Reader
-    prepared bool
-    timeout  time.Duration
-}
-
-func (req *request) url() (*url.URL, error) {
-    u, err := url.Parse(req.baseurl)
-    if err != nil {
-        return nil, fmt.Errorf("bad OSS endpoint URL %q: %v", req.baseurl, err)
-    }
-    u.RawQuery = req.params.Encode()
-    u.Path = req.path
-    return u, nil
-}
-
-// query prepares and runs the req request.
-// If resp is not nil, the XML data contained in the response
-// body will be unmarshalled on it.
-func (client *Client) query(req *request, resp interface{}) error {
-    err := client.prepare(req)
-    if err != nil {
-        return err
-    }
-    r, err := client.run(req, resp)
-    if r != nil && r.Body != nil {
-        r.Body.Close()
-    }
-    return err
-}
-
-// setBaseURL sets baseurl on req from the bucket name and the region endpoint.
-func (client *Client) setBaseURL(req *request) error {
-
-    if client.endpoint == "" {
-        req.baseurl = client.Region.GetEndpoint(client.Internal, req.bucket, client.Secure)
-    } else {
-        req.baseurl = fmt.Sprintf("%s://%s", getProtocol(client.Secure), client.endpoint)
-    }
-
-    return nil
-}
-
-// partiallyEscapedPath partially escapes the OSS path allowing for all OSS REST API calls.
-//
-// Some commands, including:
-//     GET Bucket acl           http://goo.gl/aoXflF
-//     GET Bucket cors          http://goo.gl/UlmBdx
-//     GET Bucket lifecycle     http://goo.gl/8Fme7M
-//     GET Bucket policy        http://goo.gl/ClXIo3
-//     GET Bucket location      http://goo.gl/5lh8RD
-//     GET Bucket Logging       http://goo.gl/sZ5ckF
-//     GET Bucket notification  http://goo.gl/qSSZKD
-//     GET Bucket tagging       http://goo.gl/QRvxnM
-// require the first character after the bucket name in the path to be a literal '?' and
-// not the escaped hex representation '%3F'.
-func partiallyEscapedPath(path string) string {
-    pathEscapedAndSplit := strings.Split((&url.URL{Path: path}).String(), "/")
-    if len(pathEscapedAndSplit) >= 3 {
-        if len(pathEscapedAndSplit[2]) >= 3 {
-            // Check for the one "?" that should not be escaped.
-            if pathEscapedAndSplit[2][0:3] == "%3F" {
-                pathEscapedAndSplit[2] = "?" + pathEscapedAndSplit[2][3:]
-            }
-        }
-    }
-    return strings.Replace(strings.Join(pathEscapedAndSplit, "/"), "+", "%2B", -1)
-}
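partiallyEscapedPath is subtle enough to deserve a worked example; a hypothetical package-internal test (not part of this diff) that pins down both special cases:

    // (in package oss, with "testing" imported)
    func TestPartiallyEscapedPath(t *testing.T) {
        // The '?' introducing a subresource must survive as a literal '?' ...
        if got := partiallyEscapedPath("/mybucket/?acl"); got != "/mybucket/?acl" {
            t.Fatalf("got %q", got)
        }
        // ... while '+' is forced to %2B so servers do not decode it as a space.
        if got := partiallyEscapedPath("/mybucket/a+b"); got != "/mybucket/a%2Bb" {
            t.Fatalf("got %q", got)
        }
    }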
-// prepare sets up req to be delivered to OSS.
-func (client *Client) prepare(req *request) error {
-    // Copy so they can be mutated without affecting retries.
-    headers := copyHeader(req.headers)
-    if len(client.SecurityToken) != 0 {
-        headers.Set("x-oss-security-token", client.SecurityToken)
-    }
-
-    params := make(url.Values)
-
-    for k, v := range req.params {
-        params[k] = v
-    }
-
-    req.params = params
-    req.headers = headers
-
-    if !req.prepared {
-        req.prepared = true
-        if req.method == "" {
-            req.method = "GET"
-        }
-
-        if !strings.HasPrefix(req.path, "/") {
-            req.path = "/" + req.path
-        }
-
-        err := client.setBaseURL(req)
-        if err != nil {
-            return err
-        }
-    }
-
-    req.headers.Set("Date", util.GetGMTime())
-    client.signRequest(req)
-
-    return nil
-}
-
-// setupHttpRequest prepares an *http.Request for doHttpRequest.
-func (client *Client) setupHttpRequest(req *request) (*http.Request, error) {
-    // Copy so that signing the http request will not mutate it.
-    u, err := req.url()
-    if err != nil {
-        return nil, err
-    }
-    u.Opaque = fmt.Sprintf("//%s%s", u.Host, partiallyEscapedPath(u.Path))
-
-    hreq := http.Request{
-        URL:        u,
-        Method:     req.method,
-        ProtoMajor: 1,
-        ProtoMinor: 1,
-        Close:      true,
-        Header:     req.headers,
-        Form:       req.params,
-    }
-
-    hreq.Header.Set("X-SDK-Client", `AliyunGO/`+common.Version)
-
-    contentLength := req.headers.Get("Content-Length")
-
-    if contentLength != "" {
-        hreq.ContentLength, _ = strconv.ParseInt(contentLength, 10, 64)
-        req.headers.Del("Content-Length")
-    }
-
-    if req.payload != nil {
-        hreq.Body = ioutil.NopCloser(req.payload)
-    }
-
-    return &hreq, nil
-}
-
-// doHttpRequest sends hreq and returns the http response from the server.
-// If resp is not nil, the XML data contained in the response
-// body will be unmarshalled on it.
-func (client *Client) doHttpRequest(c *http.Client, hreq *http.Request, resp interface{}) (*http.Response, error) {
-
-    if client.debug {
-        log.Printf("%s %s ...\n", hreq.Method, hreq.URL.String())
-    }
-    hresp, err := c.Do(hreq)
-    if err != nil {
-        return nil, err
-    }
-    if client.debug {
-        log.Printf("%s %s %d\n", hreq.Method, hreq.URL.String(), hresp.StatusCode)
-        contentType := hresp.Header.Get("Content-Type")
-        if contentType == "application/xml" || contentType == "text/xml" {
-            dump, _ := httputil.DumpResponse(hresp, true)
-            log.Printf("%s\n", dump)
-        } else {
-            log.Printf("Response Content-Type: %s\n", contentType)
-        }
-    }
-    if hresp.StatusCode != 200 && hresp.StatusCode != 204 && hresp.StatusCode != 206 {
-        return nil, client.buildError(hresp)
-    }
-    if resp != nil {
-        err = xml.NewDecoder(hresp.Body).Decode(resp)
-        hresp.Body.Close()
-
-        if client.debug {
-            log.Printf("aliyungo.oss> decoded xml into %#v", resp)
-        }
-
-    }
-    return hresp, err
-}
-
-// run sends req and returns the http response from the server.
-// If resp is not nil, the XML data contained in the response
-// body will be unmarshalled on it.
-func (client *Client) run(req *request, resp interface{}) (*http.Response, error) { - if client.debug { - log.Printf("Running OSS request: %#v", req) - } - - hreq, err := client.setupHttpRequest(req) - if err != nil { - return nil, err - } - - c := &http.Client{ - Transport: &http.Transport{ - Dial: func(netw, addr string) (c net.Conn, err error) { - if client.ConnectTimeout > 0 { - c, err = net.DialTimeout(netw, addr, client.ConnectTimeout) - } else { - c, err = net.Dial(netw, addr) - } - if err != nil { - return - } - return - }, - Proxy: http.ProxyFromEnvironment, - }, - Timeout: req.timeout, - } - - return client.doHttpRequest(c, hreq, resp) -} - -// Error represents an error in an operation with OSS. -type Error struct { - StatusCode int // HTTP status code (200, 403, ...) - Code string // OSS error code ("UnsupportedOperation", ...) - Message string // The human-oriented error message - BucketName string - RequestId string - HostId string -} - -func (e *Error) Error() string { - return fmt.Sprintf("Aliyun API Error: RequestId: %s Status Code: %d Code: %s Message: %s", e.RequestId, e.StatusCode, e.Code, e.Message) -} - -func (client *Client) buildError(r *http.Response) error { - if client.debug { - log.Printf("got error (status code %v)", r.StatusCode) - data, err := ioutil.ReadAll(r.Body) - if err != nil { - log.Printf("\tread error: %v", err) - } else { - log.Printf("\tdata:\n%s\n\n", data) - } - r.Body = ioutil.NopCloser(bytes.NewBuffer(data)) - } - - err := Error{} - // TODO return error if Unmarshal fails? - xml.NewDecoder(r.Body).Decode(&err) - r.Body.Close() - err.StatusCode = r.StatusCode - if err.Message == "" { - err.Message = r.Status - } - if client.debug { - log.Printf("err: %#v\n", err) - } - return &err -} - -type TimeoutError interface { - error - Timeout() bool // Is the error a timeout? -} - -func shouldRetry(err error) bool { - if err == nil { - return false - } - - _, ok := err.(TimeoutError) - if ok { - return true - } - - switch err { - case io.ErrUnexpectedEOF, io.EOF: - return true - } - switch e := err.(type) { - case *net.DNSError: - return true - case *net.OpError: - switch e.Op { - case "read", "write": - return true - } - case *url.Error: - // url.Error can be returned either by net/url if a URL cannot be - // parsed, or by net/http if the response is closed before the headers - // are received or parsed correctly. In that later case, e.Op is set to - // the HTTP method name with the first letter uppercased. We don't want - // to retry on POST operations, since those are not idempotent, all the - // other ones should be safe to retry. 
-        switch e.Op {
-        case "Get", "Put", "Delete", "Head":
-            return shouldRetry(e.Err)
-        default:
-            return false
-        }
-    case *Error:
-        switch e.Code {
-        case "InternalError", "NoSuchUpload", "NoSuchBucket":
-            return true
-        }
-    }
-    return false
-}
-
-func hasCode(err error, code string) bool {
-    e, ok := err.(*Error)
-    return ok && e.Code == code
-}
-
-func copyHeader(header http.Header) (newHeader http.Header) {
-    newHeader = make(http.Header)
-    for k, v := range header {
-        newSlice := make([]string, len(v))
-        copy(newSlice, v)
-        newHeader[k] = newSlice
-    }
-    return
-}
-
-type AccessControlPolicy struct {
-    Owner  Owner
-    Grants []string `xml:"AccessControlList>Grant"`
-}
-
-// ACL returns the ACL of the bucket.
-//
-// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&GetBucketAcl
-func (b *Bucket) ACL() (result *AccessControlPolicy, err error) {
-
-    params := make(url.Values)
-    params.Set("acl", "")
-
-    r, err := b.GetWithParams("/", params)
-    if err != nil {
-        return nil, err
-    }
-
-    // Parse the XML response.
-    var resp AccessControlPolicy
-    if err = xml.Unmarshal(r, &resp); err != nil {
-        return nil, err
-    }
-
-    return &resp, nil
-}
-
-func (b *Bucket) GetContentLength(sourcePath string) (int64, error) {
-    resp, err := b.Head(sourcePath, nil)
-    if err != nil {
-        return 0, err
-    }
-
-    currentLength := resp.ContentLength
-
-    return currentLength, err
-}
-
-func (b *Bucket) CopyLargeFile(sourcePath string, destPath string, contentType string, perm ACL, options Options) error {
-    return b.CopyLargeFileInParallel(sourcePath, destPath, contentType, perm, options, 1)
-}
-
-const defaultChunkSize = int64(128 * 1024 * 1024) // 128MB
-const maxCopySize = int64(1024 * 1024 * 1024)     // 1GB
-
-// CopyLargeFileInParallel copies a large file within the same bucket.
-func (b *Bucket) CopyLargeFileInParallel(sourcePath string, destPath string, contentType string, perm ACL, options Options, maxConcurrency int) error {
-
-    if maxConcurrency < 1 {
-        maxConcurrency = 1
-    }
-
-    log.Printf("Copy large file from %s to %s\n", sourcePath, destPath)
-
-    currentLength, err := b.GetContentLength(sourcePath)
-
-    if err != nil {
-        return err
-    }
-
-    if currentLength < maxCopySize {
-        _, err := b.PutCopy(destPath, perm,
-            CopyOptions{},
-            b.Path(sourcePath))
-        return err
-    }
-
-    multi, err := b.InitMulti(destPath, contentType, perm, options)
-    if err != nil {
-        return err
-    }
-
-    numParts := (currentLength + defaultChunkSize - 1) / defaultChunkSize
-    completedParts := make([]Part, numParts)
-
-    errChan := make(chan error, numParts)
-    limiter := make(chan struct{}, maxConcurrency)
-
-    var start int64 = 0
-    var to int64 = 0
-    var partNumber = 0
-    sourcePathForCopy := b.Path(sourcePath)
-
-    for start = 0; start < currentLength; start = to {
-        to = start + defaultChunkSize
-        if to > currentLength {
-            to = currentLength
-        }
-        partNumber++
-
-        rangeStr := fmt.Sprintf("bytes=%d-%d", start, to-1)
-        limiter <- struct{}{}
-        go func(partNumber int, rangeStr string) {
-            _, part, err := multi.PutPartCopyWithContentLength(partNumber,
-                CopyOptions{CopySourceOptions: rangeStr},
-                sourcePathForCopy, currentLength)
-            if err == nil {
-                completedParts[partNumber-1] = part
-            } else {
-                log.Printf("PutPartCopy of part %d failed for %s: %v\n", partNumber, sourcePathForCopy, err)
-            }
-            errChan <- err
-            <-limiter
-        }(partNumber, rangeStr)
-    }
-
-    fullyCompleted := true
-    for range completedParts {
-        err := <-errChan
-        if err != nil {
-            fullyCompleted = false
-        }
-    }
-
-    if fullyCompleted {
-        err = multi.Complete(completedParts)
-    } else {
-        err = multi.Abort()
-
} - - return err -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/export.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/export.go deleted file mode 100644 index ebdb0477..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/export.go +++ /dev/null @@ -1,23 +0,0 @@ -package oss - -import ( - "github.com/denverdino/aliyungo/util" -) - -var originalStrategy = attempts - -func SetAttemptStrategy(s *util.AttemptStrategy) { - if s == nil { - attempts = originalStrategy - } else { - attempts = *s - } -} - -func SetListPartsMax(n int) { - listPartsMax = n -} - -func SetListMultiMax(n int) { - listMultiMax = n -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/multi.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/multi.go deleted file mode 100644 index d720e188..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/multi.go +++ /dev/null @@ -1,489 +0,0 @@ -package oss - -import ( - "bytes" - "crypto/md5" - "encoding/base64" - "encoding/hex" - "encoding/xml" - "errors" - "io" - "time" - //"log" - "net/http" - "net/url" - "sort" - "strconv" - "strings" -) - -// Multi represents an unfinished multipart upload. -// -// Multipart uploads allow sending big objects in smaller chunks. -// After all parts have been sent, the upload must be explicitly -// completed by calling Complete with the list of parts. - -type Multi struct { - Bucket *Bucket - Key string - UploadId string -} - -// That's the default. Here just for testing. -var listMultiMax = 1000 - -type listMultiResp struct { - NextKeyMarker string - NextUploadIdMarker string - IsTruncated bool - Upload []Multi - CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` -} - -// ListMulti returns the list of unfinished multipart uploads in b. -// -// The prefix parameter limits the response to keys that begin with the -// specified prefix. You can use prefixes to separate a bucket into different -// groupings of keys (to get the feeling of folders, for example). -// -// The delim parameter causes the response to group all of the keys that -// share a common prefix up to the next delimiter in a single entry within -// the CommonPrefixes field. You can use delimiters to separate a bucket -// into different groupings of keys, similar to how folders would work. -// -func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) { - params := make(url.Values) - params.Set("uploads", "") - params.Set("max-uploads", strconv.FormatInt(int64(listMultiMax), 10)) - params.Set("prefix", prefix) - params.Set("delimiter", delim) - - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "GET", - bucket: b.Name, - params: params, - } - var resp listMultiResp - err := b.Client.query(req, &resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, nil, err - } - for i := range resp.Upload { - multi := &resp.Upload[i] - multi.Bucket = b - multis = append(multis, multi) - } - prefixes = append(prefixes, resp.CommonPrefixes...) - if !resp.IsTruncated { - return multis, prefixes, nil - } - params.Set("key-marker", resp.NextKeyMarker) - params.Set("upload-id-marker", resp.NextUploadIdMarker) - attempt = attempts.Start() // Last request worked. 
- } - panic("unreachable") -} - -// Multi returns a multipart upload handler for the provided key -// inside b. If a multipart upload exists for key, it is returned, -// otherwise a new multipart upload is initiated with contType and perm. -func (b *Bucket) Multi(key, contType string, perm ACL, options Options) (*Multi, error) { - multis, _, err := b.ListMulti(key, "") - if err != nil && !hasCode(err, "NoSuchUpload") { - return nil, err - } - for _, m := range multis { - if m.Key == key { - return m, nil - } - } - return b.InitMulti(key, contType, perm, options) -} - -// InitMulti initializes a new multipart upload at the provided -// key inside b and returns a value for manipulating it. -// -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&InitiateMultipartUpload -func (b *Bucket) InitMulti(key string, contType string, perm ACL, options Options) (*Multi, error) { - headers := make(http.Header) - headers.Set("Content-Length", "0") - headers.Set("Content-Type", contType) - headers.Set("x-oss-acl", string(perm)) - - options.addHeaders(headers) - params := make(url.Values) - params.Set("uploads", "") - req := &request{ - method: "POST", - bucket: b.Name, - path: key, - headers: headers, - params: params, - } - var err error - var resp struct { - UploadId string `xml:"UploadId"` - } - for attempt := attempts.Start(); attempt.Next(); { - err = b.Client.query(req, &resp) - if !shouldRetry(err) { - break - } - } - if err != nil { - return nil, err - } - return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil -} - -func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObjectResult, Part, error) { - return m.PutPartCopyWithContentLength(n, options, source, -1) -} - -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&UploadPartCopy -func (m *Multi) PutPartCopyWithContentLength(n int, options CopyOptions, source string, contentLength int64) (*CopyObjectResult, Part, error) { - // TODO source format a /BUCKET/PATH/TO/OBJECT - // TODO not a good design. API could be changed to PutPartCopyWithinBucket(..., path) and PutPartCopyFromBucket(bucket, path) - - headers := make(http.Header) - headers.Set("x-oss-copy-source", source) - - options.addHeaders(headers) - params := make(url.Values) - params.Set("uploadId", m.UploadId) - params.Set("partNumber", strconv.FormatInt(int64(n), 10)) - - if contentLength < 0 { - sourceBucket := m.Bucket.Client.Bucket(strings.TrimRight(strings.Split(source, "/")[1], "/")) - //log.Println("source: ", source) - //log.Println("sourceBucket: ", sourceBucket.Name) - //log.Println("HEAD: ", strings.strings.SplitAfterN(source, "/", 3)[2]) - // TODO SplitAfterN can be use in bucket name - sourceMeta, err := sourceBucket.Head(strings.SplitAfterN(source, "/", 3)[2], nil) - if err != nil { - return nil, Part{}, err - } - contentLength = sourceMeta.ContentLength - } - - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "PUT", - bucket: m.Bucket.Name, - path: m.Key, - headers: headers, - params: params, - } - resp := &CopyObjectResult{} - err := m.Bucket.Client.query(req, resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, Part{}, err - } - if resp.ETag == "" { - return nil, Part{}, errors.New("part upload succeeded with no ETag") - } - return resp, Part{n, resp.ETag, contentLength}, nil - } - panic("unreachable") -} - -// PutPart sends part n of the multipart upload, reading all the content from r. 
-// Each part, except for the last one, must be at least 5MB in size. -// -// -// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&UploadPart -func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) { - partSize, _, md5b64, err := seekerInfo(r) - if err != nil { - return Part{}, err - } - return m.putPart(n, r, partSize, md5b64, 0) -} - -func (m *Multi) PutPartWithTimeout(n int, r io.ReadSeeker, timeout time.Duration) (Part, error) { - partSize, _, md5b64, err := seekerInfo(r) - if err != nil { - return Part{}, err - } - return m.putPart(n, r, partSize, md5b64, timeout) -} - -func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string, timeout time.Duration) (Part, error) { - headers := make(http.Header) - headers.Set("Content-Length", strconv.FormatInt(partSize, 10)) - headers.Set("Content-MD5", md5b64) - - params := make(url.Values) - params.Set("uploadId", m.UploadId) - params.Set("partNumber", strconv.FormatInt(int64(n), 10)) - - for attempt := attempts.Start(); attempt.Next(); { - _, err := r.Seek(0, 0) - if err != nil { - return Part{}, err - } - req := &request{ - method: "PUT", - bucket: m.Bucket.Name, - path: m.Key, - headers: headers, - params: params, - payload: r, - timeout: timeout, - } - err = m.Bucket.Client.prepare(req) - if err != nil { - return Part{}, err - } - resp, err := m.Bucket.Client.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return Part{}, err - } - etag := resp.Header.Get("ETag") - if etag == "" { - return Part{}, errors.New("part upload succeeded with no ETag") - } - return Part{n, etag, partSize}, nil - } - panic("unreachable") -} - -func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) { - _, err = r.Seek(0, 0) - if err != nil { - return 0, "", "", err - } - digest := md5.New() - size, err = io.Copy(digest, r) - if err != nil { - return 0, "", "", err - } - sum := digest.Sum(nil) - md5hex = hex.EncodeToString(sum) - md5b64 = base64.StdEncoding.EncodeToString(sum) - return size, md5hex, md5b64, nil -} - -type Part struct { - N int `xml:"PartNumber"` - ETag string - Size int64 -} - -type partSlice []Part - -func (s partSlice) Len() int { return len(s) } -func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N } -func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -type listPartsResp struct { - NextPartNumberMarker string - IsTruncated bool - Part []Part -} - -// That's the default. Here just for testing. -var listPartsMax = 1000 - -// ListParts for backcompatability. See the documentation for ListPartsFull -func (m *Multi) ListParts() ([]Part, error) { - return m.ListPartsFull(0, listPartsMax) -} - -// ListPartsFull returns the list of previously uploaded parts in m, -// ordered by part number (Only parts with higher part numbers than -// partNumberMarker will be listed). Only up to maxParts parts will be -// returned. 
-// -func (m *Multi) ListPartsFull(partNumberMarker int, maxParts int) ([]Part, error) { - if maxParts > listPartsMax { - maxParts = listPartsMax - } - - params := make(url.Values) - params.Set("uploadId", m.UploadId) - params.Set("max-parts", strconv.FormatInt(int64(maxParts), 10)) - params.Set("part-number-marker", strconv.FormatInt(int64(partNumberMarker), 10)) - - var parts partSlice - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "GET", - bucket: m.Bucket.Name, - path: m.Key, - params: params, - } - var resp listPartsResp - err := m.Bucket.Client.query(req, &resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - parts = append(parts, resp.Part...) - if !resp.IsTruncated { - sort.Sort(parts) - return parts, nil - } - params.Set("part-number-marker", resp.NextPartNumberMarker) - attempt = attempts.Start() // Last request worked. - } - panic("unreachable") -} - -type ReaderAtSeeker interface { - io.ReaderAt - io.ReadSeeker -} - -// PutAll sends all of r via a multipart upload with parts no larger -// than partSize bytes, which must be set to at least 5MB. -// Parts previously uploaded are either reused if their checksum -// and size match the new part, or otherwise overwritten with the -// new content. -// PutAll returns all the parts of m (reused or not). -func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) { - old, err := m.ListParts() - if err != nil && !hasCode(err, "NoSuchUpload") { - return nil, err - } - reuse := 0 // Index of next old part to consider reusing. - current := 1 // Part number of latest good part handled. - totalSize, err := r.Seek(0, 2) - if err != nil { - return nil, err - } - first := true // Must send at least one empty part if the file is empty. - var result []Part -NextSection: - for offset := int64(0); offset < totalSize || first; offset += partSize { - first = false - if offset+partSize > totalSize { - partSize = totalSize - offset - } - section := io.NewSectionReader(r, offset, partSize) - _, md5hex, md5b64, err := seekerInfo(section) - if err != nil { - return nil, err - } - for reuse < len(old) && old[reuse].N <= current { - // Looks like this part was already sent. - part := &old[reuse] - etag := `"` + md5hex + `"` - if part.N == current && part.Size == partSize && part.ETag == etag { - // Checksum matches. Reuse the old part. - result = append(result, *part) - current++ - continue NextSection - } - reuse++ - } - - // Part wasn't found or doesn't match. Send it. - part, err := m.putPart(current, section, partSize, md5b64, 0) - if err != nil { - return nil, err - } - result = append(result, part) - current++ - } - return result, nil -} - -type completeUpload struct { - XMLName xml.Name `xml:"CompleteMultipartUpload"` - Parts completeParts `xml:"Part"` -} - -type completePart struct { - PartNumber int - ETag string -} - -type completeParts []completePart - -func (p completeParts) Len() int { return len(p) } -func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber } -func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// Complete assembles the given previously uploaded parts into the -// final object. This operation may take several minutes. 
-//
-func (m *Multi) Complete(parts []Part) error {
-    params := make(url.Values)
-    params.Set("uploadId", m.UploadId)
-
-    c := completeUpload{}
-    for _, p := range parts {
-        c.Parts = append(c.Parts, completePart{p.N, p.ETag})
-    }
-    sort.Sort(c.Parts)
-    data, err := xml.Marshal(&c)
-    if err != nil {
-        return err
-    }
-    for attempt := attempts.Start(); attempt.Next(); {
-        req := &request{
-            method:  "POST",
-            bucket:  m.Bucket.Name,
-            path:    m.Key,
-            params:  params,
-            payload: bytes.NewReader(data),
-        }
-        err := m.Bucket.Client.query(req, nil)
-        if shouldRetry(err) && attempt.HasNext() {
-            continue
-        }
-        return err
-    }
-    panic("unreachable")
-}
-
-// Abort deletes an unfinished multipart upload and any previously
-// uploaded parts for it.
-//
-// After a multipart upload is aborted, no additional parts can be
-// uploaded using it. However, if any part uploads are currently in
-// progress, those part uploads might or might not succeed. As a result,
-// it might be necessary to abort a given multipart upload multiple
-// times in order to completely free all storage consumed by all parts.
-//
-// NOTE: If the described scenario happens to you, please report back to
-// the goamz authors with details. In the future such retrying should be
-// handled internally, but it's not clear what happens precisely (Is an
-// error returned? Is the issue completely undetectable?).
-//
-//
-// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&AbortMultipartUpload
-func (m *Multi) Abort() error {
-    params := make(url.Values)
-    params.Set("uploadId", m.UploadId)
-
-    for attempt := attempts.Start(); attempt.Next(); {
-        req := &request{
-            method: "DELETE",
-            bucket: m.Bucket.Name,
-            path:   m.Key,
-            params: params,
-        }
-        err := m.Bucket.Client.query(req, nil)
-        if shouldRetry(err) && attempt.HasNext() {
-            continue
-        }
-        return err
-    }
-    panic("unreachable")
-}
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/regions.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/regions.go
deleted file mode 100644
index 0f250604..00000000
--- a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/regions.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package oss
-
-import (
-    "fmt"
-)
-
-// Region represents an OSS region
-type Region string
-
-// Constants of region definition
-const (
-    Hangzhou     = Region("oss-cn-hangzhou")
-    Qingdao      = Region("oss-cn-qingdao")
-    Beijing      = Region("oss-cn-beijing")
-    Hongkong     = Region("oss-cn-hongkong")
-    Shenzhen     = Region("oss-cn-shenzhen")
-    USWest1      = Region("oss-us-west-1")
-    USEast1      = Region("oss-us-east-1")
-    APSouthEast1 = Region("oss-ap-southeast-1")
-    Shanghai     = Region("oss-cn-shanghai")
-
-    DefaultRegion = Hangzhou
-)
-
-// GetEndpoint returns the endpoint of the region.
-func (r Region) GetEndpoint(internal bool, bucket string, secure bool) string {
-    if internal {
-        return r.GetInternalEndpoint(bucket, secure)
-    }
-    return r.GetInternetEndpoint(bucket, secure)
-}
-
-func getProtocol(secure bool) string {
-    protocol := "http"
-    if secure {
-        protocol = "https"
-    }
-    return protocol
-}
-
-// GetInternetEndpoint returns the public internet endpoint of the region.
-func (r Region) GetInternetEndpoint(bucket string, secure bool) string {
-    protocol := getProtocol(secure)
-    if bucket == "" {
-        return fmt.Sprintf("%s://oss.aliyuncs.com", protocol)
-    }
-    return fmt.Sprintf("%s://%s.%s.aliyuncs.com", protocol, bucket, string(r))
-}
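The hostname shapes this yields, for reference; an illustrative sketch (GetInternalEndpoint and the VPC variant follow below):

    fmt.Println(oss.Hangzhou.GetEndpoint(false, "mybucket", true))
    // https://mybucket.oss-cn-hangzhou.aliyuncs.com

    fmt.Println(oss.Beijing.GetEndpoint(true, "mybucket", false))
    // http://mybucket.oss-cn-beijing-internal.aliyuncs.com

    fmt.Println(oss.Hangzhou.GetEndpoint(false, "", false)) // no bucket: the service endpoint
    // http://oss.aliyuncs.com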
-// GetInternalEndpoint returns the internal (intranet) endpoint of the region.
-func (r Region) GetInternalEndpoint(bucket string, secure bool) string {
-    protocol := getProtocol(secure)
-    if bucket == "" {
-        return fmt.Sprintf("%s://oss-internal.aliyuncs.com", protocol)
-    }
-    return fmt.Sprintf("%s://%s.%s-internal.aliyuncs.com", protocol, bucket, string(r))
-}
-
-// GetVPCInternalEndpoint returns the VPC internal endpoint of the region.
-func (r Region) GetVPCInternalEndpoint(bucket string, secure bool) string {
-    protocol := getProtocol(secure)
-    if bucket == "" {
-        return fmt.Sprintf("%s://vpc100-oss-cn-hangzhou.aliyuncs.com", protocol)
-    }
-    if r == USEast1 {
-        return r.GetInternalEndpoint(bucket, secure)
-    } else {
-        return fmt.Sprintf("%s://%s.vpc100-%s.aliyuncs.com", protocol, bucket, string(r))
-    }
-}
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/signature.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/signature.go
deleted file mode 100644
index 12677175..00000000
--- a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/oss/signature.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package oss
-
-import (
-    "github.com/denverdino/aliyungo/util"
-    //"log"
-    "net/http"
-    "net/url"
-    "sort"
-    "strings"
-)
-
-const HeaderOSSPrefix = "x-oss-"
-
-var ossParamsToSign = map[string]bool{
-    "acl":                          true,
-    "delete":                       true,
-    "location":                     true,
-    "logging":                      true,
-    "notification":                 true,
-    "partNumber":                   true,
-    "policy":                       true,
-    "requestPayment":               true,
-    "torrent":                      true,
-    "uploadId":                     true,
-    "uploads":                      true,
-    "versionId":                    true,
-    "versioning":                   true,
-    "versions":                     true,
-    "response-content-type":        true,
-    "response-content-language":    true,
-    "response-expires":             true,
-    "response-cache-control":       true,
-    "response-content-disposition": true,
-    "response-content-encoding":    true,
-    "bucketInfo":                   true,
-}
-
-func (client *Client) signRequest(request *request) {
-    query := request.params
-
-    urlSignature := query.Get("OSSAccessKeyId") != ""
-
-    headers := request.headers
-    contentMd5 := headers.Get("Content-Md5")
-    contentType := headers.Get("Content-Type")
-    date := ""
-    if urlSignature {
-        date = query.Get("Expires")
-    } else {
-        date = headers.Get("Date")
-    }
-
-    resource := request.path
-    if request.bucket != "" {
-        resource = "/" + request.bucket + request.path
-    }
-    params := make(url.Values)
-    for k, v := range query {
-        if ossParamsToSign[k] {
-            params[k] = v
-        }
-    }
-
-    if len(params) > 0 {
-        resource = resource + "?" + util.Encode(params)
-    }
-
-    canonicalizedResource := resource
-
-    _, canonicalizedHeader := canonicalizeHeader(headers)
-
-    stringToSign := request.method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedHeader + canonicalizedResource
-
-    //log.Println("stringToSign: ", stringToSign)
-    signature := util.CreateSignature(stringToSign, client.AccessKeySecret)
-
-    if urlSignature {
-        query.Set("Signature", signature)
-    } else {
-        headers.Set("Authorization", "OSS "+client.AccessKeyId+":"+signature)
-    }
-}
-
-// canonicalizeHeader, below, has to break the http.Header abstraction to
-// append keys in lower case.
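Concretely, for a header-signed GET of object myobject in mybucket with its acl subresource, and with a security token header set, signRequest above assembles a string to sign of this shape (the date value is illustrative):

    GET
                                       <- empty Content-Md5 line
                                       <- empty Content-Type line
    Wed, 28 Oct 2015 03:23:12 GMT
    x-oss-security-token:<token>
    /mybucket/myobject?acl

It is then HMAC-SHA1 signed with the AccessKeySecret (util.CreateSignature, further down in this diff) and sent as "Authorization: OSS <AccessKeyId>:<signature>". canonicalizeHeader, which lowercases, filters, and sorts the x-oss-* headers into the block above, is declared next.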
-func canonicalizeHeader(headers http.Header) (newHeaders http.Header, result string) { - var canonicalizedHeaders []string - newHeaders = http.Header{} - - for k, v := range headers { - if lower := strings.ToLower(k); strings.HasPrefix(lower, HeaderOSSPrefix) { - newHeaders[lower] = v - canonicalizedHeaders = append(canonicalizedHeaders, lower) - } else { - newHeaders[k] = v - } - } - - sort.Strings(canonicalizedHeaders) - - var canonicalizedHeader string - - for _, k := range canonicalizedHeaders { - canonicalizedHeader += k + ":" + headers.Get(k) + "\n" - } - - return newHeaders, canonicalizedHeader -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/attempt.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/attempt.go deleted file mode 100644 index 2d07f03a..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/attempt.go +++ /dev/null @@ -1,76 +0,0 @@ -package util - -import ( - "time" -) - -// AttemptStrategy is reused from the goamz package - -// AttemptStrategy represents a strategy for waiting for an action -// to complete successfully. This is an internal type used by the -// implementation of other packages. -type AttemptStrategy struct { - Total time.Duration // total duration of attempt. - Delay time.Duration // interval between each try in the burst. - Min int // minimum number of retries; overrides Total -} - -type Attempt struct { - strategy AttemptStrategy - last time.Time - end time.Time - force bool - count int -} - -// Start begins a new sequence of attempts for the given strategy. -func (s AttemptStrategy) Start() *Attempt { - now := time.Now() - return &Attempt{ - strategy: s, - last: now, - end: now.Add(s.Total), - force: true, - } -} - -// Next waits until it is time to perform the next attempt or returns -// false if it is time to stop trying. -func (a *Attempt) Next() bool { - now := time.Now() - sleep := a.nextSleep(now) - if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count { - return false - } - a.force = false - if sleep > 0 && a.count > 0 { - time.Sleep(sleep) - now = time.Now() - } - a.count++ - a.last = now - return true -} - -func (a *Attempt) nextSleep(now time.Time) time.Duration { - sleep := a.strategy.Delay - now.Sub(a.last) - if sleep < 0 { - return 0 - } - return sleep -} - -// HasNext returns whether another attempt will be made if the current -// one fails. If it returns true, the following call to Next is -// guaranteed to return true. 
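Tying attempt.go back to the OSS client earlier in this diff: every network call there runs under a loop of this shape. A condensed sketch, written as if inside the oss package; the package-level attempts values live in the deleted client.go, outside this hunk, so the numbers here are only indicative. (HasNext, whose contract is described just above, follows.)

    var attempts = util.AttemptStrategy{
        Min:   5,                      // always try at least 5 times
        Total: 5 * time.Second,        // ... within roughly this time budget
        Delay: 200 * time.Millisecond, // pause between tries
    }

    // doWithRetry runs op until it succeeds, returns a non-retryable
    // error, or the strategy is exhausted.
    func doWithRetry(op func() error) (err error) {
        for attempt := attempts.Start(); attempt.Next(); {
            err = op()
            if !shouldRetry(err) { // shouldRetry as defined in the oss client above
                break
            }
        }
        return err
    }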
-func (a *Attempt) HasNext() bool { - if a.force || a.strategy.Min > a.count { - return true - } - now := time.Now() - if now.Add(a.nextSleep(now)).Before(a.end) { - a.force = true - return true - } - return false -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/encoding.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/encoding.go deleted file mode 100644 index e545e069..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/encoding.go +++ /dev/null @@ -1,152 +0,0 @@ -package util - -import ( - "encoding/json" - "fmt" - "log" - "net/url" - "reflect" - "strconv" - "time" -) - -//ConvertToQueryValues converts the struct to url.Values -func ConvertToQueryValues(ifc interface{}) url.Values { - values := url.Values{} - SetQueryValues(ifc, &values) - return values -} - -//SetQueryValues sets the struct to existing url.Values following ECS encoding rules -func SetQueryValues(ifc interface{}, values *url.Values) { - setQueryValues(ifc, values, "") -} - -func setQueryValues(i interface{}, values *url.Values, prefix string) { - // add to support url.Values - mapValues, ok := i.(url.Values) - if ok { - for k, _ := range mapValues { - values.Set(k, mapValues.Get(k)) - } - return - } - - elem := reflect.ValueOf(i) - if elem.Kind() == reflect.Ptr { - elem = elem.Elem() - } - elemType := elem.Type() - for i := 0; i < elem.NumField(); i++ { - - fieldName := elemType.Field(i).Name - anonymous := elemType.Field(i).Anonymous - field := elem.Field(i) - // TODO Use Tag for validation - // tag := typ.Field(i).Tag.Get("tagname") - kind := field.Kind() - if (kind == reflect.Ptr || kind == reflect.Array || kind == reflect.Slice || kind == reflect.Map || kind == reflect.Chan) && field.IsNil() { - continue - } - if kind == reflect.Ptr { - field = field.Elem() - kind = field.Kind() - } - var value string - //switch field.Interface().(type) { - switch kind { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - i := field.Int() - if i != 0 { - value = strconv.FormatInt(i, 10) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - i := field.Uint() - if i != 0 { - value = strconv.FormatUint(i, 10) - } - case reflect.Float32: - value = strconv.FormatFloat(field.Float(), 'f', 4, 32) - case reflect.Float64: - value = strconv.FormatFloat(field.Float(), 'f', 4, 64) - case reflect.Bool: - value = strconv.FormatBool(field.Bool()) - case reflect.String: - value = field.String() - case reflect.Map: - ifc := field.Interface() - m := ifc.(map[string]string) - if m != nil { - j := 0 - for k, v := range m { - j++ - keyName := fmt.Sprintf("%s.%d.Key", fieldName, j) - values.Set(keyName, k) - valueName := fmt.Sprintf("%s.%d.Value", fieldName, j) - values.Set(valueName, v) - } - } - case reflect.Slice: - switch field.Type().Elem().Kind() { - case reflect.Uint8: - value = string(field.Bytes()) - case reflect.String: - l := field.Len() - if l > 0 { - strArray := make([]string, l) - for i := 0; i < l; i++ { - strArray[i] = field.Index(i).String() - } - bytes, err := json.Marshal(strArray) - if err == nil { - value = string(bytes) - } else { - log.Printf("Failed to convert JSON: %v", err) - } - } - default: - l := field.Len() - for j := 0; j < l; j++ { - prefixName := fmt.Sprintf("%s.%d.", fieldName, (j + 1)) - ifc := field.Index(j).Interface() - //log.Printf("%s : %v", prefixName, ifc) - if ifc != nil { - setQueryValues(ifc, values, prefixName) - } - } 
- continue - } - - default: - switch field.Interface().(type) { - case ISO6801Time: - t := field.Interface().(ISO6801Time) - value = t.String() - case time.Time: - t := field.Interface().(time.Time) - value = GetISO8601TimeStamp(t) - default: - ifc := field.Interface() - if ifc != nil { - if anonymous { - SetQueryValues(ifc, values) - } else { - prefixName := fieldName + "." - setQueryValues(ifc, values, prefixName) - } - continue - } - } - } - if value != "" { - name := elemType.Field(i).Tag.Get("ArgName") - if name == "" { - name = fieldName - } - if prefix != "" { - name = prefix + name - } - values.Set(name, value) - } - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/iso6801.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/iso6801.go deleted file mode 100644 index 9c25e8f6..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/iso6801.go +++ /dev/null @@ -1,80 +0,0 @@ -package util - -import ( - "fmt" - "strconv" - "time" -) - -// GetISO8601TimeStamp gets timestamp string in ISO8601 format -func GetISO8601TimeStamp(ts time.Time) string { - t := ts.UTC() - return fmt.Sprintf("%04d-%02d-%02dT%02d:%02d:%02dZ", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second()) -} - -const formatISO8601 = "2006-01-02T15:04:05Z" -const jsonFormatISO8601 = `"` + formatISO8601 + `"` -const formatISO8601withoutSeconds = "2006-01-02T15:04Z" -const jsonFormatISO8601withoutSeconds = `"` + formatISO8601withoutSeconds + `"` - -// A ISO6801Time represents a time in ISO8601 format -type ISO6801Time time.Time - -// New constructs a new iso8601.Time instance from an existing -// time.Time instance. This causes the nanosecond field to be set to -// 0, and its time zone set to a fixed zone with no offset from UTC -// (but it is *not* UTC itself). 
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/iso6801.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/iso6801.go
deleted file mode 100644
index 9c25e8f6..00000000
--- a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/iso6801.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package util
-
-import (
-	"fmt"
-	"strconv"
-	"time"
-)
-
-// GetISO8601TimeStamp gets a timestamp string in ISO8601 format
-func GetISO8601TimeStamp(ts time.Time) string {
-	t := ts.UTC()
-	return fmt.Sprintf("%04d-%02d-%02dT%02d:%02d:%02dZ", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second())
-}
-
-const formatISO8601 = "2006-01-02T15:04:05Z"
-const jsonFormatISO8601 = `"` + formatISO8601 + `"`
-const formatISO8601withoutSeconds = "2006-01-02T15:04Z"
-const jsonFormatISO8601withoutSeconds = `"` + formatISO8601withoutSeconds + `"`
-
-// An ISO6801Time represents a time in ISO8601 format
-type ISO6801Time time.Time
-
-// NewISO6801Time constructs a new ISO6801Time instance from an
-// existing time.Time instance. This causes the nanosecond field to
-// be set to 0, and the time zone to be set to UTC, matching the
-// time.Date call below.
-func NewISO6801Time(t time.Time) ISO6801Time {
-	return ISO6801Time(time.Date(
-		t.Year(),
-		t.Month(),
-		t.Day(),
-		t.Hour(),
-		t.Minute(),
-		t.Second(),
-		0,
-		time.UTC,
-	))
-}
-
-// IsDefault checks if the time is the default (zero) value
-func (it *ISO6801Time) IsDefault() bool {
-	return *it == ISO6801Time{}
-}
-
-// MarshalJSON serializes the ISO6801Time into a JSON string
-func (it ISO6801Time) MarshalJSON() ([]byte, error) {
-	return []byte(time.Time(it).Format(jsonFormatISO8601)), nil
-}
-
-// UnmarshalJSON deserializes the ISO6801Time from a JSON string
-func (it *ISO6801Time) UnmarshalJSON(data []byte) error {
-	str := string(data)
-
-	if str == "\"\"" || len(data) == 0 {
-		return nil
-	}
-	var t time.Time
-	var err error
-	if str[0] == '"' {
-		t, err = time.ParseInLocation(jsonFormatISO8601, str, time.UTC)
-		if err != nil {
-			t, err = time.ParseInLocation(jsonFormatISO8601withoutSeconds, str, time.UTC)
-		}
-	} else {
-		var i int64
-		i, err = strconv.ParseInt(str, 10, 64)
-		if err == nil {
-			t = time.Unix(i/1000, i%1000)
-		}
-	}
-	if err == nil {
-		*it = ISO6801Time(t)
-	}
-	return err
-}
-
-// String returns the time in ISO8601 format
-func (it ISO6801Time) String() string {
-	return time.Time(it).Format(formatISO8601)
-}
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/signature.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/signature.go
deleted file mode 100644
index a00b27c1..00000000
--- a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/signature.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package util
-
-import (
-	"crypto/hmac"
-	"crypto/sha1"
-	"encoding/base64"
-	"net/url"
-	"strings"
-)
-
-// CreateSignature creates a signature for a string following Aliyun rules
-func CreateSignature(stringToSignature, accessKeySecret string) string {
-	// Crypto by HMAC-SHA1
-	hmacSha1 := hmac.New(sha1.New, []byte(accessKeySecret))
-	hmacSha1.Write([]byte(stringToSignature))
-	sign := hmacSha1.Sum(nil)
-
-	// Encode to Base64
-	base64Sign := base64.StdEncoding.EncodeToString(sign)
-
-	return base64Sign
-}
-
-func percentReplace(str string) string {
-	str = strings.Replace(str, "+", "%20", -1)
-	str = strings.Replace(str, "*", "%2A", -1)
-	str = strings.Replace(str, "%7E", "~", -1)
-
-	return str
-}
-
-// CreateSignatureForRequest creates a signature for query string values
-func CreateSignatureForRequest(method string, values *url.Values, accessKeySecret string) string {
-
-	canonicalizedQueryString := percentReplace(values.Encode())
-
-	stringToSign := method + "&%2F&" + url.QueryEscape(canonicalizedQueryString)
-
-	return CreateSignature(stringToSign, accessKeySecret)
-}
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/util.go b/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/util.go
deleted file mode 100644
index dd68214e..00000000
--- a/vendor/github.com/docker/distribution/vendor/github.com/denverdino/aliyungo/util/util.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package util
-
-import (
-	"bytes"
-	srand "crypto/rand"
-	"encoding/binary"
-	"math/rand"
-	"net/http"
-	"net/url"
-	"sort"
-	"time"
-)
-
-const dictionary = "_0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
-
-// CreateRandomString creates a random string
-func CreateRandomString() string {
-	b := make([]byte, 32)
-	l := len(dictionary)
-
-	_, err := srand.Read(b)
-
-	if err != nil {
-		// fall back to insecure rand
-		rand.Seed(time.Now().UnixNano())
-
-		for i := range b {
-			b[i] = dictionary[rand.Int()%l]
-		}
-	} else {
-		for i, v := range b {
-			b[i] = dictionary[v%byte(l)]
-		}
-	}
-
-	return string(b)
-}
-
-// Encode encodes the values into ``URL encoded'' form
-// ("acl&bar=baz&foo=quux") sorted by key.
-func Encode(v url.Values) string {
-	if v == nil {
-		return ""
-	}
-	var buf bytes.Buffer
-	keys := make([]string, 0, len(v))
-	for k := range v {
-		keys = append(keys, k)
-	}
-	sort.Strings(keys)
-	for _, k := range keys {
-		vs := v[k]
-		prefix := url.QueryEscape(k)
-		for _, v := range vs {
-			if buf.Len() > 0 {
-				buf.WriteByte('&')
-			}
-			buf.WriteString(prefix)
-			if v != "" {
-				buf.WriteString("=")
-				buf.WriteString(url.QueryEscape(v))
-			}
-		}
-	}
-	return buf.String()
-}
-
-func GetGMTime() string {
-	return time.Now().UTC().Format(http.TimeFormat)
-}
-
-//
-
-func randUint32() uint32 {
-	return randUint32Slice(1)[0]
-}
-
-func randUint32Slice(c int) []uint32 {
-	b := make([]byte, c*4)
-
-	_, err := srand.Read(b)
-
-	if err != nil {
-		// fall back to insecure rand
-		rand.Seed(time.Now().UnixNano())
-		for i := range b {
-			b[i] = byte(rand.Int())
-		}
-	}
-
-	n := make([]uint32, c)
-
-	for i := range n {
-		n[i] = binary.BigEndian.Uint32(b[i*4 : i*4+4])
-	}
-
-	return n
-}
-
-func toByte(n uint32, st, ed byte) byte {
-	return byte(n%uint32(ed-st+1) + uint32(st))
-}
-
-func toDigit(n uint32) byte {
-	return toByte(n, '0', '9')
-}
-
-func toLowerLetter(n uint32) byte {
-	return toByte(n, 'a', 'z')
-}
-
-func toUpperLetter(n uint32) byte {
-	return toByte(n, 'A', 'Z')
-}
-
-type convFunc func(uint32) byte
-
-var convFuncs = []convFunc{toDigit, toLowerLetter, toUpperLetter}
-
-// tools for generating a random ECS instance password:
-// from 8 to 30 chars; MUST contain a digit, a lower-case letter and an upper-case letter
-// http://docs.aliyun.com/#/pub/ecs/open-api/instance&createinstance
-func GenerateRandomECSPassword() string {
-
-	// [8, 30]
-	l := int(randUint32()%23 + 8)
-
-	n := randUint32Slice(l)
-
-	b := make([]byte, l)
-
-	b[0] = toDigit(n[0])
-	b[1] = toLowerLetter(n[1])
-	b[2] = toUpperLetter(n[2])
-
-	for i := 3; i < l; i++ {
-		b[i] = convFuncs[n[i]%3](n[i])
-	}
-
-	s := make([]byte, l)
-	perm := rand.Perm(l)
-	for i, v := range perm {
-		s[v] = b[i]
-	}
-
-	return string(s)
-
-}
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/LICENSE
deleted file mode 100644
index 53320c35..00000000
--- a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/LICENSE
+++ /dev/null
@@ -1,185 +0,0 @@
-This software is licensed under the LGPLv3, included below.
-
-As a special exception to the GNU Lesser General Public License version 3
-("LGPL3"), the copyright holders of this Library give you permission to
-convey to a third party a Combined Work that links statically or dynamically
-to this Library without providing any Minimal Corresponding Source or
-Minimal Application Code as set out in 4d or providing the installation
-information set out in section 4e, provided that you comply with the other
-provisions of LGPL3 and provided that you meet, for the Application the
-terms and conditions of the license(s) which apply to the Application.
-
-Except as stated in this special exception, the provisions of LGPL3 will
-continue to comply in full to this Library. If you modify this Library, you
-may apply this exception to your version of this Library, but you are not
-obliged to do so. If you do not wish to do so, delete this exception
-statement from your version. This exception does not (and cannot) modify any
-license terms which apply to the Application, with which you must still
-comply.
-
-
-                  GNU LESSER GENERAL PUBLIC LICENSE
-                       Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-
-  This version of the GNU Lesser General Public License incorporates
-the terms and conditions of version 3 of the GNU General Public
-License, supplemented by the additional permissions listed below.
-
-  0. Additional Definitions.
-
-  As used herein, "this License" refers to version 3 of the GNU Lesser
-General Public License, and the "GNU GPL" refers to version 3 of the GNU
-General Public License.
-
-  "The Library" refers to a covered work governed by this License,
-other than an Application or a Combined Work as defined below.
-
-  An "Application" is any work that makes use of an interface provided
-by the Library, but which is not otherwise based on the Library.
-Defining a subclass of a class defined by the Library is deemed a mode
-of using an interface provided by the Library.
-
-  A "Combined Work" is a work produced by combining or linking an
-Application with the Library. The particular version of the Library
-with which the Combined Work was made is also called the "Linked
-Version".
-
-  The "Minimal Corresponding Source" for a Combined Work means the
-Corresponding Source for the Combined Work, excluding any source code
-for portions of the Combined Work that, considered in isolation, are
-based on the Application, and not on the Linked Version.
-
-  The "Corresponding Application Code" for a Combined Work means the
-object code and/or source code for the Application, including any data
-and utility programs needed for reproducing the Combined Work from the
-Application, but excluding the System Libraries of the Combined Work.
-
-  1. Exception to Section 3 of the GNU GPL.
-
-  You may convey a covered work under sections 3 and 4 of this License
-without being bound by section 3 of the GNU GPL.
-
-  2. Conveying Modified Versions.
-
-  If you modify a copy of the Library, and, in your modifications, a
-facility refers to a function or data to be supplied by an Application
-that uses the facility (other than as an argument passed when the
-facility is invoked), then you may convey a copy of the modified
-version:
-
-   a) under this License, provided that you make a good faith effort to
-   ensure that, in the event an Application does not supply the
-   function or data, the facility still operates, and performs
-   whatever part of its purpose remains meaningful, or
-
-   b) under the GNU GPL, with none of the additional permissions of
-   this License applicable to that copy.
-
-  3. Object Code Incorporating Material from Library Header Files.
-
-  The object code form of an Application may incorporate material from
-a header file that is part of the Library. You may convey such object
-code under terms of your choice, provided that, if the incorporated
-material is not limited to numerical parameters, data structure
-layouts and accessors, or small macros, inline functions and templates
-(ten or fewer lines in length), you do both of the following:
-
-   a) Give prominent notice with each copy of the object code that the
-   Library is used in it and that the Library and its use are
-   covered by this License.
-
-   b) Accompany the object code with a copy of the GNU GPL and this license
-   document.
-
-  4. Combined Works.
-
-  You may convey a Combined Work under terms of your choice that,
-taken together, effectively do not restrict modification of the
-portions of the Library contained in the Combined Work and reverse
-engineering for debugging such modifications, if you also do each of
-the following:
-
-   a) Give prominent notice with each copy of the Combined Work that
-   the Library is used in it and that the Library and its use are
-   covered by this License.
-
-   b) Accompany the Combined Work with a copy of the GNU GPL and this license
-   document.
-
-   c) For a Combined Work that displays copyright notices during
-   execution, include the copyright notice for the Library among
-   these notices, as well as a reference directing the user to the
-   copies of the GNU GPL and this license document.
-
-   d) Do one of the following:
-
-       0) Convey the Minimal Corresponding Source under the terms of this
-       License, and the Corresponding Application Code in a form
-       suitable for, and under terms that permit, the user to
-       recombine or relink the Application with a modified version of
-       the Linked Version to produce a modified Combined Work, in the
-       manner specified by section 6 of the GNU GPL for conveying
-       Corresponding Source.
-
-       1) Use a suitable shared library mechanism for linking with the
-       Library. A suitable mechanism is one that (a) uses at run time
-       a copy of the Library already present on the user's computer
-       system, and (b) will operate properly with a modified version
-       of the Library that is interface-compatible with the Linked
-       Version.
-
-   e) Provide Installation Information, but only if you would otherwise
-   be required to provide such information under section 6 of the
-   GNU GPL, and only to the extent that such information is
-   necessary to install and execute a modified version of the
-   Combined Work produced by recombining or relinking the
-   Application with a modified version of the Linked Version. (If
-   you use option 4d0, the Installation Information must accompany
-   the Minimal Corresponding Source and Corresponding Application
-   Code. If you use option 4d1, you must provide the Installation
-   Information in the manner specified by section 6 of the GNU GPL
-   for conveying Corresponding Source.)
-
-  5. Combined Libraries.
-
-  You may place library facilities that are a work based on the
-Library side by side in a single library together with other library
-facilities that are not Applications and are not covered by this
-License, and convey such a combined library under terms of your
-choice, if you do both of the following:
-
-   a) Accompany the combined library with a copy of the same work based
-   on the Library, uncombined with any other library facilities,
-   conveyed under the terms of this License.
-
-   b) Give prominent notice with the combined library that part of it
-   is a work based on the Library, and explaining where to find the
-   accompanying uncombined form of the same work.
-
-  6. Revised Versions of the GNU Lesser General Public License.
-
-  The Free Software Foundation may publish revised and/or new versions
-of the GNU Lesser General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
-  Each version is given a distinguishing version number. If the
-Library as you received it specifies that a certain numbered version
-of the GNU Lesser General Public License "or any later version"
-applies to it, you have the option of following the terms and
-conditions either of that published version or of any later version
-published by the Free Software Foundation. If the Library as you
-received it does not specify a version number of the GNU Lesser
-General Public License, you may choose any version of the GNU Lesser
-General Public License ever published by the Free Software Foundation.
-
-  If the Library as you received it specifies that a proxy can decide
-whether future versions of the GNU Lesser General Public License shall
-apply, that proxy's public statement of acceptance of any version is
-permanent authorization for you to choose that version for the
-Library.
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/attempt.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/attempt.go
deleted file mode 100644
index c0654f5d..00000000
--- a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/attempt.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package aws
-
-import (
-	"time"
-)
-
-// AttemptStrategy represents a strategy for waiting for an action
-// to complete successfully. This is an internal type used by the
-// implementation of other goamz packages.
-type AttemptStrategy struct {
-	Total time.Duration // total duration of attempt.
-	Delay time.Duration // interval between each try in the burst.
-	Min   int           // minimum number of retries; overrides Total
-}
-
-type Attempt struct {
-	strategy AttemptStrategy
-	last     time.Time
-	end      time.Time
-	force    bool
-	count    int
-}
-
-// Start begins a new sequence of attempts for the given strategy.
-func (s AttemptStrategy) Start() *Attempt {
-	now := time.Now()
-	return &Attempt{
-		strategy: s,
-		last:     now,
-		end:      now.Add(s.Total),
-		force:    true,
-	}
-}
-
-// Next waits until it is time to perform the next attempt or returns
-// false if it is time to stop trying.
-func (a *Attempt) Next() bool {
-	now := time.Now()
-	sleep := a.nextSleep(now)
-	if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count {
-		return false
-	}
-	a.force = false
-	if sleep > 0 && a.count > 0 {
-		time.Sleep(sleep)
-		now = time.Now()
-	}
-	a.count++
-	a.last = now
-	return true
-}
-
-func (a *Attempt) nextSleep(now time.Time) time.Duration {
-	sleep := a.strategy.Delay - now.Sub(a.last)
-	if sleep < 0 {
-		return 0
-	}
-	return sleep
-}
-
-// HasNext returns whether another attempt will be made if the current
-// one fails. If it returns true, the following call to Next is
-// guaranteed to return true.
-func (a *Attempt) HasNext() bool {
-	if a.force || a.strategy.Min > a.count {
-		return true
-	}
-	now := time.Now()
-	if now.Add(a.nextSleep(now)).Before(a.end) {
-		a.force = true
-		return true
-	}
-	return false
-}
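For orientation, the AttemptStrategy type deleted above is normally driven in a simple loop. A minimal sketch, assuming the upstream package is importable as github.com/docker/goamz/aws; callOnce is a placeholder operation:

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/docker/goamz/aws"
)

// callOnce stands in for any operation that can fail transiently.
func callOnce() error { return errors.New("transient failure") }

func main() {
	// Try for up to one second in total, pausing 250ms between attempts.
	strategy := aws.AttemptStrategy{Total: time.Second, Delay: 250 * time.Millisecond}
	for attempt := strategy.Start(); attempt.Next(); {
		err := callOnce()
		if err == nil {
			fmt.Println("succeeded")
			return
		}
		if !attempt.HasNext() {
			fmt.Println("giving up:", err)
		}
	}
}
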
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/aws.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/aws.go
deleted file mode 100644
index ac8340ad..00000000
--- a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/aws.go
+++ /dev/null
@@ -1,636 +0,0 @@
-//
-// goamz - Go packages to interact with the Amazon Web Services.
-//
-// https://wiki.ubuntu.com/goamz
-//
-// Copyright (c) 2011 Canonical Ltd.
-//
-// Written by Gustavo Niemeyer
-//
-package aws
-
-import (
-	"encoding/json"
-	"encoding/xml"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"net"
-	"net/http"
-	"net/url"
-	"os"
-	"os/user"
-	"path"
-	"regexp"
-	"strings"
-	"time"
-)
-
-// Regular expressions for INI files
-var (
-	iniSectionRegexp = regexp.MustCompile(`^\s*\[([^\[\]]+)\]\s*$`)
-	iniSettingRegexp = regexp.MustCompile(`^\s*(.+?)\s*=\s*(.*\S)\s*$`)
-)
-
-// Defines the valid signers
-const (
-	V2Signature      = iota
-	V4Signature      = iota
-	Route53Signature = iota
-)
-
-// Defines the service endpoint and correct Signer implementation to use
-// to sign requests for this endpoint
-type ServiceInfo struct {
-	Endpoint string
-	Signer   uint
-}
-
-// Region defines the URLs where AWS services may be accessed.
-//
-// See http://goo.gl/d8BP1 for more details.
-type Region struct {
-	Name                   string // the canonical name of this region.
-	EC2Endpoint            ServiceInfo
-	S3Endpoint             string
-	S3BucketEndpoint       string // Not needed by AWS S3. Use ${bucket} for bucket name.
-	S3LocationConstraint   bool   // true if this region requires a LocationConstraint declaration.
-	S3LowercaseBucket      bool   // true if the region requires bucket names to be lower case.
-	SDBEndpoint            string
-	SNSEndpoint            string
-	SQSEndpoint            string
-	SESEndpoint            string
-	IAMEndpoint            string
-	ELBEndpoint            string
-	KMSEndpoint            string
-	DynamoDBEndpoint       string
-	CloudWatchServicepoint ServiceInfo
-	AutoScalingEndpoint    string
-	RDSEndpoint            ServiceInfo
-	KinesisEndpoint        string
-	STSEndpoint            string
-	CloudFormationEndpoint string
-	ElastiCacheEndpoint    string
-}
-
-var Regions = map[string]Region{
-	APNortheast.Name:  APNortheast,
-	APNortheast2.Name: APNortheast2,
-	APSoutheast.Name:  APSoutheast,
-	APSoutheast2.Name: APSoutheast2,
-	EUCentral.Name:    EUCentral,
-	EUWest.Name:       EUWest,
-	USEast.Name:       USEast,
-	USWest.Name:       USWest,
-	USWest2.Name:      USWest2,
-	USGovWest.Name:    USGovWest,
-	SAEast.Name:       SAEast,
-	CNNorth1.Name:     CNNorth1,
-}
-
-// Designates a signer interface suitable for signing AWS requests; params
-// should be appropriately encoded for the request before signing.
-//
-// A signer should be initialized with Auth and the appropriate endpoint.
-type Signer interface {
-	Sign(method, path string, params map[string]string)
-}
-
-// An AWS Service interface with the API to query the AWS service
-//
-// Supplied as an easy way to mock out service calls during testing.
-type AWSService interface {
-	// Queries the AWS service at a given method/path with the params and
-	// returns an http.Response and error
-	Query(method, path string, params map[string]string) (*http.Response, error)
-	// Builds an error given an XML payload in the http.Response; can be used
-	// to process an error if the status code is not 200, for example.
-	BuildError(r *http.Response) error
-}
-
-// Implements a Server Query/Post API to easily query AWS services and build
-// errors when desired
-type Service struct {
-	service ServiceInfo
-	signer  Signer
-}
-
-// Create a base set of params for an action
-func MakeParams(action string) map[string]string {
-	params := make(map[string]string)
-	params["Action"] = action
-	return params
-}
-
-// Create a new AWS service to handle making requests
-func NewService(auth Auth, service ServiceInfo) (s *Service, err error) {
-	var signer Signer
-	switch service.Signer {
-	case V2Signature:
-		signer, err = NewV2Signer(auth, service)
-	// case V4Signature:
-	// 	signer, err = NewV4Signer(auth, service, Regions["eu-west-1"])
-	default:
-		err = fmt.Errorf("Unsupported signer for service")
-	}
-	if err != nil {
-		return
-	}
-	s = &Service{service: service, signer: signer}
-	return
-}
-
-func (s *Service) Query(method, path string, params map[string]string) (resp *http.Response, err error) {
-	params["Timestamp"] = time.Now().UTC().Format(time.RFC3339)
-	u, err := url.Parse(s.service.Endpoint)
-	if err != nil {
-		return nil, err
-	}
-	u.Path = path
-
-	s.signer.Sign(method, path, params)
-	if method == "GET" {
-		u.RawQuery = multimap(params).Encode()
-		resp, err = http.Get(u.String())
-	} else if method == "POST" {
-		resp, err = http.PostForm(u.String(), multimap(params))
-	}
-
-	return
-}
-
-func (s *Service) BuildError(r *http.Response) error {
-	errors := ErrorResponse{}
-	xml.NewDecoder(r.Body).Decode(&errors)
-	var err Error
-	err = errors.Errors
-	err.RequestId = errors.RequestId
-	err.StatusCode = r.StatusCode
-	if err.Message == "" {
-		err.Message = r.Status
-	}
-	return &err
-}
-
-type ServiceError interface {
-	error
-	ErrorCode() string
-}
-
-type ErrorResponse struct {
-	Errors    Error  `xml:"Error"`
-	RequestId string // A unique ID for tracking the request
-}
-
-type Error struct {
-	StatusCode int
-	Type       string
-	Code       string
-	Message    string
-	RequestId  string
-}
-
-func (err *Error) Error() string {
-	return fmt.Sprintf("Type: %s, Code: %s, Message: %s",
-		err.Type, err.Code, err.Message,
-	)
-}
-
-func (err *Error) ErrorCode() string {
-	return err.Code
-}
-
-type Auth struct {
-	AccessKey, SecretKey string
-	token                string
-	expiration           time.Time
-}
-
-func (a *Auth) Token() string {
-	if a.token == "" {
-		return ""
-	}
-	if time.Since(a.expiration) >= -30*time.Second { // in an ideal world this should be zero, assuming the instance is syncing its clock
-		auth, err := GetAuth("", "", "", time.Time{})
-		if err == nil {
-			*a = auth
-		}
-	}
-	return a.token
-}
-
-func (a *Auth) Expiration() time.Time {
-	return a.expiration
-}
-
-// To be used with other APIs that return auth credentials such as STS
-func NewAuth(accessKey, secretKey, token string, expiration time.Time) *Auth {
-	return &Auth{
-		AccessKey:  accessKey,
-		SecretKey:  secretKey,
-		token:      token,
-		expiration: expiration,
-	}
-}
-
-// ResponseMetadata
-type ResponseMetadata struct {
-	RequestId string // A unique ID for tracking the request
-}
-
-type BaseResponse struct {
-	ResponseMetadata ResponseMetadata
-}
-
-var unreserved = make([]bool, 128)
-var hex = "0123456789ABCDEF"
-
-func init() {
-	// RFC3986
-	u := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~"
-	for _, c := range u {
-		unreserved[c] = true
-	}
-}
-
-func multimap(p map[string]string) url.Values {
-	q := make(url.Values, len(p))
-	for k, v := range p {
-		q[k] = []string{v}
-	}
-	return q
-}
-
-type credentials struct {
-	Code            string
-	LastUpdated     string
-	Type            string
-	AccessKeyId     string
-	SecretAccessKey string
-	Token           string
-	Expiration      string
-}
-
-// GetMetaData retrieves instance metadata about the current machine.
-//
-// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html for more details.
-func GetMetaData(path string) (contents []byte, err error) {
-	c := http.Client{
-		Transport: &http.Transport{
-			Dial: func(netw, addr string) (net.Conn, error) {
-				deadline := time.Now().Add(5 * time.Second)
-				c, err := net.DialTimeout(netw, addr, time.Second*2)
-				if err != nil {
-					return nil, err
-				}
-				c.SetDeadline(deadline)
-				return c, nil
-			},
-		},
-	}
-
-	url := "http://169.254.169.254/latest/meta-data/" + path
-
-	resp, err := c.Get(url)
-	if err != nil {
-		return
-	}
-	defer resp.Body.Close()
-
-	if resp.StatusCode != 200 {
-		err = fmt.Errorf("Code %d returned for url %s", resp.StatusCode, url)
-		return
-	}
-
-	body, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return
-	}
-	return []byte(body), err
-}
-
-func GetRegion(regionName string) (region Region) {
-	region = Regions[regionName]
-	return
-}
-
-// GetInstanceCredentials creates an Auth based on the instance's role credentials.
-// If the running instance is not in EC2 or does not have a valid IAM role, an error will be returned.
-// For more info about setting up IAM roles, see http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
-func GetInstanceCredentials() (cred credentials, err error) {
-	credentialPath := "iam/security-credentials/"
-
-	// Get the instance role
-	role, err := GetMetaData(credentialPath)
-	if err != nil {
-		return
-	}
-
-	// Get the instance role credentials
-	credentialJSON, err := GetMetaData(credentialPath + string(role))
-	if err != nil {
-		return
-	}
-
-	err = json.Unmarshal([]byte(credentialJSON), &cred)
-	return
-}
-
-// GetAuth creates an Auth based on either passed-in credentials,
-// environment information or instance based role credentials.
-func GetAuth(accessKey string, secretKey, token string, expiration time.Time) (auth Auth, err error) {
-	// First try passed-in credentials
-	if accessKey != "" && secretKey != "" {
-		return Auth{accessKey, secretKey, token, expiration}, nil
-	}
-
-	// Next try to get auth from the environment
-	auth, err = EnvAuth()
-	if err == nil {
-		// Found auth, return
-		return
-	}
-
-	// Next try getting auth from the instance role
-	cred, err := GetInstanceCredentials()
-	if err == nil {
-		// Found auth, return
-		auth.AccessKey = cred.AccessKeyId
-		auth.SecretKey = cred.SecretAccessKey
-		auth.token = cred.Token
-		exptdate, err := time.Parse("2006-01-02T15:04:05Z", cred.Expiration)
-		if err != nil {
-			err = fmt.Errorf("Error Parsing expiration date: cred.Expiration :%s , error: %s \n", cred.Expiration, err)
-		}
-		auth.expiration = exptdate
-		return auth, err
-	}
-
-	// Next try getting auth from the credentials file
-	auth, err = CredentialFileAuth("", "", time.Minute*5)
-	if err == nil {
-		return
-	}
-
-	//err = errors.New("No valid AWS authentication found")
-	err = fmt.Errorf("No valid AWS authentication found: %s", err)
-	return auth, err
-}
-
-// EnvAuth creates an Auth based on environment information.
-// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment
-// variables are used.
-func EnvAuth() (auth Auth, err error) {
-	auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID")
-	if auth.AccessKey == "" {
-		auth.AccessKey = os.Getenv("AWS_ACCESS_KEY")
-	}
-
-	auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
-	if auth.SecretKey == "" {
-		auth.SecretKey = os.Getenv("AWS_SECRET_KEY")
-	}
-	if auth.AccessKey == "" {
-		err = errors.New("AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment")
-	}
-	if auth.SecretKey == "" {
-		err = errors.New("AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment")
-	}
-	return
-}
-
-// CredentialFileAuth creates an Auth based on a credentials file. The file
-// contains various authentication profiles for use with AWS.
-//
-// The credentials file, which is used by other AWS SDKs, is documented at
-// http://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs
-func CredentialFileAuth(filePath string, profile string, expiration time.Duration) (auth Auth, err error) {
-	if profile == "" {
-		profile = os.Getenv("AWS_DEFAULT_PROFILE")
-		if profile == "" {
-			profile = os.Getenv("AWS_PROFILE")
-			if profile == "" {
-				profile = "default"
-			}
-		}
-	}
-
-	if filePath == "" {
-		u, err := user.Current()
-		if err != nil {
-			return auth, err
-		}
-
-		filePath = path.Join(u.HomeDir, ".aws", "credentials")
-	}
-
-	// read the file, then parse the INI
-	contents, err := ioutil.ReadFile(filePath)
-	if err != nil {
-		return
-	}
-
-	profiles := parseINI(string(contents))
-	profileData, ok := profiles[profile]
-
-	if !ok {
-		err = errors.New("The credentials file did not contain the profile")
-		return
-	}
-
-	keyId, ok := profileData["aws_access_key_id"]
-	if !ok {
-		err = errors.New("The credentials file did not contain required attribute aws_access_key_id")
-		return
-	}
-
-	secretKey, ok := profileData["aws_secret_access_key"]
-	if !ok {
-		err = errors.New("The credentials file did not contain required attribute aws_secret_access_key")
-		return
-	}
-
-	auth.AccessKey = keyId
-	auth.SecretKey = secretKey
-
-	if token, ok := profileData["aws_session_token"]; ok {
-		auth.token = token
-	}
-
-	auth.expiration = time.Now().Add(expiration)
-
-	return
-}
-
-// parseINI takes the contents of a credentials file and returns a map, whose keys
-// are the various profiles, and whose values are maps of the settings for the
-// profiles
-func parseINI(fileContents string) map[string]map[string]string {
-	profiles := make(map[string]map[string]string)
-
-	lines := strings.Split(fileContents, "\n")
-
-	var currentSection map[string]string
-	for _, line := range lines {
-		// remove comments, which start with a semi-colon
-		if split := strings.Split(line, ";"); len(split) > 1 {
-			line = split[0]
-		}
-
-		// check if the line is the start of a profile.
-		//
-		// for example:
-		//     [default]
-		//
-		// otherwise, check for the proper setting
-		//     property=value
-		if sectMatch := iniSectionRegexp.FindStringSubmatch(line); len(sectMatch) == 2 {
-			currentSection = make(map[string]string)
-			profiles[sectMatch[1]] = currentSection
-		} else if setMatch := iniSettingRegexp.FindStringSubmatch(line); len(setMatch) == 3 && currentSection != nil {
-			currentSection[setMatch[1]] = setMatch[2]
-		}
-	}
-
-	return profiles
-}
-
-// Encode takes a string and URI-encodes it in a way suitable
-// to be used in AWS signatures.
-func Encode(s string) string {
-	encode := false
-	for i := 0; i != len(s); i++ {
-		c := s[i]
-		if c > 127 || !unreserved[c] {
-			encode = true
-			break
-		}
-	}
-	if !encode {
-		return s
-	}
-	e := make([]byte, len(s)*3)
-	ei := 0
-	for i := 0; i != len(s); i++ {
-		c := s[i]
-		if c > 127 || !unreserved[c] {
-			e[ei] = '%'
-			e[ei+1] = hex[c>>4]
-			e[ei+2] = hex[c&0xF]
-			ei += 3
-		} else {
-			e[ei] = c
-			ei += 1
-		}
-	}
-	return string(e[:ei])
-}
-
-func dialTimeout(network, addr string) (net.Conn, error) {
-	return net.DialTimeout(network, addr, time.Duration(2*time.Second))
-}
-
-func AvailabilityZone() string {
-	transport := http.Transport{Dial: dialTimeout}
-	client := http.Client{
-		Transport: &transport,
-	}
-	resp, err := client.Get("http://169.254.169.254/latest/meta-data/placement/availability-zone")
-	if err != nil {
-		return "unknown"
-	} else {
-		defer resp.Body.Close()
-		body, err := ioutil.ReadAll(resp.Body)
-		if err != nil {
-			return "unknown"
-		} else {
-			return string(body)
-		}
-	}
-}
-
-func InstanceRegion() string {
-	az := AvailabilityZone()
-	if az == "unknown" {
-		return az
-	} else {
-		region := az[:len(az)-1]
-		return region
-	}
-}
-
-func InstanceId() string {
-	transport := http.Transport{Dial: dialTimeout}
-	client := http.Client{
-		Transport: &transport,
-	}
-	resp, err := client.Get("http://169.254.169.254/latest/meta-data/instance-id")
-	if err != nil {
-		return "unknown"
-	} else {
-		defer resp.Body.Close()
-		body, err := ioutil.ReadAll(resp.Body)
-		if err != nil {
-			return "unknown"
-		} else {
-			return string(body)
-		}
-	}
-}
-
-func InstanceType() string {
-	transport := http.Transport{Dial: dialTimeout}
-	client := http.Client{
-		Transport: &transport,
-	}
-	resp, err := client.Get("http://169.254.169.254/latest/meta-data/instance-type")
-	if err != nil {
-		return "unknown"
-	} else {
-		defer resp.Body.Close()
-		body, err := ioutil.ReadAll(resp.Body)
-		if err != nil {
-			return "unknown"
-		} else {
-			return string(body)
-		}
-	}
-}
-
-func ServerLocalIp() string {
-	transport := http.Transport{Dial: dialTimeout}
-	client := http.Client{
-		Transport: &transport,
-	}
-	resp, err := client.Get("http://169.254.169.254/latest/meta-data/local-ipv4")
-	if err != nil {
-		return "127.0.0.1"
-	} else {
-		defer resp.Body.Close()
-		body, err := ioutil.ReadAll(resp.Body)
-		if err != nil {
-			return "127.0.0.1"
-		} else {
-			return string(body)
-		}
-	}
-}
-
-func ServerPublicIp() string {
-	transport := http.Transport{Dial: dialTimeout}
-	client := http.Client{
-		Transport: &transport,
-	}
-	resp, err := client.Get("http://169.254.169.254/latest/meta-data/public-ipv4")
-	if err != nil {
-		return "127.0.0.1"
-	} else {
-		defer resp.Body.Close()
-		body, err := ioutil.ReadAll(resp.Body)
-		if err != nil {
-			return "127.0.0.1"
-		} else {
-			return string(body)
-		}
-	}
-}
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/client.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/client.go
deleted file mode 100644
index 86d2ccec..00000000
--- a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/client.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package aws
-
-import (
-	"math"
-	"net"
-	"net/http"
-	"time"
-)
-
-type RetryableFunc func(*http.Request, *http.Response, error) bool
-type WaitFunc func(try int)
-type DeadlineFunc func() time.Time
-
-type ResilientTransport struct {
-	// Timeout is the maximum amount of time a dial will wait for
-	// a connect to complete.
-	//
-	// The default is no timeout.
-	//
-	// With or without a timeout, the operating system may impose
-	// its own earlier timeout. For instance, TCP timeouts are
-	// often around 3 minutes.
-	DialTimeout time.Duration
-
-	// MaxTries, if non-zero, specifies the number of times we will retry on
-	// failure. Retries are only attempted for temporary network errors or known
-	// safe failures.
-	MaxTries    int
-	Deadline    DeadlineFunc
-	ShouldRetry RetryableFunc
-	Wait        WaitFunc
-	transport   *http.Transport
-}
-
-// Convenience method for creating an http client
-func NewClient(rt *ResilientTransport) *http.Client {
-	rt.transport = &http.Transport{
-		Dial: func(netw, addr string) (net.Conn, error) {
-			c, err := net.DialTimeout(netw, addr, rt.DialTimeout)
-			if err != nil {
-				return nil, err
-			}
-			c.SetDeadline(rt.Deadline())
-			return c, nil
-		},
-		Proxy: http.ProxyFromEnvironment,
-	}
-	// TODO: Would be nice if ResilientTransport allowed clients to initialize
-	// with http.Transport attributes.
-	return &http.Client{
-		Transport: rt,
-	}
-}
-
-var retryingTransport = &ResilientTransport{
-	Deadline: func() time.Time {
-		return time.Now().Add(5 * time.Second)
-	},
-	DialTimeout: 10 * time.Second,
-	MaxTries:    3,
-	ShouldRetry: awsRetry,
-	Wait:        ExpBackoff,
-}
-
-// Exported default client
-var RetryingClient = NewClient(retryingTransport)
-
-func (t *ResilientTransport) RoundTrip(req *http.Request) (*http.Response, error) {
-	return t.tries(req)
-}
-
-// Retry a request a maximum of t.MaxTries times.
-// We'll only retry if the proper criteria are met.
-// If a wait function is specified, wait that amount of time
-// in between requests.
-func (t *ResilientTransport) tries(req *http.Request) (res *http.Response, err error) {
-	for try := 0; try < t.MaxTries; try += 1 {
-		res, err = t.transport.RoundTrip(req)
-
-		if !t.ShouldRetry(req, res, err) {
-			break
-		}
-		if res != nil {
-			res.Body.Close()
-		}
-		if t.Wait != nil {
-			t.Wait(try)
-		}
-	}
-
-	return
-}
-
-func ExpBackoff(try int) {
-	time.Sleep(100 * time.Millisecond *
-		time.Duration(math.Exp2(float64(try))))
-}
-
-func LinearBackoff(try int) {
-	time.Sleep(time.Duration(try*100) * time.Millisecond)
-}
-
-// Decide if we should retry a request.
-// In general, the criteria for retrying a request is described here
-// http://docs.aws.amazon.com/general/latest/gr/api-retries.html
-func awsRetry(req *http.Request, res *http.Response, err error) bool {
-	retry := false
-
-	// Retry if there's a temporary network error.
-	if neterr, ok := err.(net.Error); ok {
-		if neterr.Temporary() {
-			retry = true
-		}
-	}
-
-	// Retry if we get a 5xx series error.
-	if res != nil {
-		if res.StatusCode >= 500 && res.StatusCode < 600 {
-			retry = true
-		}
-	}
-
-	return retry
-}
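The client.go deletion above removes goamz's retrying HTTP client. A minimal usage sketch, assuming the upstream package is importable as github.com/docker/goamz/aws:

package main

import (
	"fmt"

	"github.com/docker/goamz/aws"
)

func main() {
	// RetryingClient retries up to three times on temporary network
	// errors and 5xx responses, waiting ExpBackoff between attempts.
	resp, err := aws.RetryingClient.Get("https://sts.amazonaws.com/")
	if err != nil {
		fmt.Println("request failed after retries:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
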
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/regions.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/regions.go
deleted file mode 100644
index 0406648d..00000000
--- a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/regions.go
+++ /dev/null
@@ -1,289 +0,0 @@
-package aws
-
-var USGovWest = Region{
-	"us-gov-west-1",
-	ServiceInfo{"https://ec2.us-gov-west-1.amazonaws.com", V2Signature},
-	"https://s3-fips-us-gov-west-1.amazonaws.com",
-	"",
-	true,
-	true,
-	"",
-	"https://sns.us-gov-west-1.amazonaws.com",
-	"https://sqs.us-gov-west-1.amazonaws.com",
-	"",
-	"https://iam.us-gov.amazonaws.com",
-	"https://elasticloadbalancing.us-gov-west-1.amazonaws.com",
-	"",
-	"https://dynamodb.us-gov-west-1.amazonaws.com",
-	ServiceInfo{"https://monitoring.us-gov-west-1.amazonaws.com", V2Signature},
-	"https://autoscaling.us-gov-west-1.amazonaws.com",
-	ServiceInfo{"https://rds.us-gov-west-1.amazonaws.com", V2Signature},
-	"",
-	"https://sts.amazonaws.com",
-	"https://cloudformation.us-gov-west-1.amazonaws.com",
-	"",
-}
-
-var USEast = Region{
-	"us-east-1",
-	ServiceInfo{"https://ec2.us-east-1.amazonaws.com", V2Signature},
-	"https://s3-external-1.amazonaws.com",
-	"",
-	false,
-	false,
-	"https://sdb.amazonaws.com",
-	"https://sns.us-east-1.amazonaws.com",
-	"https://sqs.us-east-1.amazonaws.com",
-	"https://email.us-east-1.amazonaws.com",
-	"https://iam.amazonaws.com",
-	"https://elasticloadbalancing.us-east-1.amazonaws.com",
-	"https://kms.us-east-1.amazonaws.com",
-	"https://dynamodb.us-east-1.amazonaws.com",
-	ServiceInfo{"https://monitoring.us-east-1.amazonaws.com", V2Signature},
-	"https://autoscaling.us-east-1.amazonaws.com",
-	ServiceInfo{"https://rds.us-east-1.amazonaws.com", V2Signature},
-	"https://kinesis.us-east-1.amazonaws.com",
-	"https://sts.amazonaws.com",
-	"https://cloudformation.us-east-1.amazonaws.com",
-	"https://elasticache.us-east-1.amazonaws.com",
-}
-
-var USWest = Region{
-	"us-west-1",
-	ServiceInfo{"https://ec2.us-west-1.amazonaws.com", V2Signature},
-	"https://s3-us-west-1.amazonaws.com",
-	"",
-	true,
-	true,
-	"https://sdb.us-west-1.amazonaws.com",
-	"https://sns.us-west-1.amazonaws.com",
-	"https://sqs.us-west-1.amazonaws.com",
-	"",
-	"https://iam.amazonaws.com",
-	"https://elasticloadbalancing.us-west-1.amazonaws.com",
-	"https://kms.us-west-1.amazonaws.com",
-	"https://dynamodb.us-west-1.amazonaws.com",
-	ServiceInfo{"https://monitoring.us-west-1.amazonaws.com", V2Signature},
-	"https://autoscaling.us-west-1.amazonaws.com",
-	ServiceInfo{"https://rds.us-west-1.amazonaws.com", V2Signature},
-	"https://kinesis.us-west-1.amazonaws.com",
-	"https://sts.amazonaws.com",
-	"https://cloudformation.us-west-1.amazonaws.com",
-	"https://elasticache.us-west-1.amazonaws.com",
-}
-
-var USWest2 = Region{
-	"us-west-2",
-	ServiceInfo{"https://ec2.us-west-2.amazonaws.com", V2Signature},
-	"https://s3-us-west-2.amazonaws.com",
-	"",
-	true,
-	true,
-	"https://sdb.us-west-2.amazonaws.com",
-	"https://sns.us-west-2.amazonaws.com",
-	"https://sqs.us-west-2.amazonaws.com",
-	"https://email.us-west-2.amazonaws.com",
-	"https://iam.amazonaws.com",
-	"https://elasticloadbalancing.us-west-2.amazonaws.com",
-	"https://kms.us-west-2.amazonaws.com",
-	"https://dynamodb.us-west-2.amazonaws.com",
-	ServiceInfo{"https://monitoring.us-west-2.amazonaws.com", V2Signature},
-	"https://autoscaling.us-west-2.amazonaws.com",
-	ServiceInfo{"https://rds.us-west-2.amazonaws.com", V2Signature},
-	"https://kinesis.us-west-2.amazonaws.com",
-	"https://sts.amazonaws.com",
-	"https://cloudformation.us-west-2.amazonaws.com",
-	"https://elasticache.us-west-2.amazonaws.com",
-}
-
-var EUWest = Region{
-	"eu-west-1",
-	ServiceInfo{"https://ec2.eu-west-1.amazonaws.com", V2Signature},
-	"https://s3-eu-west-1.amazonaws.com",
-	"",
-	true,
-	true,
-	"https://sdb.eu-west-1.amazonaws.com",
-	"https://sns.eu-west-1.amazonaws.com",
-	"https://sqs.eu-west-1.amazonaws.com",
-	"https://email.eu-west-1.amazonaws.com",
-	"https://iam.amazonaws.com",
-	"https://elasticloadbalancing.eu-west-1.amazonaws.com",
-	"https://kms.eu-west-1.amazonaws.com",
-	"https://dynamodb.eu-west-1.amazonaws.com",
-	ServiceInfo{"https://monitoring.eu-west-1.amazonaws.com", V2Signature},
-	"https://autoscaling.eu-west-1.amazonaws.com",
-	ServiceInfo{"https://rds.eu-west-1.amazonaws.com", V2Signature},
-	"https://kinesis.eu-west-1.amazonaws.com",
-	"https://sts.amazonaws.com",
-	"https://cloudformation.eu-west-1.amazonaws.com",
-	"https://elasticache.eu-west-1.amazonaws.com",
-}
-
-var EUCentral = Region{
-	"eu-central-1",
-	ServiceInfo{"https://ec2.eu-central-1.amazonaws.com", V4Signature},
-	"https://s3-eu-central-1.amazonaws.com",
-	"",
-	true,
-	true,
-	"https://sdb.eu-central-1.amazonaws.com",
-	"https://sns.eu-central-1.amazonaws.com",
-	"https://sqs.eu-central-1.amazonaws.com",
-	"",
-	"https://iam.amazonaws.com",
-	"https://elasticloadbalancing.eu-central-1.amazonaws.com",
-	"https://kms.eu-central-1.amazonaws.com",
-	"https://dynamodb.eu-central-1.amazonaws.com",
-	ServiceInfo{"https://monitoring.eu-central-1.amazonaws.com", V2Signature},
-	"https://autoscaling.eu-central-1.amazonaws.com",
-	ServiceInfo{"https://rds.eu-central-1.amazonaws.com", V2Signature},
-	"https://kinesis.eu-central-1.amazonaws.com",
-	"https://sts.amazonaws.com",
-	"https://cloudformation.eu-central-1.amazonaws.com",
-	"",
-}
-
-var APSoutheast = Region{
-	"ap-southeast-1",
-	ServiceInfo{"https://ec2.ap-southeast-1.amazonaws.com", V2Signature},
-	"https://s3-ap-southeast-1.amazonaws.com",
-	"",
-	true,
-	true,
-	"https://sdb.ap-southeast-1.amazonaws.com",
-	"https://sns.ap-southeast-1.amazonaws.com",
-	"https://sqs.ap-southeast-1.amazonaws.com",
-	"",
-	"https://iam.amazonaws.com",
-	"https://elasticloadbalancing.ap-southeast-1.amazonaws.com",
-	"https://kms.ap-southeast-1.amazonaws.com",
-	"https://dynamodb.ap-southeast-1.amazonaws.com",
-	ServiceInfo{"https://monitoring.ap-southeast-1.amazonaws.com", V2Signature},
-	"https://autoscaling.ap-southeast-1.amazonaws.com",
-	ServiceInfo{"https://rds.ap-southeast-1.amazonaws.com", V2Signature},
-	"https://kinesis.ap-southeast-1.amazonaws.com",
-	"https://sts.amazonaws.com",
-	"https://cloudformation.ap-southeast-1.amazonaws.com",
-	"https://elasticache.ap-southeast-1.amazonaws.com",
-}
-
-var APSoutheast2 = Region{
-	"ap-southeast-2",
-	ServiceInfo{"https://ec2.ap-southeast-2.amazonaws.com", V2Signature},
-	"https://s3-ap-southeast-2.amazonaws.com",
-	"",
-	true,
-	true,
-	"https://sdb.ap-southeast-2.amazonaws.com",
-	"https://sns.ap-southeast-2.amazonaws.com",
-	"https://sqs.ap-southeast-2.amazonaws.com",
-	"",
-	"https://iam.amazonaws.com",
-	"https://elasticloadbalancing.ap-southeast-2.amazonaws.com",
-	"https://kms.ap-southeast-2.amazonaws.com",
-	"https://dynamodb.ap-southeast-2.amazonaws.com",
-	ServiceInfo{"https://monitoring.ap-southeast-2.amazonaws.com", V2Signature},
-	"https://autoscaling.ap-southeast-2.amazonaws.com",
-	ServiceInfo{"https://rds.ap-southeast-2.amazonaws.com", V2Signature},
-	"https://kinesis.ap-southeast-2.amazonaws.com",
-	"https://sts.amazonaws.com",
-	"https://cloudformation.ap-southeast-2.amazonaws.com",
-	"https://elasticache.ap-southeast-2.amazonaws.com",
-}
-
-var APNortheast = Region{
-	"ap-northeast-1",
-	ServiceInfo{"https://ec2.ap-northeast-1.amazonaws.com", V2Signature},
-	"https://s3-ap-northeast-1.amazonaws.com",
-	"",
-	true,
-	true,
-	"https://sdb.ap-northeast-1.amazonaws.com",
-	"https://sns.ap-northeast-1.amazonaws.com",
-	"https://sqs.ap-northeast-1.amazonaws.com",
-	"",
-	"https://iam.amazonaws.com",
-	"https://elasticloadbalancing.ap-northeast-1.amazonaws.com",
-	"https://kms.ap-northeast-1.amazonaws.com",
-	"https://dynamodb.ap-northeast-1.amazonaws.com",
-	ServiceInfo{"https://monitoring.ap-northeast-1.amazonaws.com", V2Signature},
-	"https://autoscaling.ap-northeast-1.amazonaws.com",
-	ServiceInfo{"https://rds.ap-northeast-1.amazonaws.com", V2Signature},
-	"https://kinesis.ap-northeast-1.amazonaws.com",
-	"https://sts.amazonaws.com",
-	"https://cloudformation.ap-northeast-1.amazonaws.com",
-	"https://elasticache.ap-northeast-1.amazonaws.com",
-}
-
-var APNortheast2 = Region{
-	"ap-northeast-2",
-	ServiceInfo{"https://ec2.ap-northeast-2.amazonaws.com", V2Signature},
-	"https://s3-ap-northeast-2.amazonaws.com",
-	"",
-	true,
-	true,
-	"",
-	"https://sns.ap-northeast-2.amazonaws.com",
-	"https://sqs.ap-northeast-2.amazonaws.com",
-	"",
-	"https://iam.amazonaws.com",
-	"https://elasticloadbalancing.ap-northeast-2.amazonaws.com",
-	"https://kms.ap-northeast-2.amazonaws.com",
-	"https://dynamodb.ap-northeast-2.amazonaws.com",
-	ServiceInfo{"https://monitoring.ap-northeast-2.amazonaws.com", V2Signature},
-	"https://autoscaling.ap-northeast-2.amazonaws.com",
-	ServiceInfo{"https://rds.ap-northeast-2.amazonaws.com", V2Signature},
-	"https://kinesis.ap-northeast-2.amazonaws.com",
-	"https://sts.ap-northeast-2.amazonaws.com",
-	"https://cloudformation.ap-northeast-2.amazonaws.com",
-	"https://elasticache.ap-northeast-2.amazonaws.com",
-}
-
-var SAEast = Region{
-	"sa-east-1",
-	ServiceInfo{"https://ec2.sa-east-1.amazonaws.com", V2Signature},
-	"https://s3-sa-east-1.amazonaws.com",
-	"",
-	true,
-	true,
-	"https://sdb.sa-east-1.amazonaws.com",
-	"https://sns.sa-east-1.amazonaws.com",
-	"https://sqs.sa-east-1.amazonaws.com",
-	"",
-	"https://iam.amazonaws.com",
-	"https://elasticloadbalancing.sa-east-1.amazonaws.com",
-	"https://kms.sa-east-1.amazonaws.com",
-	"https://dynamodb.sa-east-1.amazonaws.com",
-	ServiceInfo{"https://monitoring.sa-east-1.amazonaws.com", V2Signature},
-	"https://autoscaling.sa-east-1.amazonaws.com",
-	ServiceInfo{"https://rds.sa-east-1.amazonaws.com", V2Signature},
-	"",
-	"https://sts.amazonaws.com",
-	"https://cloudformation.sa-east-1.amazonaws.com",
-	"https://elasticache.sa-east-1.amazonaws.com",
-}
-
-var CNNorth1 = Region{
-	"cn-north-1",
-	ServiceInfo{"https://ec2.cn-north-1.amazonaws.com.cn", V2Signature},
-	"https://s3.cn-north-1.amazonaws.com.cn",
-	"",
-	true,
-	true,
-	"",
-	"https://sns.cn-north-1.amazonaws.com.cn",
-	"https://sqs.cn-north-1.amazonaws.com.cn",
-	"",
-	"https://iam.cn-north-1.amazonaws.com.cn",
-	"https://elasticloadbalancing.cn-north-1.amazonaws.com.cn",
-	"",
-	"https://dynamodb.cn-north-1.amazonaws.com.cn",
-	ServiceInfo{"https://monitoring.cn-north-1.amazonaws.com.cn", V4Signature},
-	"https://autoscaling.cn-north-1.amazonaws.com.cn",
-	ServiceInfo{"https://rds.cn-north-1.amazonaws.com.cn", V4Signature},
-	"",
-	"https://sts.cn-north-1.amazonaws.com.cn",
-	"",
-	"",
-}
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/retry.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/retry.go
deleted file mode 100644
index bea964b9..00000000
--- a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/retry.go
+++ /dev/null
@@ -1,136 +0,0 @@
-package aws
-
-import (
-	"math/rand"
-	"net"
-	"net/http"
-	"time"
-)
-
-const (
-	maxDelay             = 20 * time.Second
-	defaultScale         = 300 * time.Millisecond
-	throttlingScale      = 500 * time.Millisecond
-	throttlingScaleRange = throttlingScale / 4
-	defaultMaxRetries    = 3
-	dynamoDBScale        = 25 * time.Millisecond
-	dynamoDBMaxRetries   = 10
-)
-
-// A RetryPolicy encapsulates a strategy for implementing client retries.
-//
-// Default implementations are provided which match the AWS SDKs.
-type RetryPolicy interface {
-	// ShouldRetry returns whether a client should retry a failed request.
-	ShouldRetry(target string, r *http.Response, err error, numRetries int) bool
-
-	// Delay returns the time a client should wait before issuing a retry.
-	Delay(target string, r *http.Response, err error, numRetries int) time.Duration
-}
-
-// DefaultRetryPolicy implements the AWS SDK default retry policy.
-//
-// It will retry up to 3 times, and uses an exponential backoff with a scale
-// factor of 300ms (300ms, 600ms, 1200ms). If the retry is because of
-// throttling, the delay will also include some randomness.
-//
-// See https://github.com/aws/aws-sdk-java/blob/master/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L90.
-type DefaultRetryPolicy struct {
-}
-
-// ShouldRetry implements the RetryPolicy ShouldRetry method.
-func (policy DefaultRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool {
-	return shouldRetry(r, err, numRetries, defaultMaxRetries)
-}
-
-// Delay implements the RetryPolicy Delay method.
-func (policy DefaultRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration {
-	scale := defaultScale
-	if err, ok := err.(*Error); ok && isThrottlingException(err) {
-		scale = throttlingScale + time.Duration(rand.Int63n(int64(throttlingScaleRange)))
-	}
-	return exponentialBackoff(numRetries, scale)
-}
-
-// DynamoDBRetryPolicy implements the AWS SDK DynamoDB retry policy.
-//
-// It will retry up to 10 times, and uses an exponential backoff with a scale
-// factor of 25ms (25ms, 50ms, 100ms, ...).
-//
-// See https://github.com/aws/aws-sdk-java/blob/master/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L103.
-type DynamoDBRetryPolicy struct {
-}
-
-// ShouldRetry implements the RetryPolicy ShouldRetry method.
-func (policy DynamoDBRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool {
-	return shouldRetry(r, err, numRetries, dynamoDBMaxRetries)
-}
-
-// Delay implements the RetryPolicy Delay method.
-func (policy DynamoDBRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration {
-	return exponentialBackoff(numRetries, dynamoDBScale)
-}
-
-// NeverRetryPolicy never retries requests and returns immediately on failure.
-type NeverRetryPolicy struct {
-}
-
-// ShouldRetry implements the RetryPolicy ShouldRetry method.
-func (policy NeverRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool {
-	return false
-}
-
-// Delay implements the RetryPolicy Delay method.
-func (policy NeverRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration {
-	return time.Duration(0)
-}
-
-// shouldRetry determines if we should retry the request.
-//
-// See http://docs.aws.amazon.com/general/latest/gr/api-retries.html.
-func shouldRetry(r *http.Response, err error, numRetries int, maxRetries int) bool {
-	// Once we've exceeded the max retry attempts, game over.
-	if numRetries >= maxRetries {
-		return false
-	}
-
-	// Always retry temporary network errors.
-	if err, ok := err.(net.Error); ok && err.Temporary() {
-		return true
-	}
-
-	// Always retry 5xx responses.
-	if r != nil && r.StatusCode >= 500 {
-		return true
-	}
-
-	// Always retry throttling exceptions.
-	if err, ok := err.(ServiceError); ok && isThrottlingException(err) {
-		return true
-	}
-
-	// Other classes of failures indicate a problem with the request. Retrying
-	// won't help.
-	return false
-}
-
-func exponentialBackoff(numRetries int, scale time.Duration) time.Duration {
-	if numRetries < 0 {
-		return time.Duration(0)
-	}
-
-	delay := (1 << uint(numRetries)) * scale
-	if delay > maxDelay {
-		return maxDelay
-	}
-	return delay
-}
-
-func isThrottlingException(err ServiceError) bool {
-	switch err.ErrorCode() {
-	case "Throttling", "ThrottlingException", "ProvisionedThroughputExceededException":
-		return true
-	default:
-		return false
-	}
-}
-func (policy NeverRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool { - return false -} - -// Delay implements the RetryPolicy Delay method. -func (policy NeverRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration { - return time.Duration(0) -} - -// shouldRetry determines if we should retry the request. -// -// See http://docs.aws.amazon.com/general/latest/gr/api-retries.html. -func shouldRetry(r *http.Response, err error, numRetries int, maxRetries int) bool { - // Once we've exceeded the max retry attempts, game over. - if numRetries >= maxRetries { - return false - } - - // Always retry temporary network errors. - if err, ok := err.(net.Error); ok && err.Temporary() { - return true - } - - // Always retry 5xx responses. - if r != nil && r.StatusCode >= 500 { - return true - } - - // Always retry throttling exceptions. - if err, ok := err.(ServiceError); ok && isThrottlingException(err) { - return true - } - - // Other classes of failures indicate a problem with the request. Retrying - // won't help. - return false -} - -func exponentialBackoff(numRetries int, scale time.Duration) time.Duration { - if numRetries < 0 { - return time.Duration(0) - } - - delay := (1 << uint(numRetries)) * scale - if delay > maxDelay { - return maxDelay - } - return delay -} - -func isThrottlingException(err ServiceError) bool { - switch err.ErrorCode() { - case "Throttling", "ThrottlingException", "ProvisionedThroughputExceededException": - return true - default: - return false - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/sign.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/sign.go deleted file mode 100644 index 8b53e8e8..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/aws/sign.go +++ /dev/null @@ -1,472 +0,0 @@ -package aws - -import ( - "bytes" - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "path" - "sort" - "strings" - "time" -) - -// AWS specifies that the parameters in a signed request must -// be provided in the natural order of the keys. This is distinct -// from the natural order of the encoded value of key=value. -// Percent and gocheck.Equals affect the sorting order. -func EncodeSorted(values url.Values) string { - // preallocate the arrays for perfomance - keys := make([]string, 0, len(values)) - sarray := make([]string, 0, len(values)) - for k := range values { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - for _, v := range values[k] { - sarray = append(sarray, Encode(k)+"="+Encode(v)) - } - } - - return strings.Join(sarray, "&") -} - -type V2Signer struct { - auth Auth - service ServiceInfo - host string -} - -var b64 = base64.StdEncoding - -func NewV2Signer(auth Auth, service ServiceInfo) (*V2Signer, error) { - u, err := url.Parse(service.Endpoint) - if err != nil { - return nil, err - } - return &V2Signer{auth: auth, service: service, host: u.Host}, nil -} - -func (s *V2Signer) Sign(method, path string, params map[string]string) { - params["AWSAccessKeyId"] = s.auth.AccessKey - params["SignatureVersion"] = "2" - params["SignatureMethod"] = "HmacSHA256" - if s.auth.Token() != "" { - params["SecurityToken"] = s.auth.Token() - } - // AWS specifies that the parameters in a signed request must - // be provided in the natural order of the keys. 
This is distinct - // from the natural order of the encoded value of key=value. - // Percent and gocheck.Equals affect the sorting order. - var keys, sarray []string - for k := range params { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - sarray = append(sarray, Encode(k)+"="+Encode(params[k])) - } - joined := strings.Join(sarray, "&") - payload := method + "\n" + s.host + "\n" + path + "\n" + joined - hash := hmac.New(sha256.New, []byte(s.auth.SecretKey)) - hash.Write([]byte(payload)) - signature := make([]byte, b64.EncodedLen(hash.Size())) - b64.Encode(signature, hash.Sum(nil)) - - params["Signature"] = string(signature) -} - -func (s *V2Signer) SignRequest(req *http.Request) error { - req.ParseForm() - req.Form.Set("AWSAccessKeyId", s.auth.AccessKey) - req.Form.Set("SignatureVersion", "2") - req.Form.Set("SignatureMethod", "HmacSHA256") - if s.auth.Token() != "" { - req.Form.Set("SecurityToken", s.auth.Token()) - } - - payload := req.Method + "\n" + req.URL.Host + "\n" + req.URL.Path + "\n" + EncodeSorted(req.Form) - hash := hmac.New(sha256.New, []byte(s.auth.SecretKey)) - hash.Write([]byte(payload)) - signature := make([]byte, b64.EncodedLen(hash.Size())) - b64.Encode(signature, hash.Sum(nil)) - - req.Form.Set("Signature", string(signature)) - - req.URL.RawQuery = req.Form.Encode() - - return nil -} - -// Common date formats for signing requests -const ( - ISO8601BasicFormat = "20060102T150405Z" - ISO8601BasicFormatShort = "20060102" -) - -type Route53Signer struct { - auth Auth -} - -func NewRoute53Signer(auth Auth) *Route53Signer { - return &Route53Signer{auth: auth} -} - -// Creates the authorize signature based on the date stamp and secret key -func (s *Route53Signer) getHeaderAuthorize(message string) string { - hmacSha256 := hmac.New(sha256.New, []byte(s.auth.SecretKey)) - hmacSha256.Write([]byte(message)) - cryptedString := hmacSha256.Sum(nil) - - return base64.StdEncoding.EncodeToString(cryptedString) -} - -// Adds all the required headers for AWS Route53 API to the request -// including the authorization -func (s *Route53Signer) Sign(req *http.Request) { - date := time.Now().UTC().Format(time.RFC1123) - delete(req.Header, "Date") - req.Header.Set("Date", date) - - authHeader := fmt.Sprintf("AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=%s,Signature=%s", - s.auth.AccessKey, "HmacSHA256", s.getHeaderAuthorize(date)) - - req.Header.Set("Host", req.Host) - req.Header.Set("X-Amzn-Authorization", authHeader) - req.Header.Set("Content-Type", "application/xml") - if s.auth.Token() != "" { - req.Header.Set("X-Amz-Security-Token", s.auth.Token()) - } -} - -/* -The V4Signer encapsulates all of the functionality to sign a request with the AWS -Signature Version 4 Signing Process. (http://goo.gl/u1OWZz) -*/ -type V4Signer struct { - auth Auth - serviceName string - region Region - // Add the x-amz-content-sha256 header - IncludeXAmzContentSha256 bool -} - -/* -Return a new instance of a V4Signer capable of signing AWS requests. -*/ -func NewV4Signer(auth Auth, serviceName string, region Region) *V4Signer { - return &V4Signer{ - auth: auth, - serviceName: serviceName, - region: region, - IncludeXAmzContentSha256: false, - } -} - -/* -Sign a request according to the AWS Signature Version 4 Signing Process. (http://goo.gl/u1OWZz) - -The signed request will include an "x-amz-date" header with a current timestamp if a valid "x-amz-date" -or "date" header was not available in the original request. 
In addition, AWS Signature Version 4 requires -the "host" header to be a signed header, therefor the Sign method will manually set a "host" header from -the request.Host. - -The signed request will include a new "Authorization" header indicating that the request has been signed. - -Any changes to the request after signing the request will invalidate the signature. -*/ -func (s *V4Signer) Sign(req *http.Request) { - req.Header.Set("host", req.Host) // host header must be included as a signed header - t := s.requestTime(req) // Get request time - - payloadHash := "" - - if _, ok := req.Form["X-Amz-Expires"]; ok { - // We are authenticating the the request by using query params - // (also known as pre-signing a url, http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) - payloadHash = "UNSIGNED-PAYLOAD" - req.Header.Del("x-amz-date") - - req.Form["X-Amz-SignedHeaders"] = []string{s.signedHeaders(req.Header)} - req.Form["X-Amz-Algorithm"] = []string{"AWS4-HMAC-SHA256"} - req.Form["X-Amz-Credential"] = []string{s.auth.AccessKey + "/" + s.credentialScope(t)} - req.Form["X-Amz-Date"] = []string{t.Format(ISO8601BasicFormat)} - req.URL.RawQuery = req.Form.Encode() - } else { - payloadHash = s.payloadHash(req) - if s.IncludeXAmzContentSha256 { - req.Header.Set("x-amz-content-sha256", payloadHash) // x-amz-content-sha256 contains the payload hash - } - } - creq := s.canonicalRequest(req, payloadHash) // Build canonical request - sts := s.stringToSign(t, creq) // Build string to sign - signature := s.signature(t, sts) // Calculate the AWS Signature Version 4 - auth := s.authorization(req.Header, t, signature) // Create Authorization header value - - if _, ok := req.Form["X-Amz-Expires"]; ok { - req.Form["X-Amz-Signature"] = []string{signature} - } else { - req.Header.Set("Authorization", auth) // Add Authorization header to request - } - return -} - -func (s *V4Signer) SignRequest(req *http.Request) error { - s.Sign(req) - return nil -} - -/* -requestTime method will parse the time from the request "x-amz-date" or "date" headers. -If the "x-amz-date" header is present, that will take priority over the "date" header. -If neither header is defined or we are unable to parse either header as a valid date -then we will create a new "x-amz-date" header with the current time. -*/ -func (s *V4Signer) requestTime(req *http.Request) time.Time { - - // Get "x-amz-date" header - date := req.Header.Get("x-amz-date") - - // Attempt to parse as ISO8601BasicFormat - t, err := time.Parse(ISO8601BasicFormat, date) - if err == nil { - return t - } - - // Attempt to parse as http.TimeFormat - t, err = time.Parse(http.TimeFormat, date) - if err == nil { - req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat)) - return t - } - - // Get "date" header - date = req.Header.Get("date") - - // Attempt to parse as http.TimeFormat - t, err = time.Parse(http.TimeFormat, date) - if err == nil { - return t - } - - // Create a current time header to be used - t = time.Now().UTC() - req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat)) - return t -} - -/* -canonicalRequest method creates the canonical request according to Task 1 of the AWS Signature Version 4 Signing Process. 
(http://goo.gl/eUUZ3S) - - CanonicalRequest = - HTTPRequestMethod + '\n' + - CanonicalURI + '\n' + - CanonicalQueryString + '\n' + - CanonicalHeaders + '\n' + - SignedHeaders + '\n' + - HexEncode(Hash(Payload)) - -payloadHash is optional; use the empty string and it will be calculated from the request -*/ -func (s *V4Signer) canonicalRequest(req *http.Request, payloadHash string) string { - if payloadHash == "" { - payloadHash = s.payloadHash(req) - } - c := new(bytes.Buffer) - fmt.Fprintf(c, "%s\n", req.Method) - fmt.Fprintf(c, "%s\n", s.canonicalURI(req.URL)) - fmt.Fprintf(c, "%s\n", s.canonicalQueryString(req.URL)) - fmt.Fprintf(c, "%s\n\n", s.canonicalHeaders(req.Header)) - fmt.Fprintf(c, "%s\n", s.signedHeaders(req.Header)) - fmt.Fprintf(c, "%s", payloadHash) - return c.String() -} - -func (s *V4Signer) canonicalURI(u *url.URL) string { - u = &url.URL{Path: u.Path} - canonicalPath := u.String() - - slash := strings.HasSuffix(canonicalPath, "/") - canonicalPath = path.Clean(canonicalPath) - - if canonicalPath == "" || canonicalPath == "." { - canonicalPath = "/" - } - - if canonicalPath != "/" && slash { - canonicalPath += "/" - } - - return canonicalPath -} - -func (s *V4Signer) canonicalQueryString(u *url.URL) string { - keyValues := make(map[string]string, len(u.Query())) - keys := make([]string, len(u.Query())) - - key_i := 0 - for k, vs := range u.Query() { - k = url.QueryEscape(k) - - a := make([]string, len(vs)) - for idx, v := range vs { - v = url.QueryEscape(v) - a[idx] = fmt.Sprintf("%s=%s", k, v) - } - - keyValues[k] = strings.Join(a, "&") - keys[key_i] = k - key_i++ - } - - sort.Strings(keys) - - query := make([]string, len(keys)) - for idx, key := range keys { - query[idx] = keyValues[key] - } - - query_str := strings.Join(query, "&") - - // AWS V4 signing requires that the space characters - // are encoded as %20 instead of +. On the other hand - // golangs url.QueryEscape as well as url.Values.Encode() - // both encode the space as a + character. See: - // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html - // https://github.com/golang/go/issues/4013 - // https://groups.google.com/forum/#!topic/golang-nuts/BB443qEjPIk - - return strings.Replace(query_str, "+", "%20", -1) -} - -func (s *V4Signer) canonicalHeaders(h http.Header) string { - i, a, lowerCase := 0, make([]string, len(h)), make(map[string][]string) - - for k, v := range h { - lowerCase[strings.ToLower(k)] = v - } - - var keys []string - for k := range lowerCase { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - v := lowerCase[k] - for j, w := range v { - v[j] = strings.Trim(w, " ") - } - sort.Strings(v) - a[i] = strings.ToLower(k) + ":" + strings.Join(v, ",") - i++ - } - return strings.Join(a, "\n") -} - -func (s *V4Signer) signedHeaders(h http.Header) string { - i, a := 0, make([]string, len(h)) - for k := range h { - a[i] = strings.ToLower(k) - i++ - } - sort.Strings(a) - return strings.Join(a, ";") -} - -func (s *V4Signer) payloadHash(req *http.Request) string { - var b []byte - if req.Body == nil { - b = []byte("") - } else { - var err error - b, err = ioutil.ReadAll(req.Body) - if err != nil { - // TODO: I REALLY DON'T LIKE THIS PANIC!!!! - panic(err) - } - } - req.Body = ioutil.NopCloser(bytes.NewBuffer(b)) - return s.hash(string(b)) -} - -/* -stringToSign method creates the string to sign accorting to Task 2 of the AWS Signature Version 4 Signing Process. 
(http://goo.gl/es1PAu) - - StringToSign = - Algorithm + '\n' + - RequestDate + '\n' + - CredentialScope + '\n' + - HexEncode(Hash(CanonicalRequest)) -*/ -func (s *V4Signer) stringToSign(t time.Time, creq string) string { - w := new(bytes.Buffer) - fmt.Fprint(w, "AWS4-HMAC-SHA256\n") - fmt.Fprintf(w, "%s\n", t.Format(ISO8601BasicFormat)) - fmt.Fprintf(w, "%s\n", s.credentialScope(t)) - fmt.Fprintf(w, "%s", s.hash(creq)) - return w.String() -} - -func (s *V4Signer) credentialScope(t time.Time) string { - return fmt.Sprintf("%s/%s/%s/aws4_request", t.Format(ISO8601BasicFormatShort), s.region.Name, s.serviceName) -} - -/* -signature method calculates the AWS Signature Version 4 according to Task 3 of the AWS Signature Version 4 Signing Process. (http://goo.gl/j0Yqe1) - - signature = HexEncode(HMAC(derived-signing-key, string-to-sign)) -*/ -func (s *V4Signer) signature(t time.Time, sts string) string { - h := s.hmac(s.derivedKey(t), []byte(sts)) - return fmt.Sprintf("%x", h) -} - -/* -derivedKey method derives a signing key to be used for signing a request. - - kSecret = Your AWS Secret Access Key - kDate = HMAC("AWS4" + kSecret, Date) - kRegion = HMAC(kDate, Region) - kService = HMAC(kRegion, Service) - kSigning = HMAC(kService, "aws4_request") -*/ -func (s *V4Signer) derivedKey(t time.Time) []byte { - h := s.hmac([]byte("AWS4"+s.auth.SecretKey), []byte(t.Format(ISO8601BasicFormatShort))) - h = s.hmac(h, []byte(s.region.Name)) - h = s.hmac(h, []byte(s.serviceName)) - h = s.hmac(h, []byte("aws4_request")) - return h -} - -/* -authorization method generates the authorization header value. -*/ -func (s *V4Signer) authorization(header http.Header, t time.Time, signature string) string { - w := new(bytes.Buffer) - fmt.Fprint(w, "AWS4-HMAC-SHA256 ") - fmt.Fprintf(w, "Credential=%s/%s, ", s.auth.AccessKey, s.credentialScope(t)) - fmt.Fprintf(w, "SignedHeaders=%s, ", s.signedHeaders(header)) - fmt.Fprintf(w, "Signature=%s", signature) - return w.String() -} - -// hash method calculates the sha256 hash for a given string -func (s *V4Signer) hash(in string) string { - h := sha256.New() - fmt.Fprintf(h, "%s", in) - return fmt.Sprintf("%x", h.Sum(nil)) -} - -// hmac method calculates the sha256 hmac for a given slice of bytes -func (s *V4Signer) hmac(key, data []byte) []byte { - h := hmac.New(sha256.New, key) - h.Write(data) - return h.Sum(nil) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/lifecycle.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/lifecycle.go deleted file mode 100644 index d9281261..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/lifecycle.go +++ /dev/null @@ -1,202 +0,0 @@ -package s3 - -import ( - "crypto/md5" - "encoding/base64" - "encoding/xml" - "net/url" - "strconv" - "time" -) - -// Implements an interface for s3 bucket lifecycle configuration -// See goo.gl/d0bbDf for details. - -const ( - LifecycleRuleStatusEnabled = "Enabled" - LifecycleRuleStatusDisabled = "Disabled" - LifecycleRuleDateFormat = "2006-01-02" - StorageClassGlacier = "GLACIER" -) - -type Expiration struct { - Days *uint `xml:"Days,omitempty"` - Date string `xml:"Date,omitempty"` -} - -// Returns Date as a time.Time. -func (r *Expiration) ParseDate() (time.Time, error) { - return time.Parse(LifecycleRuleDateFormat, r.Date) -} - -type Transition struct { - Days *uint `xml:"Days,omitempty"` - Date string `xml:"Date,omitempty"` - StorageClass string `xml:"StorageClass"` -} - -// Returns Date as a time.Time. 
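Stepping back to the V4Signer removed above: the derivedKey/signature pair is the heart of the scheme. A self-contained sketch of the same HMAC cascade, with placeholder credentials (the secret, date, region and service values here are all hypothetical):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"fmt"
)

// hmacSHA256 mirrors the V4Signer.hmac helper deleted above.
func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	// kSecret -> kDate -> kRegion -> kService -> kSigning, as in derivedKey.
	secret := "EXAMPLEKEY" // hypothetical secret access key
	date := "20160401"     // date stamp in ISO8601BasicFormatShort
	region, service := "us-east-1", "s3"

	k := hmacSHA256([]byte("AWS4"+secret), []byte(date))
	k = hmacSHA256(k, []byte(region))
	k = hmacSHA256(k, []byte(service))
	k = hmacSHA256(k, []byte("aws4_request"))

	// stringToSign would be assembled as in the stringToSign method above.
	stringToSign := "AWS4-HMAC-SHA256\n20160401T000000Z\n..."
	fmt.Printf("signature=%x\n", hmacSHA256(k, []byte(stringToSign)))
}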
-func (r *Transition) ParseDate() (time.Time, error) { - return time.Parse(LifecycleRuleDateFormat, r.Date) -} - -type NoncurrentVersionExpiration struct { - Days *uint `xml:"NoncurrentDays,omitempty"` -} - -type NoncurrentVersionTransition struct { - Days *uint `xml:"NoncurrentDays,omitempty"` - StorageClass string `xml:"StorageClass"` -} - -type LifecycleRule struct { - ID string `xml:"ID"` - Prefix string `xml:"Prefix"` - Status string `xml:"Status"` - NoncurrentVersionTransition *NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty"` - NoncurrentVersionExpiration *NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"` - Transition *Transition `xml:"Transition,omitempty"` - Expiration *Expiration `xml:"Expiration,omitempty"` -} - -// Create a lifecycle rule with arbitrary identifier id and object name prefix -// for which the rules should apply. -func NewLifecycleRule(id, prefix string) *LifecycleRule { - rule := &LifecycleRule{ - ID: id, - Prefix: prefix, - Status: LifecycleRuleStatusEnabled, - } - return rule -} - -// Adds a transition rule in days. Overwrites any previous transition rule. -func (r *LifecycleRule) SetTransitionDays(days uint) { - r.Transition = &Transition{ - Days: &days, - StorageClass: StorageClassGlacier, - } -} - -// Adds a transition rule as a date. Overwrites any previous transition rule. -func (r *LifecycleRule) SetTransitionDate(date time.Time) { - r.Transition = &Transition{ - Date: date.Format(LifecycleRuleDateFormat), - StorageClass: StorageClassGlacier, - } -} - -// Adds an expiration rule in days. Overwrites any previous expiration rule. -// Days must be > 0. -func (r *LifecycleRule) SetExpirationDays(days uint) { - r.Expiration = &Expiration{ - Days: &days, - } -} - -// Adds an expiration rule as a date. Overwrites any previous expiration rule. -func (r *LifecycleRule) SetExpirationDate(date time.Time) { - r.Expiration = &Expiration{ - Date: date.Format(LifecycleRuleDateFormat), - } -} - -// Adds a noncurrent version transition rule. Overwrites any previous -// noncurrent version transition rule. -func (r *LifecycleRule) SetNoncurrentVersionTransitionDays(days uint) { - r.NoncurrentVersionTransition = &NoncurrentVersionTransition{ - Days: &days, - StorageClass: StorageClassGlacier, - } -} - -// Adds a noncurrent version expiration rule. Days must be > 0. Overwrites -// any previous noncurrent version expiration rule. -func (r *LifecycleRule) SetNoncurrentVersionExpirationDays(days uint) { - r.NoncurrentVersionExpiration = &NoncurrentVersionExpiration{ - Days: &days, - } -} - -// Marks the rule as disabled. -func (r *LifecycleRule) Disable() { - r.Status = LifecycleRuleStatusDisabled -} - -// Marks the rule as enabled (default). -func (r *LifecycleRule) Enable() { - r.Status = LifecycleRuleStatusEnabled -} - -type LifecycleConfiguration struct { - XMLName xml.Name `xml:"LifecycleConfiguration"` - Rules *[]*LifecycleRule `xml:"Rule,omitempty"` -} - -// Adds a LifecycleRule to the configuration. -func (c *LifecycleConfiguration) AddRule(r *LifecycleRule) { - var rules []*LifecycleRule - if c.Rules != nil { - rules = *c.Rules - } - rules = append(rules, r) - c.Rules = &rules -} - -// Sets the bucket's lifecycle configuration. 
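PutLifecycleConfiguration, which follows, uploads such a configuration to the bucket; assembling one from the rule helpers above might look like this fragment (the bucket, rule IDs and prefixes are hypothetical):

// Expire "logs/" objects after 30 days; move "archive/" to Glacier after 90.
conf := &LifecycleConfiguration{}

expire := NewLifecycleRule("expire-logs", "logs/")
expire.SetExpirationDays(30)
conf.AddRule(expire)

toGlacier := NewLifecycleRule("archive-to-glacier", "archive/")
toGlacier.SetTransitionDays(90)
conf.AddRule(toGlacier)

// err := bucket.PutLifecycleConfiguration(conf)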
-func (b *Bucket) PutLifecycleConfiguration(c *LifecycleConfiguration) error { - doc, err := xml.Marshal(c) - if err != nil { - return err - } - - buf := makeXmlBuffer(doc) - digest := md5.New() - size, err := digest.Write(buf.Bytes()) - if err != nil { - return err - } - - headers := map[string][]string{ - "Content-Length": {strconv.FormatInt(int64(size), 10)}, - "Content-MD5": {base64.StdEncoding.EncodeToString(digest.Sum(nil))}, - } - - req := &request{ - path: "/", - method: "PUT", - bucket: b.Name, - headers: headers, - payload: buf, - params: url.Values{"lifecycle": {""}}, - } - - return b.S3.queryV4Sign(req, nil) -} - -// Retrieves the lifecycle configuration for the bucket. AWS returns an error -// if no lifecycle found. -func (b *Bucket) GetLifecycleConfiguration() (*LifecycleConfiguration, error) { - req := &request{ - method: "GET", - bucket: b.Name, - path: "/", - params: url.Values{"lifecycle": {""}}, - } - - conf := &LifecycleConfiguration{} - err := b.S3.queryV4Sign(req, conf) - return conf, err -} - -// Delete the bucket's lifecycle configuration. -func (b *Bucket) DeleteLifecycleConfiguration() error { - req := &request{ - method: "DELETE", - bucket: b.Name, - path: "/", - params: url.Values{"lifecycle": {""}}, - } - - return b.S3.queryV4Sign(req, nil) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/multi.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/multi.go deleted file mode 100644 index d905f565..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/multi.go +++ /dev/null @@ -1,508 +0,0 @@ -package s3 - -import ( - "bytes" - "crypto/md5" - "encoding/base64" - "encoding/hex" - "encoding/xml" - "errors" - "io" - "net/http" - "net/url" - "sort" - "strconv" - "strings" -) - -// Multi represents an unfinished multipart upload. -// -// Multipart uploads allow sending big objects in smaller chunks. -// After all parts have been sent, the upload must be explicitly -// completed by calling Complete with the list of parts. -// -// See http://goo.gl/vJfTG for an overview of multipart uploads. -type Multi struct { - Bucket *Bucket - Key string - UploadId string -} - -// That's the default. Here just for testing. -var listMultiMax = 1000 - -type listMultiResp struct { - NextKeyMarker string - NextUploadIdMarker string - IsTruncated bool - Upload []Multi - CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` -} - -// ListMulti returns the list of unfinished multipart uploads in b. -// -// The prefix parameter limits the response to keys that begin with the -// specified prefix. You can use prefixes to separate a bucket into different -// groupings of keys (to get the feeling of folders, for example). -// -// The delim parameter causes the response to group all of the keys that -// share a common prefix up to the next delimiter in a single entry within -// the CommonPrefixes field. You can use delimiters to separate a bucket -// into different groupings of keys, similar to how folders would work. -// -// See http://goo.gl/ePioY for details. 
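Typical usage of ListMulti, whose implementation follows, was a one-liner; the prefix and delimiter here are hypothetical:

// Enumerate unfinished uploads under "backups/", grouped like folders.
multis, prefixes, err := b.ListMulti("backups/", "/")
if err == nil {
	for _, m := range multis {
		fmt.Println("in flight:", m.Key, m.UploadId)
	}
	fmt.Println("common prefixes:", prefixes)
}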
-func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) { - params := map[string][]string{ - "uploads": {""}, - "max-uploads": {strconv.FormatInt(int64(listMultiMax), 10)}, - "prefix": {prefix}, - "delimiter": {delim}, - } - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "GET", - bucket: b.Name, - params: params, - } - var resp listMultiResp - err := b.S3.query(req, &resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, nil, err - } - for i := range resp.Upload { - multi := &resp.Upload[i] - multi.Bucket = b - multis = append(multis, multi) - } - prefixes = append(prefixes, resp.CommonPrefixes...) - if !resp.IsTruncated { - return multis, prefixes, nil - } - params["key-marker"] = []string{resp.NextKeyMarker} - params["upload-id-marker"] = []string{resp.NextUploadIdMarker} - attempt = attempts.Start() // Last request worked. - } - panic("unreachable") -} - -// Multi returns a multipart upload handler for the provided key -// inside b. If a multipart upload exists for key, it is returned, -// otherwise a new multipart upload is initiated with contType and perm. -func (b *Bucket) Multi(key, contType string, perm ACL, options Options) (*Multi, error) { - multis, _, err := b.ListMulti(key, "") - if err != nil && !hasCode(err, "NoSuchUpload") { - return nil, err - } - for _, m := range multis { - if m.Key == key { - return m, nil - } - } - return b.InitMulti(key, contType, perm, options) -} - -// InitMulti initializes a new multipart upload at the provided -// key inside b and returns a value for manipulating it. -// -// See http://goo.gl/XP8kL for details. -func (b *Bucket) InitMulti(key string, contType string, perm ACL, options Options) (*Multi, error) { - headers := map[string][]string{ - "Content-Type": {contType}, - "Content-Length": {"0"}, - "x-amz-acl": {string(perm)}, - } - options.addHeaders(headers) - params := map[string][]string{ - "uploads": {""}, - } - req := &request{ - method: "POST", - bucket: b.Name, - path: key, - headers: headers, - params: params, - } - var err error - var resp struct { - UploadId string `xml:"UploadId"` - } - for attempt := attempts.Start(); attempt.Next(); { - err = b.S3.query(req, &resp) - if !shouldRetry(err) { - break - } - } - if err != nil { - return nil, err - } - return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil -} - -func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObjectResult, Part, error) { - headers := map[string][]string{ - "x-amz-copy-source": {url.QueryEscape(source)}, - } - options.addHeaders(headers) - params := map[string][]string{ - "uploadId": {m.UploadId}, - "partNumber": {strconv.FormatInt(int64(n), 10)}, - } - - sourceBucket := m.Bucket.S3.Bucket(strings.TrimRight(strings.SplitAfterN(source, "/", 2)[0], "/")) - sourceMeta, err := sourceBucket.Head(strings.SplitAfterN(source, "/", 2)[1], nil) - if err != nil { - return nil, Part{}, err - } - - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "PUT", - bucket: m.Bucket.Name, - path: m.Key, - headers: headers, - params: params, - } - resp := &CopyObjectResult{} - err = m.Bucket.S3.query(req, resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, Part{}, err - } - if resp.ETag == "" { - return nil, Part{}, errors.New("part upload succeeded with no ETag") - } - return resp, Part{n, resp.ETag, sourceMeta.ContentLength}, nil - } - panic("unreachable") -} - 
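PutPartCopy above and PutPart below slot into one overall flow; a sketch of a complete multipart upload against this API (the key, content type and partReaders source are hypothetical):

multi, err := b.InitMulti("backups/db.tar", "application/x-tar", Private, Options{})
if err != nil {
	return err
}
var parts []Part
for i, r := range partReaders { // hypothetical []io.ReadSeeker; every part but the last must be >= 5MB
	p, err := multi.PutPart(i+1, r) // part numbers start at 1
	if err != nil {
		multi.Abort() // free the storage held by parts uploaded so far
		return err
	}
	parts = append(parts, p)
}
return multi.Complete(parts)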
-// PutPart sends part n of the multipart upload, reading all the content from r. -// Each part, except for the last one, must be at least 5MB in size. -// -// See http://goo.gl/pqZer for details. -func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) { - partSize, _, md5b64, err := seekerInfo(r) - if err != nil { - return Part{}, err - } - return m.putPart(n, r, partSize, md5b64) -} - -func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) { - headers := map[string][]string{ - "Content-Length": {strconv.FormatInt(partSize, 10)}, - "Content-MD5": {md5b64}, - } - params := map[string][]string{ - "uploadId": {m.UploadId}, - "partNumber": {strconv.FormatInt(int64(n), 10)}, - } - for attempt := attempts.Start(); attempt.Next(); { - _, err := r.Seek(0, 0) - if err != nil { - return Part{}, err - } - req := &request{ - method: "PUT", - bucket: m.Bucket.Name, - path: m.Key, - headers: headers, - params: params, - payload: r, - } - err = m.Bucket.S3.prepare(req) - if err != nil { - return Part{}, err - } - resp, err := m.Bucket.S3.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return Part{}, err - } - etag := resp.Header.Get("ETag") - if etag == "" { - return Part{}, errors.New("part upload succeeded with no ETag") - } - return Part{n, etag, partSize}, nil - } - panic("unreachable") -} - -func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) { - _, err = r.Seek(0, 0) - if err != nil { - return 0, "", "", err - } - digest := md5.New() - size, err = io.Copy(digest, r) - if err != nil { - return 0, "", "", err - } - sum := digest.Sum(nil) - md5hex = hex.EncodeToString(sum) - md5b64 = base64.StdEncoding.EncodeToString(sum) - return size, md5hex, md5b64, nil -} - -type Part struct { - N int `xml:"PartNumber"` - ETag string - Size int64 -} - -type partSlice []Part - -func (s partSlice) Len() int { return len(s) } -func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N } -func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -type listPartsResp struct { - NextPartNumberMarker string - IsTruncated bool - Part []Part -} - -// That's the default. Here just for testing. -var listPartsMax = 1000 - -// Kept for backcompatability. See the documentation for ListPartsFull -func (m *Multi) ListParts() ([]Part, error) { - return m.ListPartsFull(0, listPartsMax) -} - -// ListParts returns the list of previously uploaded parts in m, -// ordered by part number (Only parts with higher part numbers than -// partNumberMarker will be listed). Only up to maxParts parts will be -// returned. -// -// See http://goo.gl/ePioY for details. -func (m *Multi) ListPartsFull(partNumberMarker int, maxParts int) ([]Part, error) { - if maxParts > listPartsMax { - maxParts = listPartsMax - } - - params := map[string][]string{ - "uploadId": {m.UploadId}, - "max-parts": {strconv.FormatInt(int64(maxParts), 10)}, - "part-number-marker": {strconv.FormatInt(int64(partNumberMarker), 10)}, - } - var parts partSlice - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "GET", - bucket: m.Bucket.Name, - path: m.Key, - params: params, - } - var resp listPartsResp - err := m.Bucket.S3.query(req, &resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - parts = append(parts, resp.Part...) 
- if !resp.IsTruncated { - sort.Sort(parts) - return parts, nil - } - params["part-number-marker"] = []string{resp.NextPartNumberMarker} - attempt = attempts.Start() // Last request worked. - } - panic("unreachable") -} - -type ReaderAtSeeker interface { - io.ReaderAt - io.ReadSeeker -} - -// PutAll sends all of r via a multipart upload with parts no larger -// than partSize bytes, which must be set to at least 5MB. -// Parts previously uploaded are either reused if their checksum -// and size match the new part, or otherwise overwritten with the -// new content. -// PutAll returns all the parts of m (reused or not). -func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) { - old, err := m.ListParts() - if err != nil && !hasCode(err, "NoSuchUpload") { - return nil, err - } - reuse := 0 // Index of next old part to consider reusing. - current := 1 // Part number of latest good part handled. - totalSize, err := r.Seek(0, 2) - if err != nil { - return nil, err - } - first := true // Must send at least one empty part if the file is empty. - var result []Part -NextSection: - for offset := int64(0); offset < totalSize || first; offset += partSize { - first = false - if offset+partSize > totalSize { - partSize = totalSize - offset - } - section := io.NewSectionReader(r, offset, partSize) - _, md5hex, md5b64, err := seekerInfo(section) - if err != nil { - return nil, err - } - for reuse < len(old) && old[reuse].N <= current { - // Looks like this part was already sent. - part := &old[reuse] - etag := `"` + md5hex + `"` - if part.N == current && part.Size == partSize && part.ETag == etag { - // Checksum matches. Reuse the old part. - result = append(result, *part) - current++ - continue NextSection - } - reuse++ - } - - // Part wasn't found or doesn't match. Send it. - part, err := m.putPart(current, section, partSize, md5b64) - if err != nil { - return nil, err - } - result = append(result, part) - current++ - } - return result, nil -} - -type completeUpload struct { - XMLName xml.Name `xml:"CompleteMultipartUpload"` - Parts completeParts `xml:"Part"` -} - -type completePart struct { - PartNumber int - ETag string -} - -type completeParts []completePart - -func (p completeParts) Len() int { return len(p) } -func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber } -func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// We can't know in advance whether we'll have an Error or a -// CompleteMultipartUploadResult, so this structure is just a placeholder to -// know the name of the XML object. -type completeUploadResp struct { - XMLName xml.Name - InnerXML string `xml:",innerxml"` -} - -// Complete assembles the given previously uploaded parts into the -// final object. This operation may take several minutes. -// -// See http://goo.gl/2Z7Tw for details. 
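For whole files, PutAll above pairs naturally with Complete, implemented next; a sketch, where file is a hypothetical already-open *os.File:

// Upload in 5MB sections, reusing any previously uploaded parts whose
// size and checksum still match, then stitch the object together.
parts, err := multi.PutAll(file, 5*1024*1024)
if err != nil {
	return err
}
return multi.Complete(parts)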
-func (m *Multi) Complete(parts []Part) error { - params := map[string][]string{ - "uploadId": {m.UploadId}, - } - c := completeUpload{} - for _, p := range parts { - c.Parts = append(c.Parts, completePart{p.N, p.ETag}) - } - sort.Sort(c.Parts) - data, err := xml.Marshal(&c) - if err != nil { - return err - } - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "POST", - bucket: m.Bucket.Name, - path: m.Key, - params: params, - payload: bytes.NewReader(data), - } - var resp completeUploadResp - if m.Bucket.Region.Name == "generic" { - headers := make(http.Header) - headers.Add("Content-Length", strconv.FormatInt(int64(len(data)), 10)) - req.headers = headers - } - err := m.Bucket.S3.query(req, &resp) - if shouldRetry(err) && attempt.HasNext() { - continue - } - - if err != nil { - return err - } - - // A 200 error code does not guarantee that there were no errors (see - // http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html ), - // so first figure out what kind of XML "object" we are dealing with. - - if resp.XMLName.Local == "Error" { - // S3.query does the unmarshalling for us, so we can't unmarshal - // again in a different struct... So we need to duct-tape back the - // original XML back together. - fullErrorXml := "<Error>" + resp.InnerXML + "</Error>" - s3err := &Error{} - - if err := xml.Unmarshal([]byte(fullErrorXml), s3err); err != nil { - return err - } - - return s3err - } - - if resp.XMLName.Local == "CompleteMultipartUploadResult" { - // FIXME: One could probably add a CompleteFull method returning the - // actual contents of the CompleteMultipartUploadResult object. - return nil - } - - return errors.New("Invalid XML struct returned: " + resp.XMLName.Local) - } - panic("unreachable") -} - -// Abort deletes an unifinished multipart upload and any previously -// uploaded parts for it. -// -// After a multipart upload is aborted, no additional parts can be -// uploaded using it. However, if any part uploads are currently in -// progress, those part uploads might or might not succeed. As a result, -// it might be necessary to abort a given multipart upload multiple -// times in order to completely free all storage consumed by all parts. -// -// NOTE: If the described scenario happens to you, please report back to -// the goamz authors with details. In the future such retrying should be -// handled internally, but it's not clear what happens precisely (Is an -// error returned? Is the issue completely undetectable?). -// -// See http://goo.gl/dnyJw for details. -func (m *Multi) Abort() error { - params := map[string][]string{ - "uploadId": {m.UploadId}, - } - for attempt := attempts.Start(); attempt.Next(); { - req := &request{ - method: "DELETE", - bucket: m.Bucket.Name, - path: m.Key, - params: params, - } - err := m.Bucket.S3.query(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - return err - } - panic("unreachable") -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/s3.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/s3.go deleted file mode 100644 index 5dedaf10..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/s3.go +++ /dev/null @@ -1,1305 +0,0 @@ -// -// goamz - Go packages to interact with the Amazon Web Services. -// -// https://wiki.ubuntu.com/goamz -// -// Copyright (c) 2011 Canonical Ltd.
-// -// Written by Gustavo Niemeyer <gustavo.niemeyer@canonical.com> -// - -package s3 - -import ( - "bytes" - "crypto/hmac" - "crypto/md5" - "crypto/sha1" - "encoding/base64" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "net/http/httputil" - "net/url" - "path" - "strconv" - "strings" - "time" - - "github.com/docker/goamz/aws" -) - -const debug = false - -// The S3 type encapsulates operations with an S3 region. -type S3 struct { - aws.Auth - aws.Region - Signature int - Client *http.Client - private byte // Reserve the right of using private data. -} - -// The Bucket type encapsulates operations with an S3 bucket. -type Bucket struct { - *S3 - Name string -} - -// The Owner type represents the owner of the object in an S3 bucket. -type Owner struct { - ID string - DisplayName string -} - -// Fold options into an Options struct -// -type Options struct { - SSE bool - SSEKMS bool - SSEKMSKeyId string - SSECustomerAlgorithm string - SSECustomerKey string - SSECustomerKeyMD5 string - Meta map[string][]string - ContentEncoding string - CacheControl string - RedirectLocation string - ContentMD5 string - ContentDisposition string - Range string - StorageClass StorageClass - // What else? -} - -type CopyOptions struct { - Options - CopySourceOptions string - MetadataDirective string - ContentType string -} - -// CopyObjectResult is the output from a Copy request -type CopyObjectResult struct { - ETag string - LastModified string -} - -var attempts = aws.AttemptStrategy{ - Min: 5, - Total: 5 * time.Second, - Delay: 200 * time.Millisecond, -} - -// New creates a new S3. -func New(auth aws.Auth, region aws.Region) *S3 { - return &S3{ - Auth: auth, - Region: region, - Signature: aws.V2Signature, - Client: http.DefaultClient, - private: 0, - } -} - -// Bucket returns a Bucket with the given name. -func (s3 *S3) Bucket(name string) *Bucket { - if s3.Region.S3BucketEndpoint != "" || s3.Region.S3LowercaseBucket { - name = strings.ToLower(name) - } - return &Bucket{s3, name} -} - -type BucketInfo struct { - Name string - CreationDate string -} - -type GetServiceResp struct { - Owner Owner - Buckets []BucketInfo `xml:">Bucket"` -} - -// GetService gets a list of all buckets owned by an account. -// -// See http://goo.gl/wbHkGj for details. -func (s3 *S3) GetService() (*GetServiceResp, error) { - bucket := s3.Bucket("") - - r, err := bucket.Get("") - if err != nil { - return nil, err - } - - // Parse the XML response. - var resp GetServiceResp - if err = xml.Unmarshal(r, &resp); err != nil { - return nil, err - } - - return &resp, nil -} - -var createBucketConfiguration = `<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> - <LocationConstraint>%s</LocationConstraint> -</CreateBucketConfiguration>` - -// locationConstraint returns an io.Reader specifying a LocationConstraint if -// required for the region. -// -// See http://goo.gl/bh9Kq for details.
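One note on the plumbing above before locationConstraint's body: the attempts strategy (five tries within five seconds, 200ms apart) drives every retry loop in this file. The recurring shape, extracted as a sketch around a hypothetical doRequest helper:

for attempt := attempts.Start(); attempt.Next(); {
	err := doRequest() // hypothetical: one prepared S3 round trip
	if shouldRetry(err) && attempt.HasNext() {
		continue // transient network or 5xx failure: try again
	}
	return err // success, permanent failure, or attempts exhausted
}
panic("unreachable")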
-func (s3 *S3) locationConstraint() io.Reader { - constraint := "" - if s3.Region.S3LocationConstraint { - constraint = fmt.Sprintf(createBucketConfiguration, s3.Region.Name) - } - return strings.NewReader(constraint) -} - -type ACL string - -const ( - Private = ACL("private") - PublicRead = ACL("public-read") - PublicReadWrite = ACL("public-read-write") - AuthenticatedRead = ACL("authenticated-read") - BucketOwnerRead = ACL("bucket-owner-read") - BucketOwnerFull = ACL("bucket-owner-full-control") -) - -type StorageClass string - -const ( - ReducedRedundancy = StorageClass("REDUCED_REDUNDANCY") - StandardStorage = StorageClass("STANDARD") -) - -type ServerSideEncryption string - -const ( - S3Managed = ServerSideEncryption("AES256") - KMSManaged = ServerSideEncryption("aws:kms") -) - -// PutBucket creates a new bucket. -// -// See http://goo.gl/ndjnR for details. -func (b *Bucket) PutBucket(perm ACL) error { - headers := map[string][]string{ - "x-amz-acl": {string(perm)}, - } - req := &request{ - method: "PUT", - bucket: b.Name, - path: "/", - headers: headers, - payload: b.locationConstraint(), - } - return b.S3.query(req, nil) -} - -// DelBucket removes an existing S3 bucket. All objects in the bucket must -// be removed before the bucket itself can be removed. -// -// See http://goo.gl/GoBrY for details. -func (b *Bucket) DelBucket() (err error) { - req := &request{ - method: "DELETE", - bucket: b.Name, - path: "/", - } - for attempt := attempts.Start(); attempt.Next(); { - err = b.S3.query(req, nil) - if !shouldRetry(err) { - break - } - } - return err -} - -// Get retrieves an object from an S3 bucket. -// -// See http://goo.gl/isCO7 for details. -func (b *Bucket) Get(path string) (data []byte, err error) { - body, err := b.GetReader(path) - if err != nil { - return nil, err - } - data, err = ioutil.ReadAll(body) - body.Close() - return data, err -} - -// GetReader retrieves an object from an S3 bucket, -// returning the body of the HTTP response. -// It is the caller's responsibility to call Close on rc when -// finished reading. -func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) { - resp, err := b.GetResponse(path) - if resp != nil { - return resp.Body, err - } - return nil, err -} - -// GetResponse retrieves an object from an S3 bucket, -// returning the HTTP response. -// It is the caller's responsibility to call Close on rc when -// finished reading -func (b *Bucket) GetResponse(path string) (resp *http.Response, err error) { - return b.GetResponseWithHeaders(path, make(http.Header)) -} - -// GetReaderWithHeaders retrieves an object from an S3 bucket -// Accepts custom headers to be sent as the second parameter -// returning the body of the HTTP response. -// It is the caller's responsibility to call Close on rc when -// finished reading -func (b *Bucket) GetResponseWithHeaders(path string, headers map[string][]string) (resp *http.Response, err error) { - req := &request{ - bucket: b.Name, - path: path, - headers: headers, - } - err = b.S3.prepare(req) - if err != nil { - return nil, err - } - for attempt := attempts.Start(); attempt.Next(); { - resp, err := b.S3.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - return resp, nil - } - panic("unreachable") -} - -// Exists checks whether or not an object exists on an S3 bucket using a HEAD request. 
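Exists, whose body follows, rides on the same machinery as the read path above: Get buffers a whole object in memory while GetReader streams it. A usage sketch (the keys and the dst writer are hypothetical):

// Small object: read fully into memory.
data, err := bucket.Get("config/settings.json")
fmt.Printf("read %d bytes: %v\n", len(data), err)

// Large object: stream and close when done.
rc, err := bucket.GetReader("backups/db.tar")
if err == nil {
	defer rc.Close()
	_, err = io.Copy(dst, rc) // dst: any io.Writer
}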
-func (b *Bucket) Exists(path string) (exists bool, err error) { - req := &request{ - method: "HEAD", - bucket: b.Name, - path: path, - } - err = b.S3.prepare(req) - if err != nil { - return - } - for attempt := attempts.Start(); attempt.Next(); { - resp, err := b.S3.run(req, nil) - - if shouldRetry(err) && attempt.HasNext() { - continue - } - - if err != nil { - // We can treat a 403 or 404 as non existance - if e, ok := err.(*Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) { - return false, nil - } - return false, err - } - - if resp.StatusCode/100 == 2 { - exists = true - } - if resp.Body != nil { - resp.Body.Close() - } - return exists, err - } - return false, fmt.Errorf("S3 Currently Unreachable") -} - -// Head HEADs an object in the S3 bucket, returns the response with -// no body see http://bit.ly/17K1ylI -func (b *Bucket) Head(path string, headers map[string][]string) (*http.Response, error) { - req := &request{ - method: "HEAD", - bucket: b.Name, - path: path, - headers: headers, - } - err := b.S3.prepare(req) - if err != nil { - return nil, err - } - - for attempt := attempts.Start(); attempt.Next(); { - resp, err := b.S3.run(req, nil) - if shouldRetry(err) && attempt.HasNext() { - continue - } - if err != nil { - return nil, err - } - return resp, err - } - return nil, fmt.Errorf("S3 Currently Unreachable") -} - -// Put inserts an object into the S3 bucket. -// -// See http://goo.gl/FEBPD for details. -func (b *Bucket) Put(path string, data []byte, contType string, perm ACL, options Options) error { - body := bytes.NewBuffer(data) - return b.PutReader(path, body, int64(len(data)), contType, perm, options) -} - -// PutCopy puts a copy of an object given by the key path into bucket b using b.Path as the target key -func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source string) (*CopyObjectResult, error) { - headers := map[string][]string{ - "x-amz-acl": {string(perm)}, - "x-amz-copy-source": {escapePath(source)}, - } - options.addHeaders(headers) - req := &request{ - method: "PUT", - bucket: b.Name, - path: path, - headers: headers, - } - resp := &CopyObjectResult{} - err := b.S3.query(req, resp) - if err != nil { - return resp, err - } - return resp, nil -} - -// PutReader inserts an object into the S3 bucket by consuming data -// from r until EOF. 
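The write path mirrors it; before PutReader's body below, the buffered variant above in one line (key and ACL chosen arbitrarily):

err := bucket.Put("greetings/hello.txt", []byte("hello, world"), "text/plain", Private, Options{})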
-func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL, options Options) error { - headers := map[string][]string{ - "Content-Length": {strconv.FormatInt(length, 10)}, - "Content-Type": {contType}, - "x-amz-acl": {string(perm)}, - } - options.addHeaders(headers) - req := &request{ - method: "PUT", - bucket: b.Name, - path: path, - headers: headers, - payload: r, - } - return b.S3.query(req, nil) -} - -// addHeaders adds o's specified fields to headers -func (o Options) addHeaders(headers map[string][]string) { - if o.SSE { - headers["x-amz-server-side-encryption"] = []string{string(S3Managed)} - } else if o.SSEKMS { - headers["x-amz-server-side-encryption"] = []string{string(KMSManaged)} - if len(o.SSEKMSKeyId) != 0 { - headers["x-amz-server-side-encryption-aws-kms-key-id"] = []string{o.SSEKMSKeyId} - } - } else if len(o.SSECustomerAlgorithm) != 0 && len(o.SSECustomerKey) != 0 && len(o.SSECustomerKeyMD5) != 0 { - // Amazon-managed keys and customer-managed keys are mutually exclusive - headers["x-amz-server-side-encryption-customer-algorithm"] = []string{o.SSECustomerAlgorithm} - headers["x-amz-server-side-encryption-customer-key"] = []string{o.SSECustomerKey} - headers["x-amz-server-side-encryption-customer-key-MD5"] = []string{o.SSECustomerKeyMD5} - } - if len(o.Range) != 0 { - headers["Range"] = []string{o.Range} - } - if len(o.ContentEncoding) != 0 { - headers["Content-Encoding"] = []string{o.ContentEncoding} - } - if len(o.CacheControl) != 0 { - headers["Cache-Control"] = []string{o.CacheControl} - } - if len(o.ContentMD5) != 0 { - headers["Content-MD5"] = []string{o.ContentMD5} - } - if len(o.RedirectLocation) != 0 { - headers["x-amz-website-redirect-location"] = []string{o.RedirectLocation} - } - if len(o.ContentDisposition) != 0 { - headers["Content-Disposition"] = []string{o.ContentDisposition} - } - if len(o.StorageClass) != 0 { - headers["x-amz-storage-class"] = []string{string(o.StorageClass)} - - } - for k, v := range o.Meta { - headers["x-amz-meta-"+k] = v - } -} - -// addHeaders adds o's specified fields to headers -func (o CopyOptions) addHeaders(headers map[string][]string) { - o.Options.addHeaders(headers) - if len(o.MetadataDirective) != 0 { - headers["x-amz-metadata-directive"] = []string{o.MetadataDirective} - } - if len(o.CopySourceOptions) != 0 { - headers["x-amz-copy-source-range"] = []string{o.CopySourceOptions} - } - if len(o.ContentType) != 0 { - headers["Content-Type"] = []string{o.ContentType} - } -} - -func makeXmlBuffer(doc []byte) *bytes.Buffer { - buf := new(bytes.Buffer) - buf.WriteString(xml.Header) - buf.Write(doc) - return buf -} - -type IndexDocument struct { - Suffix string `xml:"Suffix"` -} - -type ErrorDocument struct { - Key string `xml:"Key"` -} - -type RoutingRule struct { - ConditionKeyPrefixEquals string `xml:"Condition>KeyPrefixEquals"` - RedirectReplaceKeyPrefixWith string `xml:"Redirect>ReplaceKeyPrefixWith,omitempty"` - RedirectReplaceKeyWith string `xml:"Redirect>ReplaceKeyWith,omitempty"` -} - -type RedirectAllRequestsTo struct { - HostName string `xml:"HostName"` - Protocol string `xml:"Protocol,omitempty"` -} - -type WebsiteConfiguration struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ WebsiteConfiguration"` - IndexDocument *IndexDocument `xml:"IndexDocument,omitempty"` - ErrorDocument *ErrorDocument `xml:"ErrorDocument,omitempty"` - RoutingRules *[]RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"` - RedirectAllRequestsTo *RedirectAllRequestsTo 
`xml:"RedirectAllRequestsTo,omitempty"` -} - -// PutBucketWebsite configures a bucket as a website. -// -// See http://goo.gl/TpRlUy for details. -func (b *Bucket) PutBucketWebsite(configuration WebsiteConfiguration) error { - doc, err := xml.Marshal(configuration) - if err != nil { - return err - } - - buf := makeXmlBuffer(doc) - - return b.PutBucketSubresource("website", buf, int64(buf.Len())) -} - -func (b *Bucket) PutBucketSubresource(subresource string, r io.Reader, length int64) error { - headers := map[string][]string{ - "Content-Length": {strconv.FormatInt(length, 10)}, - } - req := &request{ - path: "/", - method: "PUT", - bucket: b.Name, - headers: headers, - payload: r, - params: url.Values{subresource: {""}}, - } - - return b.S3.query(req, nil) -} - -// Del removes an object from the S3 bucket. -// -// See http://goo.gl/APeTt for details. -func (b *Bucket) Del(path string) error { - req := &request{ - method: "DELETE", - bucket: b.Name, - path: path, - } - return b.S3.query(req, nil) -} - -type Delete struct { - Quiet bool `xml:"Quiet,omitempty"` - Objects []Object `xml:"Object"` -} - -type Object struct { - Key string `xml:"Key"` - VersionId string `xml:"VersionId,omitempty"` -} - -// DelMulti removes up to 1000 objects from the S3 bucket. -// -// See http://goo.gl/jx6cWK for details. -func (b *Bucket) DelMulti(objects Delete) error { - doc, err := xml.Marshal(objects) - if err != nil { - return err - } - - buf := makeXmlBuffer(doc) - digest := md5.New() - size, err := digest.Write(buf.Bytes()) - if err != nil { - return err - } - - headers := map[string][]string{ - "Content-Length": {strconv.FormatInt(int64(size), 10)}, - "Content-MD5": {base64.StdEncoding.EncodeToString(digest.Sum(nil))}, - "Content-Type": {"text/xml"}, - } - req := &request{ - path: "/", - method: "POST", - params: url.Values{"delete": {""}}, - bucket: b.Name, - headers: headers, - payload: buf, - } - - return b.S3.query(req, nil) -} - -// The ListResp type holds the results of a List bucket operation. -type ListResp struct { - Name string - Prefix string - Delimiter string - Marker string - MaxKeys int - // IsTruncated is true if the results have been truncated because - // there are more keys and prefixes than can fit in MaxKeys. - // N.B. this is the opposite sense to that documented (incorrectly) in - // http://goo.gl/YjQTc - IsTruncated bool - Contents []Key - CommonPrefixes []string `xml:">Prefix"` - // if IsTruncated is true, pass NextMarker as marker argument to List() - // to get the next set of keys - NextMarker string -} - -// The Key type represents an item stored in an S3 bucket. -type Key struct { - Key string - LastModified string - Size int64 - // ETag gives the hex-encoded MD5 sum of the contents, - // surrounded with double-quotes. - ETag string - StorageClass string - Owner Owner -} - -// List returns information about objects in an S3 bucket. -// -// The prefix parameter limits the response to keys that begin with the -// specified prefix. -// -// The delim parameter causes the response to group all of the keys that -// share a common prefix up to the next delimiter in a single entry within -// the CommonPrefixes field. You can use delimiters to separate a bucket -// into different groupings of keys, similar to how folders would work. -// -// The marker parameter specifies the key to start with when listing objects -// in a bucket. Amazon S3 lists objects in alphabetical order and -// will return keys alphabetically greater than the marker. 
-// -// The max parameter specifies how many keys + common prefixes to return in -// the response. The default is 1000. -// -// For example, given these keys in a bucket: -// -// index.html -// index2.html -// photos/2006/January/sample.jpg -// photos/2006/February/sample2.jpg -// photos/2006/February/sample3.jpg -// photos/2006/February/sample4.jpg -// -// Listing this bucket with delimiter set to "/" would yield the -// following result: -// -// &ListResp{ -// Name: "sample-bucket", -// MaxKeys: 1000, -// Delimiter: "/", -// Contents: []Key{ -// {Key: "index.html", "index2.html"}, -// }, -// CommonPrefixes: []string{ -// "photos/", -// }, -// } -// -// Listing the same bucket with delimiter set to "/" and prefix set to -// "photos/2006/" would yield the following result: -// -// &ListResp{ -// Name: "sample-bucket", -// MaxKeys: 1000, -// Delimiter: "/", -// Prefix: "photos/2006/", -// CommonPrefixes: []string{ -// "photos/2006/February/", -// "photos/2006/January/", -// }, -// } -// -// See http://goo.gl/YjQTc for details. -func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) { - params := map[string][]string{ - "prefix": {prefix}, - "delimiter": {delim}, - "marker": {marker}, - } - if max != 0 { - params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)} - } - req := &request{ - bucket: b.Name, - params: params, - } - result = &ListResp{} - for attempt := attempts.Start(); attempt.Next(); { - err = b.S3.query(req, result) - if !shouldRetry(err) { - break - } - } - if err != nil { - return nil, err - } - // if NextMarker is not returned, it should be set to the name of last key, - // so let's do it so that each caller doesn't have to - if result.IsTruncated && result.NextMarker == "" { - n := len(result.Contents) - if n > 0 { - result.NextMarker = result.Contents[n-1].Key - } - } - return result, nil -} - -// The VersionsResp type holds the results of a list bucket Versions operation. -type VersionsResp struct { - Name string - Prefix string - KeyMarker string - VersionIdMarker string - MaxKeys int - Delimiter string - IsTruncated bool - Versions []Version `xml:"Version"` - CommonPrefixes []string `xml:">Prefix"` -} - -// The Version type represents an object version stored in an S3 bucket. -type Version struct { - Key string - VersionId string - IsLatest bool - LastModified string - // ETag gives the hex-encoded MD5 sum of the contents, - // surrounded with double-quotes. - ETag string - Size int64 - Owner Owner - StorageClass string -} - -func (b *Bucket) Versions(prefix, delim, keyMarker string, versionIdMarker string, max int) (result *VersionsResp, err error) { - params := map[string][]string{ - "versions": {""}, - "prefix": {prefix}, - "delimiter": {delim}, - } - - if len(versionIdMarker) != 0 { - params["version-id-marker"] = []string{versionIdMarker} - } - if len(keyMarker) != 0 { - params["key-marker"] = []string{keyMarker} - } - - if max != 0 { - params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)} - } - req := &request{ - bucket: b.Name, - params: params, - } - result = &VersionsResp{} - for attempt := attempts.Start(); attempt.Next(); { - err = b.S3.query(req, result) - if !shouldRetry(err) { - break - } - } - if err != nil { - return nil, err - } - return result, nil -} - -type GetLocationResp struct { - Location string `xml:",innerxml"` -} - -func (b *Bucket) Location() (string, error) { - r, err := b.Get("/?location") - if err != nil { - return "", err - } - - // Parse the XML response. 
- var resp GetLocationResp - if err = xml.Unmarshal(r, &resp); err != nil { - return "", err - } - - if resp.Location == "" { - return "us-east-1", nil - } else { - return resp.Location, nil - } -} - -// URL returns a non-signed URL that allows retriving the -// object at path. It only works if the object is publicly -// readable (see SignedURL). -func (b *Bucket) URL(path string) string { - req := &request{ - bucket: b.Name, - path: path, - } - err := b.S3.prepare(req) - if err != nil { - panic(err) - } - u, err := req.url() - if err != nil { - panic(err) - } - u.RawQuery = "" - return u.String() -} - -// SignedURL returns a signed URL that allows anyone holding the URL -// to retrieve the object at path. The signature is valid until expires. -func (b *Bucket) SignedURL(path string, expires time.Time) string { - return b.SignedURLWithArgs(path, expires, nil, nil) -} - -// SignedURLWithArgs returns a signed URL that allows anyone holding the URL -// to retrieve the object at path. The signature is valid until expires. -func (b *Bucket) SignedURLWithArgs(path string, expires time.Time, params url.Values, headers http.Header) string { - return b.SignedURLWithMethod("GET", path, expires, params, headers) -} - -// SignedURLWithMethod returns a signed URL that allows anyone holding the URL -// to either retrieve the object at path or make a HEAD request against it. The signature is valid until expires. -func (b *Bucket) SignedURLWithMethod(method, path string, expires time.Time, params url.Values, headers http.Header) string { - var uv = url.Values{} - - if params != nil { - uv = params - } - - if b.S3.Signature == aws.V2Signature { - uv.Set("Expires", strconv.FormatInt(expires.Unix(), 10)) - } else { - uv.Set("X-Amz-Expires", strconv.FormatInt(expires.Unix()-time.Now().Unix(), 10)) - } - - req := &request{ - method: method, - bucket: b.Name, - path: path, - params: uv, - headers: headers, - } - err := b.S3.prepare(req) - if err != nil { - panic(err) - } - u, err := req.url() - if err != nil { - panic(err) - } - if b.S3.Auth.Token() != "" && b.S3.Signature == aws.V2Signature { - return u.String() + "&x-amz-security-token=" + url.QueryEscape(req.headers["X-Amz-Security-Token"][0]) - } else { - return u.String() - } -} - -// UploadSignedURL returns a signed URL that allows anyone holding the URL -// to upload the object at path. The signature is valid until expires. 
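Before UploadSignedURL's parameters are described below, a sketch of the read-side presigning helpers above (object path hypothetical):

// Fifteen-minute, read-only link to a private object. With V2 signatures
// "Expires" is an absolute Unix timestamp; with V4, "X-Amz-Expires" is a
// lifetime relative to now, as SignedURLWithMethod above shows.
link := bucket.SignedURL("reports/q1-2016.pdf", time.Now().Add(15*time.Minute))
fmt.Println(link)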
-// contenttype is a string like image/png -// name is the resource name in s3 terminology like images/ali.png [obviously excluding the bucket name itself] -func (b *Bucket) UploadSignedURL(name, method, content_type string, expires time.Time) string { - expire_date := expires.Unix() - if method != "POST" { - method = "PUT" - } - - a := b.S3.Auth - tokenData := "" - - if a.Token() != "" { - tokenData = "x-amz-security-token:" + a.Token() + "\n" - } - - stringToSign := method + "\n\n" + content_type + "\n" + strconv.FormatInt(expire_date, 10) + "\n" + tokenData + "/" + path.Join(b.Name, name) - secretKey := a.SecretKey - accessId := a.AccessKey - mac := hmac.New(sha1.New, []byte(secretKey)) - mac.Write([]byte(stringToSign)) - macsum := mac.Sum(nil) - signature := base64.StdEncoding.EncodeToString([]byte(macsum)) - signature = strings.TrimSpace(signature) - - var signedurl *url.URL - var err error - if b.Region.S3Endpoint != "" { - signedurl, err = url.Parse(b.Region.S3Endpoint) - name = b.Name + "/" + name - } else { - signedurl, err = url.Parse("https://" + b.Name + ".s3.amazonaws.com/") - } - - if err != nil { - log.Println("ERROR sining url for S3 upload", err) - return "" - } - signedurl.Path = name - params := url.Values{} - params.Add("AWSAccessKeyId", accessId) - params.Add("Expires", strconv.FormatInt(expire_date, 10)) - params.Add("Signature", signature) - if a.Token() != "" { - params.Add("x-amz-security-token", a.Token()) - } - - signedurl.RawQuery = params.Encode() - return signedurl.String() -} - -// PostFormArgs returns the action and input fields needed to allow anonymous -// uploads to a bucket within the expiration limit -// Additional conditions can be specified with conds -func (b *Bucket) PostFormArgsEx(path string, expires time.Time, redirect string, conds []string) (action string, fields map[string]string) { - conditions := make([]string, 0) - fields = map[string]string{ - "AWSAccessKeyId": b.Auth.AccessKey, - "key": path, - } - - if token := b.S3.Auth.Token(); token != "" { - fields["x-amz-security-token"] = token - conditions = append(conditions, - fmt.Sprintf("{\"x-amz-security-token\": \"%s\"}", token)) - } - - if conds != nil { - conditions = append(conditions, conds...) 
- } - - conditions = append(conditions, fmt.Sprintf("{\"key\": \"%s\"}", path)) - conditions = append(conditions, fmt.Sprintf("{\"bucket\": \"%s\"}", b.Name)) - if redirect != "" { - conditions = append(conditions, fmt.Sprintf("{\"success_action_redirect\": \"%s\"}", redirect)) - fields["success_action_redirect"] = redirect - } - - vExpiration := expires.Format("2006-01-02T15:04:05Z") - vConditions := strings.Join(conditions, ",") - policy := fmt.Sprintf("{\"expiration\": \"%s\", \"conditions\": [%s]}", vExpiration, vConditions) - policy64 := base64.StdEncoding.EncodeToString([]byte(policy)) - fields["policy"] = policy64 - - signer := hmac.New(sha1.New, []byte(b.Auth.SecretKey)) - signer.Write([]byte(policy64)) - fields["signature"] = base64.StdEncoding.EncodeToString(signer.Sum(nil)) - - action = fmt.Sprintf("%s/%s/", b.S3.Region.S3Endpoint, b.Name) - return -} - -// PostFormArgs returns the action and input fields needed to allow anonymous -// uploads to a bucket within the expiration limit -func (b *Bucket) PostFormArgs(path string, expires time.Time, redirect string) (action string, fields map[string]string) { - return b.PostFormArgsEx(path, expires, redirect, nil) -} - -type request struct { - method string - bucket string - path string - params url.Values - headers http.Header - baseurl string - payload io.Reader - prepared bool -} - -func (req *request) url() (*url.URL, error) { - u, err := url.Parse(req.baseurl) - if err != nil { - return nil, fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err) - } - u.RawQuery = req.params.Encode() - u.Path = req.path - return u, nil -} - -// query prepares and runs the req request. -// If resp is not nil, the XML data contained in the response -// body will be unmarshalled on it. -func (s3 *S3) query(req *request, resp interface{}) error { - err := s3.prepare(req) - if err != nil { - return err - } - r, err := s3.run(req, resp) - if r != nil && r.Body != nil { - r.Body.Close() - } - return err -} - -// queryV4Signprepares and runs the req request, signed with aws v4 signatures. -// If resp is not nil, the XML data contained in the response -// body will be unmarshalled on it. -func (s3 *S3) queryV4Sign(req *request, resp interface{}) error { - if req.headers == nil { - req.headers = map[string][]string{} - } - - err := s3.setBaseURL(req) - if err != nil { - return err - } - - hreq, err := s3.setupHttpRequest(req) - if err != nil { - return err - } - - // req.Host must be set for V4 signature calculation - hreq.Host = hreq.URL.Host - - signer := aws.NewV4Signer(s3.Auth, "s3", s3.Region) - signer.IncludeXAmzContentSha256 = true - signer.Sign(hreq) - - _, err = s3.doHttpRequest(hreq, resp) - return err -} - -// Sets baseurl on req from bucket name and the region endpoint -func (s3 *S3) setBaseURL(req *request) error { - if req.bucket == "" { - req.baseurl = s3.Region.S3Endpoint - } else { - req.baseurl = s3.Region.S3BucketEndpoint - if req.baseurl == "" { - // Use the path method to address the bucket. - req.baseurl = s3.Region.S3Endpoint - req.path = "/" + req.bucket + req.path - } else { - // Just in case, prevent injection. - if strings.IndexAny(req.bucket, "/:@") >= 0 { - return fmt.Errorf("bad S3 bucket: %q", req.bucket) - } - req.baseurl = strings.Replace(req.baseurl, "${bucket}", req.bucket, -1) - } - } - - return nil -} - -// partiallyEscapedPath partially escapes the S3 path allowing for all S3 REST API calls. 
-// -// Some commands including: -// GET Bucket acl http://goo.gl/aoXflF -// GET Bucket cors http://goo.gl/UlmBdx -// GET Bucket lifecycle http://goo.gl/8Fme7M -// GET Bucket policy http://goo.gl/ClXIo3 -// GET Bucket location http://goo.gl/5lh8RD -// GET Bucket Logging http://goo.gl/sZ5ckF -// GET Bucket notification http://goo.gl/qSSZKD -// GET Bucket tagging http://goo.gl/QRvxnM -// require the first character after the bucket name in the path to be a literal '?' and -// not the escaped hex representation '%3F'. -func partiallyEscapedPath(path string) string { - pathEscapedAndSplit := strings.Split((&url.URL{Path: path}).String(), "/") - if len(pathEscapedAndSplit) >= 3 { - if len(pathEscapedAndSplit[2]) >= 3 { - // Check for the one "?" that should not be escaped. - if pathEscapedAndSplit[2][0:3] == "%3F" { - pathEscapedAndSplit[2] = "?" + pathEscapedAndSplit[2][3:] - } - } - } - return strings.Replace(strings.Join(pathEscapedAndSplit, "/"), "+", "%2B", -1) -} - -// prepare sets up req to be delivered to S3. -func (s3 *S3) prepare(req *request) error { - // Copy so they can be mutated without affecting on retries. - params := make(url.Values) - headers := make(http.Header) - for k, v := range req.params { - params[k] = v - } - for k, v := range req.headers { - headers[k] = v - } - req.params = params - req.headers = headers - - if !req.prepared { - req.prepared = true - if req.method == "" { - req.method = "GET" - } - - if !strings.HasPrefix(req.path, "/") { - req.path = "/" + req.path - } - - err := s3.setBaseURL(req) - if err != nil { - return err - } - } - - if s3.Signature == aws.V2Signature && s3.Auth.Token() != "" { - req.headers["X-Amz-Security-Token"] = []string{s3.Auth.Token()} - } else if s3.Auth.Token() != "" { - req.params.Set("X-Amz-Security-Token", s3.Auth.Token()) - } - - if s3.Signature == aws.V2Signature { - // Always sign again as it's not clear how far the - // server has handled a previous attempt. 
- u, err := url.Parse(req.baseurl) - if err != nil { - return err - } - - signpathPartiallyEscaped := partiallyEscapedPath(req.path) - if strings.IndexAny(s3.Region.S3BucketEndpoint, "${bucket}") >= 0 { - signpathPartiallyEscaped = "/" + req.bucket + signpathPartiallyEscaped - } - req.headers["Host"] = []string{u.Host} - req.headers["Date"] = []string{time.Now().In(time.UTC).Format(time.RFC1123)} - - sign(s3.Auth, req.method, signpathPartiallyEscaped, req.params, req.headers) - } else { - hreq, err := s3.setupHttpRequest(req) - if err != nil { - return err - } - - hreq.Host = hreq.URL.Host - signer := aws.NewV4Signer(s3.Auth, "s3", s3.Region) - signer.IncludeXAmzContentSha256 = true - signer.Sign(hreq) - - req.payload = hreq.Body - if _, ok := headers["Content-Length"]; ok { - req.headers["Content-Length"] = headers["Content-Length"] - } - } - return nil -} - -// Prepares an *http.Request for doHttpRequest -func (s3 *S3) setupHttpRequest(req *request) (*http.Request, error) { - // Copy so that signing the http request will not mutate it - headers := make(http.Header) - for k, v := range req.headers { - headers[k] = v - } - req.headers = headers - - u, err := req.url() - if err != nil { - return nil, err - } - if s3.Region.Name != "generic" { - u.Opaque = fmt.Sprintf("//%s%s", u.Host, partiallyEscapedPath(u.Path)) - } - - hreq := http.Request{ - URL: u, - Method: req.method, - ProtoMajor: 1, - ProtoMinor: 1, - Header: req.headers, - Form: req.params, - } - - if v, ok := req.headers["Content-Length"]; ok { - hreq.ContentLength, _ = strconv.ParseInt(v[0], 10, 64) - delete(req.headers, "Content-Length") - } - if req.payload != nil { - hreq.Body = ioutil.NopCloser(req.payload) - } - - return &hreq, nil -} - -// doHttpRequest sends hreq and returns the http response from the server. -// If resp is not nil, the XML data contained in the response -// body will be unmarshalled on it. -func (s3 *S3) doHttpRequest(hreq *http.Request, resp interface{}) (*http.Response, error) { - hresp, err := s3.Client.Do(hreq) - if err != nil { - return nil, err - } - if debug { - dump, _ := httputil.DumpResponse(hresp, true) - log.Printf("} -> %s\n", dump) - } - if hresp.StatusCode != 200 && hresp.StatusCode != 204 && hresp.StatusCode != 206 { - return nil, buildError(hresp) - } - if resp != nil { - err = xml.NewDecoder(hresp.Body).Decode(resp) - hresp.Body.Close() - - if debug { - log.Printf("goamz.s3> decoded xml into %#v", resp) - } - - } - return hresp, err -} - -// run sends req and returns the http response from the server. -// If resp is not nil, the XML data contained in the response -// body will be unmarshalled on it. -func (s3 *S3) run(req *request, resp interface{}) (*http.Response, error) { - if debug { - log.Printf("Running S3 request: %#v", req) - } - - hreq, err := s3.setupHttpRequest(req) - if err != nil { - return nil, err - } - - return s3.doHttpRequest(hreq, resp) -} - -// Error represents an error in an operation with S3. -type Error struct { - StatusCode int // HTTP status code (200, 403, ...) - Code string // EC2 error code ("UnsupportedOperation", ...) 
- Message string // The human-oriented error message - BucketName string - RequestId string - HostId string -} - -func (e *Error) Error() string { - return e.Message -} - -func buildError(r *http.Response) error { - if debug { - log.Printf("got error (status code %v)", r.StatusCode) - data, err := ioutil.ReadAll(r.Body) - if err != nil { - log.Printf("\tread error: %v", err) - } else { - log.Printf("\tdata:\n%s\n\n", data) - } - r.Body = ioutil.NopCloser(bytes.NewBuffer(data)) - } - - err := Error{} - // TODO return error if Unmarshal fails? - xml.NewDecoder(r.Body).Decode(&err) - r.Body.Close() - err.StatusCode = r.StatusCode - if err.Message == "" { - err.Message = r.Status - } - if debug { - log.Printf("err: %#v\n", err) - } - return &err -} - -func shouldRetry(err error) bool { - if err == nil { - return false - } - switch err { - case io.ErrUnexpectedEOF, io.EOF: - return true - } - switch e := err.(type) { - case *net.DNSError: - return true - case *net.OpError: - switch e.Op { - case "dial", "read", "write": - return true - } - case *url.Error: - // url.Error can be returned either by net/url if a URL cannot be - // parsed, or by net/http if the response is closed before the headers - // are received or parsed correctly. In that later case, e.Op is set to - // the HTTP method name with the first letter uppercased. We don't want - // to retry on POST operations, since those are not idempotent, all the - // other ones should be safe to retry. The only case where all - // operations are safe to retry are "dial" errors, since in that case - // the POST request didn't make it to the server. - - if netErr, ok := e.Err.(*net.OpError); ok && netErr.Op == "dial" { - return true - } - - switch e.Op { - case "Get", "Put", "Delete", "Head": - return shouldRetry(e.Err) - default: - return false - } - case *Error: - switch e.Code { - case "InternalError", "NoSuchUpload", "NoSuchBucket": - return true - } - switch e.StatusCode { - case 500, 503, 504: - return true - } - } - return false -} - -func hasCode(err error, code string) bool { - s3err, ok := err.(*Error) - return ok && s3err.Code == code -} - -func escapePath(s string) string { - return (&url.URL{Path: s}).String() -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/sign.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/sign.go deleted file mode 100644 index 6f24c667..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/docker/goamz/s3/sign.go +++ /dev/null @@ -1,120 +0,0 @@ -package s3 - -import ( - "crypto/hmac" - "crypto/sha1" - "encoding/base64" - "github.com/docker/goamz/aws" - "log" - "sort" - "strings" -) - -var b64 = base64.StdEncoding - -// ---------------------------------------------------------------------------- -// S3 signing (http://goo.gl/G1LrK) - -var s3ParamsToSign = map[string]bool{ - "acl": true, - "location": true, - "logging": true, - "notification": true, - "partNumber": true, - "policy": true, - "requestPayment": true, - "torrent": true, - "uploadId": true, - "uploads": true, - "versionId": true, - "versioning": true, - "versions": true, - "response-content-type": true, - "response-content-language": true, - "response-expires": true, - "response-cache-control": true, - "response-content-disposition": true, - "response-content-encoding": true, - "website": true, - "delete": true, -} - -func sign(auth aws.Auth, method, canonicalPath string, params, headers map[string][]string) { - var md5, ctype, date, xamz string - var xamzDate bool - 
var keys, sarray []string - xheaders := make(map[string]string) - for k, v := range headers { - k = strings.ToLower(k) - switch k { - case "content-md5": - md5 = v[0] - case "content-type": - ctype = v[0] - case "date": - if !xamzDate { - date = v[0] - } - default: - if strings.HasPrefix(k, "x-amz-") { - keys = append(keys, k) - xheaders[k] = strings.Join(v, ",") - if k == "x-amz-date" { - xamzDate = true - date = "" - } - } - } - } - if len(keys) > 0 { - sort.StringSlice(keys).Sort() - for i := range keys { - key := keys[i] - value := xheaders[key] - sarray = append(sarray, key+":"+value) - } - xamz = strings.Join(sarray, "\n") + "\n" - } - - expires := false - if v, ok := params["Expires"]; ok { - // Query string request authentication alternative. - expires = true - date = v[0] - params["AWSAccessKeyId"] = []string{auth.AccessKey} - } - - sarray = sarray[0:0] - for k, v := range params { - if s3ParamsToSign[k] { - for _, vi := range v { - if vi == "" { - sarray = append(sarray, k) - } else { - // "When signing you do not encode these values." - sarray = append(sarray, k+"="+vi) - } - } - } - } - if len(sarray) > 0 { - sort.StringSlice(sarray).Sort() - canonicalPath = canonicalPath + "?" + strings.Join(sarray, "&") - } - - payload := method + "\n" + md5 + "\n" + ctype + "\n" + date + "\n" + xamz + canonicalPath - hash := hmac.New(sha1.New, []byte(auth.SecretKey)) - hash.Write([]byte(payload)) - signature := make([]byte, b64.EncodedLen(hash.Size())) - b64.Encode(signature, hash.Sum(nil)) - - if expires { - params["Signature"] = []string{string(signature)} - } else { - headers["Authorization"] = []string{"AWS " + auth.AccessKey + ":" + string(signature)} - } - if debug { - log.Printf("Signature payload: %q", payload) - log.Printf("Signature: %q", signature) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/CONTRIBUTING.md b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/CONTRIBUTING.md deleted file mode 100644 index 05be0f8a..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/CONTRIBUTING.md +++ /dev/null @@ -1,13 +0,0 @@ -# Contributing to libtrust - -Want to hack on libtrust? Awesome! Here are instructions to get you -started. - -libtrust is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read -[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md). - -Happy hacking! diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/LICENSE deleted file mode 100644 index 27448585..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/MAINTAINERS b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/MAINTAINERS deleted file mode 100644 index 9768175f..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/MAINTAINERS +++ /dev/null @@ -1,3 +0,0 @@ -Solomon Hykes -Josh Hawn (github: jlhawn) -Derek McGowan (github: dmcgowan) diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/README.md b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/README.md deleted file mode 100644 index 8e7db381..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# libtrust - -Libtrust is library for managing authentication and authorization using public key cryptography. - -Authentication is handled using the identity attached to the public key. -Libtrust provides multiple methods to prove possession of the private key associated with an identity. - - TLS x509 certificates - - Signature verification - - Key Challenge - -Authorization and access control is managed through a distributed trust graph. -Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access. - -## Copyright and license - -Code and documentation copyright 2014 Docker, inc. Code released under the Apache 2.0 license. -Docs released under Creative commons. - diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/certificates.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/certificates.go deleted file mode 100644 index 3dcca33c..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/certificates.go +++ /dev/null @@ -1,175 +0,0 @@ -package libtrust - -import ( - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "io/ioutil" - "math/big" - "net" - "time" -) - -type certTemplateInfo struct { - commonName string - domains []string - ipAddresses []net.IP - isCA bool - clientAuth bool - serverAuth bool -} - -func generateCertTemplate(info *certTemplateInfo) *x509.Certificate { - // Generate a certificate template which is valid from the past week to - // 10 years from now. The usage of the certificate depends on the - // specified fields in the given certTempInfo object. 
- var ( - keyUsage x509.KeyUsage - extKeyUsage []x509.ExtKeyUsage - ) - - if info.isCA { - keyUsage = x509.KeyUsageCertSign - } - - if info.clientAuth { - extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) - } - - if info.serverAuth { - extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) - } - - return &x509.Certificate{ - SerialNumber: big.NewInt(0), - Subject: pkix.Name{ - CommonName: info.commonName, - }, - NotBefore: time.Now().Add(-time.Hour * 24 * 7), - NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), - DNSNames: info.domains, - IPAddresses: info.ipAddresses, - IsCA: info.isCA, - KeyUsage: keyUsage, - ExtKeyUsage: extKeyUsage, - BasicConstraintsValid: info.isCA, - } -} - -func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { - pubCertTemplate := generateCertTemplate(subInfo) - privCertTemplate := generateCertTemplate(issInfo) - - certDER, err := x509.CreateCertificate( - rand.Reader, pubCertTemplate, privCertTemplate, - pub.CryptoPublicKey(), priv.CryptoPrivateKey(), - ) - if err != nil { - return nil, fmt.Errorf("failed to create certificate: %s", err) - } - - cert, err = x509.ParseCertificate(certDER) - if err != nil { - return nil, fmt.Errorf("failed to parse certificate: %s", err) - } - - return -} - -// GenerateSelfSignedServerCert creates a self-signed certificate for the -// given key which is to be used for TLS servers with the given domains and -// IP addresses. -func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { - info := &certTemplateInfo{ - commonName: key.KeyID(), - domains: domains, - ipAddresses: ipAddresses, - serverAuth: true, - } - - return generateCert(key.PublicKey(), key, info, info) -} - -// GenerateSelfSignedClientCert creates a self-signed certificate for the -// given key which is to be used for TLS clients. -func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { - info := &certTemplateInfo{ - commonName: key.KeyID(), - clientAuth: true, - } - - return generateCert(key.PublicKey(), key, info, info) -} - -// GenerateCACert creates a certificate which can be used as a trusted -// certificate authority. -func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { - subjectInfo := &certTemplateInfo{ - commonName: trustedKey.KeyID(), - isCA: true, - } - issuerInfo := &certTemplateInfo{ - commonName: signer.KeyID(), - } - - return generateCert(trustedKey, signer, subjectInfo, issuerInfo) -} - -// GenerateCACertPool creates a certificate authority pool to be used for a -// TLS configuration. Any self-signed certificates issued by the specified -// trusted keys will be verified during a TLS handshake -func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { - certPool := x509.NewCertPool() - - for _, trustedKey := range trustedKeys { - cert, err := GenerateCACert(signer, trustedKey) - if err != nil { - return nil, fmt.Errorf("failed to generate CA certificate: %s", err) - } - - certPool.AddCert(cert) - } - - return certPool, nil -} - -// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded -// containing one or more certificates. The expected pem type is "CERTIFICATE". 
-func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - certificates := []*x509.Certificate{} - var block *pem.Block - block, b = pem.Decode(b) - for ; block != nil; block, b = pem.Decode(b) { - if block.Type == "CERTIFICATE" { - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, err - } - certificates = append(certificates, cert) - } else { - return nil, fmt.Errorf("invalid pem block type: %s", block.Type) - } - } - - return certificates, nil -} - -// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded -// containing one or more certificates. The expected pem type is "CERTIFICATE". -func LoadCertificatePool(filename string) (*x509.CertPool, error) { - certs, err := LoadCertificateBundle(filename) - if err != nil { - return nil, err - } - pool := x509.NewCertPool() - for _, cert := range certs { - pool.AddCert(cert) - } - return pool, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/doc.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/doc.go deleted file mode 100644 index ec5d2159..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -/* -Package libtrust provides an interface for managing authentication and -authorization using public key cryptography. Authentication is handled -using the identity attached to the public key and verified through TLS -x509 certificates, a key challenge, or signature. Authorization and -access control is managed through a trust graph distributed between -both remote trust servers and locally cached and managed data. -*/ -package libtrust diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/ec_key.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/ec_key.go deleted file mode 100644 index 00bbe4b3..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/ec_key.go +++ /dev/null @@ -1,428 +0,0 @@ -package libtrust - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" -) - -/* - * EC DSA PUBLIC KEY - */ - -// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital -// signature algorithms. -type ecPublicKey struct { - *ecdsa.PublicKey - curveName string - signatureAlgorithm *signatureAlgorithm - extended map[string]interface{} -} - -func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) { - curve := cryptoPublicKey.Curve - - switch { - case curve == elliptic.P256(): - return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil - case curve == elliptic.P384(): - return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil - case curve == elliptic.P521(): - return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil - default: - return nil, errors.New("unsupported elliptic curve") - } -} - -// KeyType returns the key type for elliptic curve keys, i.e., "EC". -func (k *ecPublicKey) KeyType() string { - return "EC" -} - -// CurveName returns the elliptic curve identifier. -// Possible values are "P-256", "P-384", and "P-521". 
-func (k *ecPublicKey) CurveName() string { - return k.curveName -} - -// KeyID returns a distinct identifier which is unique to this Public Key. -func (k *ecPublicKey) KeyID() string { - return keyIDFromCryptoKey(k) -} - -func (k *ecPublicKey) String() string { - return fmt.Sprintf("EC Public Key <%s>", k.KeyID()) -} - -// Verify verifyies the signature of the data in the io.Reader using this -// PublicKey. The alg parameter should identify the digital signature -// algorithm which was used to produce the signature and should be supported -// by this public key. Returns a nil error if the signature is valid. -func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error { - // For EC keys there is only one supported signature algorithm depending - // on the curve parameters. - if k.signatureAlgorithm.HeaderParam() != alg { - return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg) - } - - // signature is the concatenation of (r, s), base64Url encoded. - sigLength := len(signature) - expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3) - if sigLength != expectedOctetLength { - return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength) - } - - rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:] - r := new(big.Int).SetBytes(rBytes) - s := new(big.Int).SetBytes(sBytes) - - hasher := k.signatureAlgorithm.HashID().New() - _, err := io.Copy(hasher, data) - if err != nil { - return fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - if !ecdsa.Verify(k.PublicKey, hash, r, s) { - return errors.New("invalid signature") - } - - return nil -} - -// CryptoPublicKey returns the internal object which can be used as a -// crypto.PublicKey for use with other standard library operations. The type -// is either *rsa.PublicKey or *ecdsa.PublicKey -func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey { - return k.PublicKey -} - -func (k *ecPublicKey) toMap() map[string]interface{} { - jwk := make(map[string]interface{}) - for k, v := range k.extended { - jwk[k] = v - } - jwk["kty"] = k.KeyType() - jwk["kid"] = k.KeyID() - jwk["crv"] = k.CurveName() - - xBytes := k.X.Bytes() - yBytes := k.Y.Bytes() - octetLength := (k.Params().BitSize + 7) >> 3 - // MUST include leading zeros in the output so that x, y are each - // *octetLength* bytes long. - xBuf := make([]byte, octetLength-len(xBytes), octetLength) - yBuf := make([]byte, octetLength-len(yBytes), octetLength) - xBuf = append(xBuf, xBytes...) - yBuf = append(yBuf, yBytes...) - - jwk["x"] = joseBase64UrlEncode(xBuf) - jwk["y"] = joseBase64UrlEncode(yBuf) - - return jwk -} - -// MarshalJSON serializes this Public Key using the JWK JSON serialization format for -// elliptic curve keys. -func (k *ecPublicKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Public Key to DER-encoded PKIX format. -func (k *ecPublicKey) PEMBlock() (*pem.Block, error) { - derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) - if err != nil { - return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err) - } - k.extended["kid"] = k.KeyID() // For display purposes. 
- return createPemBlock("PUBLIC KEY", derBytes, k.extended) -} - -func (k *ecPublicKey) AddExtendedField(field string, value interface{}) { - k.extended[field] = value -} - -func (k *ecPublicKey) GetExtendedField(field string) interface{} { - v, ok := k.extended[field] - if !ok { - return nil - } - return v -} - -func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) { - // JWK key type (kty) has already been determined to be "EC". - // Need to extract 'crv', 'x', 'y', and 'kid' and check for - // consistency. - - // Get the curve identifier value. - crv, err := stringFromMap(jwk, "crv") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err) - } - - var ( - curve elliptic.Curve - sigAlg *signatureAlgorithm - ) - - switch { - case crv == "P-256": - curve = elliptic.P256() - sigAlg = es256 - case crv == "P-384": - curve = elliptic.P384() - sigAlg = es384 - case crv == "P-521": - curve = elliptic.P521() - sigAlg = es512 - default: - return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv) - } - - // Get the X and Y coordinates for the public key point. - xB64Url, err := stringFromMap(jwk, "x") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) - } - x, err := parseECCoordinate(xB64Url, curve) - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) - } - - yB64Url, err := stringFromMap(jwk, "y") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) - } - y, err := parseECCoordinate(yB64Url, curve) - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) - } - - key := &ecPublicKey{ - PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, - curveName: crv, signatureAlgorithm: sigAlg, - } - - // Key ID is optional too, but if it exists, it should match the key. - _, ok := jwk["kid"] - if ok { - kid, err := stringFromMap(jwk, "kid") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key ID: %s", err) - } - if kid != key.KeyID() { - return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid) - } - } - - key.extended = jwk - - return key, nil -} - -/* - * EC DSA PRIVATE KEY - */ - -// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature -// algorithms. -type ecPrivateKey struct { - ecPublicKey - *ecdsa.PrivateKey -} - -func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) { - publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey) - if err != nil { - return nil, err - } - - return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil -} - -// PublicKey returns the Public Key data associated with this Private Key. -func (k *ecPrivateKey) PublicKey() PublicKey { - return &k.ecPublicKey -} - -func (k *ecPrivateKey) String() string { - return fmt.Sprintf("EC Private Key <%s>", k.KeyID()) -} - -// Sign signs the data read from the io.Reader using a signature algorithm supported -// by the elliptic curve private key. If the specified hashing algorithm is -// supported by this key, that hash function is used to generate the signature -// otherwise the the default hashing algorithm for this key is used. Returns -// the signature and the name of the JWK signature algorithm used, e.g., -// "ES256", "ES384", "ES512". -func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { - // Generate a signature of the data using the internal alg. 
- // The given hashId is only a suggestion, and since EC keys only support - // on signature/hash algorithm given the curve name, we disregard it for - // the elliptic curve JWK signature implementation. - hasher := k.signatureAlgorithm.HashID().New() - _, err = io.Copy(hasher, data) - if err != nil { - return nil, "", fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash) - if err != nil { - return nil, "", fmt.Errorf("error producing signature: %s", err) - } - rBytes, sBytes := r.Bytes(), s.Bytes() - octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3 - // MUST include leading zeros in the output - rBuf := make([]byte, octetLength-len(rBytes), octetLength) - sBuf := make([]byte, octetLength-len(sBytes), octetLength) - - rBuf = append(rBuf, rBytes...) - sBuf = append(sBuf, sBytes...) - - signature = append(rBuf, sBuf...) - alg = k.signatureAlgorithm.HeaderParam() - - return -} - -// CryptoPrivateKey returns the internal object which can be used as a -// crypto.PublicKey for use with other standard library operations. The type -// is either *rsa.PublicKey or *ecdsa.PublicKey -func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey { - return k.PrivateKey -} - -func (k *ecPrivateKey) toMap() map[string]interface{} { - jwk := k.ecPublicKey.toMap() - - dBytes := k.D.Bytes() - // The length of this octet string MUST be ceiling(log-base-2(n)/8) - // octets (where n is the order of the curve). This is because the private - // key d must be in the interval [1, n-1] so the bitlength of d should be - // no larger than the bitlength of n-1. The easiest way to find the octet - // length is to take bitlength(n-1), add 7 to force a carry, and shift this - // bit sequence right by 3, which is essentially dividing by 8 and adding - // 1 if there is any remainder. Thus, the private key value d should be - // output to (bitlength(n-1)+7)>>3 octets. - n := k.ecPublicKey.Params().N - octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 - // Create a buffer with the necessary zero-padding. - dBuf := make([]byte, octetLength-len(dBytes), octetLength) - dBuf = append(dBuf, dBytes...) - - jwk["d"] = joseBase64UrlEncode(dBuf) - - return jwk -} - -// MarshalJSON serializes this Private Key using the JWK JSON serialization format for -// elliptic curve keys. -func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Private Key to DER-encoded PKIX format. -func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) { - derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey) - if err != nil { - return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err) - } - k.extended["keyID"] = k.KeyID() // For display purposes. - return createPemBlock("EC PRIVATE KEY", derBytes, k.extended) -} - -func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) { - dB64Url, err := stringFromMap(jwk, "d") - if err != nil { - return nil, fmt.Errorf("JWK EC Private Key: %s", err) - } - - // JWK key type (kty) has already been determined to be "EC". - // Need to extract the public key information, then extract the private - // key value 'd'. 
- publicKey, err := ecPublicKeyFromMap(jwk) - if err != nil { - return nil, err - } - - d, err := parseECPrivateParam(dB64Url, publicKey.Curve) - if err != nil { - return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err) - } - - key := &ecPrivateKey{ - ecPublicKey: *publicKey, - PrivateKey: &ecdsa.PrivateKey{ - PublicKey: *publicKey.PublicKey, - D: d, - }, - } - - return key, nil -} - -/* - * Key Generation Functions. - */ - -func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) { - k = new(ecPrivateKey) - k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader) - if err != nil { - return nil, err - } - - k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey - k.extended = make(map[string]interface{}) - - return -} - -// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256. -func GenerateECP256PrivateKey() (PrivateKey, error) { - k, err := generateECPrivateKey(elliptic.P256()) - if err != nil { - return nil, fmt.Errorf("error generating EC P-256 key: %s", err) - } - - k.curveName = "P-256" - k.signatureAlgorithm = es256 - - return k, nil -} - -// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384. -func GenerateECP384PrivateKey() (PrivateKey, error) { - k, err := generateECPrivateKey(elliptic.P384()) - if err != nil { - return nil, fmt.Errorf("error generating EC P-384 key: %s", err) - } - - k.curveName = "P-384" - k.signatureAlgorithm = es384 - - return k, nil -} - -// GenerateECP521PrivateKey generates aß key pair using elliptic curve P-521. -func GenerateECP521PrivateKey() (PrivateKey, error) { - k, err := generateECPrivateKey(elliptic.P521()) - if err != nil { - return nil, fmt.Errorf("error generating EC P-521 key: %s", err) - } - - k.curveName = "P-521" - k.signatureAlgorithm = es512 - - return k, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/filter.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/filter.go deleted file mode 100644 index 5b2b4fca..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/filter.go +++ /dev/null @@ -1,50 +0,0 @@ -package libtrust - -import ( - "path/filepath" -) - -// FilterByHosts filters the list of PublicKeys to only those which contain a -// 'hosts' pattern which matches the given host. If *includeEmpty* is true, -// then keys which do not specify any hosts are also returned. 
-func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) { - filtered := make([]PublicKey, 0, len(keys)) - - for _, pubKey := range keys { - var hosts []string - switch v := pubKey.GetExtendedField("hosts").(type) { - case []string: - hosts = v - case []interface{}: - for _, value := range v { - h, ok := value.(string) - if !ok { - continue - } - hosts = append(hosts, h) - } - } - - if len(hosts) == 0 { - if includeEmpty { - filtered = append(filtered, pubKey) - } - continue - } - - // Check if any hosts match pattern - for _, hostPattern := range hosts { - match, err := filepath.Match(hostPattern, host) - if err != nil { - return nil, err - } - - if match { - filtered = append(filtered, pubKey) - continue - } - } - } - - return filtered, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/hash.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/hash.go deleted file mode 100644 index a2df787d..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/hash.go +++ /dev/null @@ -1,56 +0,0 @@ -package libtrust - -import ( - "crypto" - _ "crypto/sha256" // Registrer SHA224 and SHA256 - _ "crypto/sha512" // Registrer SHA384 and SHA512 - "fmt" -) - -type signatureAlgorithm struct { - algHeaderParam string - hashID crypto.Hash -} - -func (h *signatureAlgorithm) HeaderParam() string { - return h.algHeaderParam -} - -func (h *signatureAlgorithm) HashID() crypto.Hash { - return h.hashID -} - -var ( - rs256 = &signatureAlgorithm{"RS256", crypto.SHA256} - rs384 = &signatureAlgorithm{"RS384", crypto.SHA384} - rs512 = &signatureAlgorithm{"RS512", crypto.SHA512} - es256 = &signatureAlgorithm{"ES256", crypto.SHA256} - es384 = &signatureAlgorithm{"ES384", crypto.SHA384} - es512 = &signatureAlgorithm{"ES512", crypto.SHA512} -) - -func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) { - switch { - case alg == "RS256": - return rs256, nil - case alg == "RS384": - return rs384, nil - case alg == "RS512": - return rs512, nil - default: - return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg) - } -} - -func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm { - switch { - case hashID == crypto.SHA512: - return rs512 - case hashID == crypto.SHA384: - return rs384 - case hashID == crypto.SHA256: - fallthrough - default: - return rs256 - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/jsonsign.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/jsonsign.go deleted file mode 100644 index cb2ca9a7..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/jsonsign.go +++ /dev/null @@ -1,657 +0,0 @@ -package libtrust - -import ( - "bytes" - "crypto" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "sort" - "time" - "unicode" -) - -var ( - // ErrInvalidSignContent is used when the content to be signed is invalid. - ErrInvalidSignContent = errors.New("invalid sign content") - - // ErrInvalidJSONContent is used when invalid json is encountered. - ErrInvalidJSONContent = errors.New("invalid json content") - - // ErrMissingSignatureKey is used when the specified signature key - // does not exist in the JSON content. 
- ErrMissingSignatureKey = errors.New("missing signature key") -) - -type jsHeader struct { - JWK PublicKey `json:"jwk,omitempty"` - Algorithm string `json:"alg"` - Chain []string `json:"x5c,omitempty"` -} - -type jsSignature struct { - Header jsHeader `json:"header"` - Signature string `json:"signature"` - Protected string `json:"protected,omitempty"` -} - -type jsSignaturesSorted []jsSignature - -func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] } -func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) } - -func (jsbkid jsSignaturesSorted) Less(i, j int) bool { - ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID() - si, sj := jsbkid[i].Signature, jsbkid[j].Signature - - if ki == kj { - return si < sj - } - - return ki < kj -} - -type signKey struct { - PrivateKey - Chain []*x509.Certificate -} - -// JSONSignature represents a signature of a json object. -type JSONSignature struct { - payload string - signatures []jsSignature - indent string - formatLength int - formatTail []byte -} - -func newJSONSignature() *JSONSignature { - return &JSONSignature{ - signatures: make([]jsSignature, 0, 1), - } -} - -// Payload returns the encoded payload of the signature. This -// payload should not be signed directly -func (js *JSONSignature) Payload() ([]byte, error) { - return joseBase64UrlDecode(js.payload) -} - -func (js *JSONSignature) protectedHeader() (string, error) { - protected := map[string]interface{}{ - "formatLength": js.formatLength, - "formatTail": joseBase64UrlEncode(js.formatTail), - "time": time.Now().UTC().Format(time.RFC3339), - } - protectedBytes, err := json.Marshal(protected) - if err != nil { - return "", err - } - - return joseBase64UrlEncode(protectedBytes), nil -} - -func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { - buf := make([]byte, len(js.payload)+len(protectedHeader)+1) - copy(buf, protectedHeader) - buf[len(protectedHeader)] = '.' - copy(buf[len(protectedHeader)+1:], js.payload) - return buf, nil -} - -// Sign adds a signature using the given private key. -func (js *JSONSignature) Sign(key PrivateKey) error { - protected, err := js.protectedHeader() - if err != nil { - return err - } - signBytes, err := js.signBytes(protected) - if err != nil { - return err - } - sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) - if err != nil { - return err - } - - js.signatures = append(js.signatures, jsSignature{ - Header: jsHeader{ - JWK: key.PublicKey(), - Algorithm: algorithm, - }, - Signature: joseBase64UrlEncode(sigBytes), - Protected: protected, - }) - - return nil -} - -// SignWithChain adds a signature using the given private key -// and setting the x509 chain. The public key of the first element -// in the chain must be the public key corresponding with the sign key. 
-func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { - // Ensure key.Chain[0] is public key for key - //key.Chain.PublicKey - //key.PublicKey().CryptoPublicKey() - - // Verify chain - protected, err := js.protectedHeader() - if err != nil { - return err - } - signBytes, err := js.signBytes(protected) - if err != nil { - return err - } - sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) - if err != nil { - return err - } - - header := jsHeader{ - Chain: make([]string, len(chain)), - Algorithm: algorithm, - } - - for i, cert := range chain { - header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) - } - - js.signatures = append(js.signatures, jsSignature{ - Header: header, - Signature: joseBase64UrlEncode(sigBytes), - Protected: protected, - }) - - return nil -} - -// Verify verifies all the signatures and returns the list of -// public keys used to sign. Any x509 chains are not checked. -func (js *JSONSignature) Verify() ([]PublicKey, error) { - keys := make([]PublicKey, len(js.signatures)) - for i, signature := range js.signatures { - signBytes, err := js.signBytes(signature.Protected) - if err != nil { - return nil, err - } - var publicKey PublicKey - if len(signature.Header.Chain) > 0 { - certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) - if err != nil { - return nil, err - } - cert, err := x509.ParseCertificate(certBytes) - if err != nil { - return nil, err - } - publicKey, err = FromCryptoPublicKey(cert.PublicKey) - if err != nil { - return nil, err - } - } else if signature.Header.JWK != nil { - publicKey = signature.Header.JWK - } else { - return nil, errors.New("missing public key") - } - - sigBytes, err := joseBase64UrlDecode(signature.Signature) - if err != nil { - return nil, err - } - - err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) - if err != nil { - return nil, err - } - - keys[i] = publicKey - } - return keys, nil -} - -// VerifyChains verifies all the signatures and the chains associated -// with each signature and returns the list of verified chains. -// Signatures without an x509 chain are not checked. -func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { - chains := make([][]*x509.Certificate, 0, len(js.signatures)) - for _, signature := range js.signatures { - signBytes, err := js.signBytes(signature.Protected) - if err != nil { - return nil, err - } - var publicKey PublicKey - if len(signature.Header.Chain) > 0 { - certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) - if err != nil { - return nil, err - } - cert, err := x509.ParseCertificate(certBytes) - if err != nil { - return nil, err - } - publicKey, err = FromCryptoPublicKey(cert.PublicKey) - if err != nil { - return nil, err - } - intermediates := x509.NewCertPool() - if len(signature.Header.Chain) > 1 { - intermediateChain := signature.Header.Chain[1:] - for i := range intermediateChain { - certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) - if err != nil { - return nil, err - } - intermediate, err := x509.ParseCertificate(certBytes) - if err != nil { - return nil, err - } - intermediates.AddCert(intermediate) - } - } - - verifyOptions := x509.VerifyOptions{ - Intermediates: intermediates, - Roots: ca, - } - - verifiedChains, err := cert.Verify(verifyOptions) - if err != nil { - return nil, err - } - chains = append(chains, verifiedChains...) 
- - sigBytes, err := joseBase64UrlDecode(signature.Signature) - if err != nil { - return nil, err - } - - err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) - if err != nil { - return nil, err - } - } - - } - return chains, nil -} - -// JWS returns JSON serialized JWS according to -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2 -func (js *JSONSignature) JWS() ([]byte, error) { - if len(js.signatures) == 0 { - return nil, errors.New("missing signature") - } - - sort.Sort(jsSignaturesSorted(js.signatures)) - - jsonMap := map[string]interface{}{ - "payload": js.payload, - "signatures": js.signatures, - } - - return json.MarshalIndent(jsonMap, "", " ") -} - -func notSpace(r rune) bool { - return !unicode.IsSpace(r) -} - -func detectJSONIndent(jsonContent []byte) (indent string) { - if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' { - quoteIndex := bytes.IndexRune(jsonContent[1:], '"') - if quoteIndex > 0 { - indent = string(jsonContent[2 : quoteIndex+1]) - } - } - return -} - -type jsParsedHeader struct { - JWK json.RawMessage `json:"jwk"` - Algorithm string `json:"alg"` - Chain []string `json:"x5c"` -} - -type jsParsedSignature struct { - Header jsParsedHeader `json:"header"` - Signature string `json:"signature"` - Protected string `json:"protected"` -} - -// ParseJWS parses a JWS serialized JSON object into a Json Signature. -func ParseJWS(content []byte) (*JSONSignature, error) { - type jsParsed struct { - Payload string `json:"payload"` - Signatures []jsParsedSignature `json:"signatures"` - } - parsed := &jsParsed{} - err := json.Unmarshal(content, parsed) - if err != nil { - return nil, err - } - if len(parsed.Signatures) == 0 { - return nil, errors.New("missing signatures") - } - payload, err := joseBase64UrlDecode(parsed.Payload) - if err != nil { - return nil, err - } - - js, err := NewJSONSignature(payload) - if err != nil { - return nil, err - } - js.signatures = make([]jsSignature, len(parsed.Signatures)) - for i, signature := range parsed.Signatures { - header := jsHeader{ - Algorithm: signature.Header.Algorithm, - } - if signature.Header.Chain != nil { - header.Chain = signature.Header.Chain - } - if signature.Header.JWK != nil { - publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK)) - if err != nil { - return nil, err - } - header.JWK = publicKey - } - js.signatures[i] = jsSignature{ - Header: header, - Signature: signature.Signature, - Protected: signature.Protected, - } - } - - return js, nil -} - -// NewJSONSignature returns a new unsigned JWS from a json byte array. -// JSONSignature will need to be signed before serializing or storing. -// Optionally, one or more signatures can be provided as byte buffers, -// containing serialized JWS signatures, to assemble a fully signed JWS -// package. It is the callers responsibility to ensure uniqueness of the -// provided signatures. 
-func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) { - var dataMap map[string]interface{} - err := json.Unmarshal(content, &dataMap) - if err != nil { - return nil, err - } - - js := newJSONSignature() - js.indent = detectJSONIndent(content) - - js.payload = joseBase64UrlEncode(content) - - // Find trailing } and whitespace, put in protected header - closeIndex := bytes.LastIndexFunc(content, notSpace) - if content[closeIndex] != '}' { - return nil, ErrInvalidJSONContent - } - lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace) - if content[lastRuneIndex] == ',' { - return nil, ErrInvalidJSONContent - } - js.formatLength = lastRuneIndex + 1 - js.formatTail = content[js.formatLength:] - - if len(signatures) > 0 { - for _, signature := range signatures { - var parsedJSig jsParsedSignature - - if err := json.Unmarshal(signature, &parsedJSig); err != nil { - return nil, err - } - - // TODO(stevvooe): A lot of the code below is repeated in - // ParseJWS. It will require more refactoring to fix that. - jsig := jsSignature{ - Header: jsHeader{ - Algorithm: parsedJSig.Header.Algorithm, - }, - Signature: parsedJSig.Signature, - Protected: parsedJSig.Protected, - } - - if parsedJSig.Header.Chain != nil { - jsig.Header.Chain = parsedJSig.Header.Chain - } - - if parsedJSig.Header.JWK != nil { - publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK)) - if err != nil { - return nil, err - } - jsig.Header.JWK = publicKey - } - - js.signatures = append(js.signatures, jsig) - } - } - - return js, nil -} - -// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or -// struct. JWS will need to be signed before serializing or storing. -func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) { - switch content.(type) { - case map[string]interface{}: - case struct{}: - default: - return nil, errors.New("invalid data type") - } - - js := newJSONSignature() - js.indent = " " - - payload, err := json.MarshalIndent(content, "", js.indent) - if err != nil { - return nil, err - } - js.payload = joseBase64UrlEncode(payload) - - // Remove '\n}' from formatted section, put in protected header - js.formatLength = len(payload) - 2 - js.formatTail = payload[js.formatLength:] - - return js, nil -} - -func readIntFromMap(key string, m map[string]interface{}) (int, bool) { - value, ok := m[key] - if !ok { - return 0, false - } - switch v := value.(type) { - case int: - return v, true - case float64: - return int(v), true - default: - return 0, false - } -} - -func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) { - value, ok := m[key] - if !ok { - return "", false - } - v, ok = value.(string) - return -} - -// ParsePrettySignature parses a formatted signature into a -// JSON signature. If the signatures are missing the format information -// an error is thrown. The formatted signature must be created by -// the same method as format signature. 
-func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) { - var contentMap map[string]json.RawMessage - err := json.Unmarshal(content, &contentMap) - if err != nil { - return nil, fmt.Errorf("error unmarshalling content: %s", err) - } - sigMessage, ok := contentMap[signatureKey] - if !ok { - return nil, ErrMissingSignatureKey - } - - var signatureBlocks []jsParsedSignature - err = json.Unmarshal([]byte(sigMessage), &signatureBlocks) - if err != nil { - return nil, fmt.Errorf("error unmarshalling signatures: %s", err) - } - - js := newJSONSignature() - js.signatures = make([]jsSignature, len(signatureBlocks)) - - for i, signatureBlock := range signatureBlocks { - protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected) - if err != nil { - return nil, fmt.Errorf("base64 decode error: %s", err) - } - var protectedHeader map[string]interface{} - err = json.Unmarshal(protectedBytes, &protectedHeader) - if err != nil { - return nil, fmt.Errorf("error unmarshalling protected header: %s", err) - } - - formatLength, ok := readIntFromMap("formatLength", protectedHeader) - if !ok { - return nil, errors.New("missing formatted length") - } - encodedTail, ok := readStringFromMap("formatTail", protectedHeader) - if !ok { - return nil, errors.New("missing formatted tail") - } - formatTail, err := joseBase64UrlDecode(encodedTail) - if err != nil { - return nil, fmt.Errorf("base64 decode error on tail: %s", err) - } - if js.formatLength == 0 { - js.formatLength = formatLength - } else if js.formatLength != formatLength { - return nil, errors.New("conflicting format length") - } - if len(js.formatTail) == 0 { - js.formatTail = formatTail - } else if bytes.Compare(js.formatTail, formatTail) != 0 { - return nil, errors.New("conflicting format tail") - } - - header := jsHeader{ - Algorithm: signatureBlock.Header.Algorithm, - Chain: signatureBlock.Header.Chain, - } - if signatureBlock.Header.JWK != nil { - publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK)) - if err != nil { - return nil, fmt.Errorf("error unmarshalling public key: %s", err) - } - header.JWK = publicKey - } - js.signatures[i] = jsSignature{ - Header: header, - Signature: signatureBlock.Signature, - Protected: signatureBlock.Protected, - } - } - if js.formatLength > len(content) { - return nil, errors.New("invalid format length") - } - formatted := make([]byte, js.formatLength+len(js.formatTail)) - copy(formatted, content[:js.formatLength]) - copy(formatted[js.formatLength:], js.formatTail) - js.indent = detectJSONIndent(formatted) - js.payload = joseBase64UrlEncode(formatted) - - return js, nil -} - -// PrettySignature formats a json signature into an easy to read -// single json serialized object. 
-func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) { - if len(js.signatures) == 0 { - return nil, errors.New("no signatures") - } - payload, err := joseBase64UrlDecode(js.payload) - if err != nil { - return nil, err - } - payload = payload[:js.formatLength] - - sort.Sort(jsSignaturesSorted(js.signatures)) - - var marshalled []byte - var marshallErr error - if js.indent != "" { - marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent) - } else { - marshalled, marshallErr = json.Marshal(js.signatures) - } - if marshallErr != nil { - return nil, marshallErr - } - - buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34)) - buf.Write(payload) - buf.WriteByte(',') - if js.indent != "" { - buf.WriteByte('\n') - buf.WriteString(js.indent) - buf.WriteByte('"') - buf.WriteString(signatureKey) - buf.WriteString("\": ") - buf.Write(marshalled) - buf.WriteByte('\n') - } else { - buf.WriteByte('"') - buf.WriteString(signatureKey) - buf.WriteString("\":") - buf.Write(marshalled) - } - buf.WriteByte('}') - - return buf.Bytes(), nil -} - -// Signatures provides the signatures on this JWS as opaque blobs, sorted by -// keyID. These blobs can be stored and reassembled with payloads. Internally, -// they are simply marshaled json web signatures but implementations should -// not rely on this. -func (js *JSONSignature) Signatures() ([][]byte, error) { - sort.Sort(jsSignaturesSorted(js.signatures)) - - var sb [][]byte - for _, jsig := range js.signatures { - p, err := json.Marshal(jsig) - if err != nil { - return nil, err - } - - sb = append(sb, p) - } - - return sb, nil -} - -// Merge combines the signatures from one or more other signatures into the -// method receiver. If the payloads differ for any argument, an error will be -// returned and the receiver will not be modified. -func (js *JSONSignature) Merge(others ...*JSONSignature) error { - merged := js.signatures - for _, other := range others { - if js.payload != other.payload { - return fmt.Errorf("payloads differ from merge target") - } - merged = append(merged, other.signatures...) - } - - js.signatures = merged - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/key.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/key.go deleted file mode 100644 index 73642db2..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/key.go +++ /dev/null @@ -1,253 +0,0 @@ -package libtrust - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io" -) - -// PublicKey is a generic interface for a Public Key. -type PublicKey interface { - // KeyType returns the key type for this key. For elliptic curve keys, - // this value should be "EC". For RSA keys, this value should be "RSA". - KeyType() string - // KeyID returns a distinct identifier which is unique to this Public Key. - // The format generated by this library is a base32 encoding of a 240 bit - // hash of the public key data divided into 12 groups like so: - // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP - KeyID() string - // Verify verifyies the signature of the data in the io.Reader using this - // Public Key. The alg parameter should identify the digital signature - // algorithm which was used to produce the signature and should be - // supported by this public key. Returns a nil error if the signature - // is valid. 
- Verify(data io.Reader, alg string, signature []byte) error - // CryptoPublicKey returns the internal object which can be used as a - // crypto.PublicKey for use with other standard library operations. The type - // is either *rsa.PublicKey or *ecdsa.PublicKey - CryptoPublicKey() crypto.PublicKey - // These public keys can be serialized to the standard JSON encoding for - // JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web - // Algorithms. - MarshalJSON() ([]byte, error) - // These keys can also be serialized to the standard PEM encoding. - PEMBlock() (*pem.Block, error) - // The string representation of a key is its key type and ID. - String() string - AddExtendedField(string, interface{}) - GetExtendedField(string) interface{} -} - -// PrivateKey is a generic interface for a Private Key. -type PrivateKey interface { - // A PrivateKey contains all fields and methods of a PublicKey of the - // same type. The MarshalJSON method also outputs the private key as a - // JSON Web Key, and the PEMBlock method outputs the private key as a - // PEM block. - PublicKey - // PublicKey returns the PublicKey associated with this PrivateKey. - PublicKey() PublicKey - // Sign signs the data read from the io.Reader using a signature algorithm - // supported by the private key. If the specified hashing algorithm is - // supported by this key, that hash function is used to generate the - // signature otherwise the the default hashing algorithm for this key is - // used. Returns the signature and identifier of the algorithm used. - Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) - // CryptoPrivateKey returns the internal object which can be used as a - // crypto.PublicKey for use with other standard library operations. The - // type is either *rsa.PublicKey or *ecdsa.PublicKey - CryptoPrivateKey() crypto.PrivateKey -} - -// FromCryptoPublicKey returns a libtrust PublicKey representation of the given -// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given -// key is of an unsupported type. -func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) { - switch cryptoPublicKey := cryptoPublicKey.(type) { - case *ecdsa.PublicKey: - return fromECPublicKey(cryptoPublicKey) - case *rsa.PublicKey: - return fromRSAPublicKey(cryptoPublicKey), nil - default: - return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey) - } -} - -// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given -// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given -// key is of an unsupported type. -func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) { - switch cryptoPrivateKey := cryptoPrivateKey.(type) { - case *ecdsa.PrivateKey: - return fromECPrivateKey(cryptoPrivateKey) - case *rsa.PrivateKey: - return fromRSAPrivateKey(cryptoPrivateKey), nil - default: - return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey) - } -} - -// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust -// PublicKey or an error if there is a problem with the encoding. 
-func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) { - pemBlock, _ := pem.Decode(data) - if pemBlock == nil { - return nil, errors.New("unable to find PEM encoded data") - } else if pemBlock.Type != "PUBLIC KEY" { - return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) - } - - return pubKeyFromPEMBlock(pemBlock) -} - -// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of -// PEM blocks appended one after the other and returns a slice of PublicKey -// objects that it finds. -func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) { - pubKeys := []PublicKey{} - - for { - var pemBlock *pem.Block - pemBlock, data = pem.Decode(data) - if pemBlock == nil { - break - } else if pemBlock.Type != "PUBLIC KEY" { - return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) - } - - pubKey, err := pubKeyFromPEMBlock(pemBlock) - if err != nil { - return nil, err - } - - pubKeys = append(pubKeys, pubKey) - } - - return pubKeys, nil -} - -// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust -// PrivateKey or an error if there is a problem with the encoding. -func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) { - pemBlock, _ := pem.Decode(data) - if pemBlock == nil { - return nil, errors.New("unable to find PEM encoded data") - } - - var key PrivateKey - - switch { - case pemBlock.Type == "RSA PRIVATE KEY": - rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err) - } - key = fromRSAPrivateKey(rsaPrivateKey) - case pemBlock.Type == "EC PRIVATE KEY": - ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err) - } - key, err = fromECPrivateKey(ecPrivateKey) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type) - } - - addPEMHeadersToKey(pemBlock, key.PublicKey()) - - return key, nil -} - -// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic -// Public Key to be used with libtrust. -func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) { - jwk := make(map[string]interface{}) - - err := json.Unmarshal(data, &jwk) - if err != nil { - return nil, fmt.Errorf( - "decoding JWK Public Key JSON data: %s\n", err, - ) - } - - // Get the Key Type value. - kty, err := stringFromMap(jwk, "kty") - if err != nil { - return nil, fmt.Errorf("JWK Public Key type: %s", err) - } - - switch { - case kty == "EC": - // Call out to unmarshal EC public key. - return ecPublicKeyFromMap(jwk) - case kty == "RSA": - // Call out to unmarshal RSA public key. - return rsaPublicKeyFromMap(jwk) - default: - return nil, fmt.Errorf( - "JWK Public Key type not supported: %q\n", kty, - ) - } -} - -// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set -// and returns a slice of Public Key objects. 
-func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) { - rawKeys, err := loadJSONKeySetRaw(data) - if err != nil { - return nil, err - } - - pubKeys := make([]PublicKey, 0, len(rawKeys)) - - for _, rawKey := range rawKeys { - pubKey, err := UnmarshalPublicKeyJWK(rawKey) - if err != nil { - return nil, err - } - pubKeys = append(pubKeys, pubKey) - } - - return pubKeys, nil -} - -// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic -// Private Key to be used with libtrust. -func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) { - jwk := make(map[string]interface{}) - - err := json.Unmarshal(data, &jwk) - if err != nil { - return nil, fmt.Errorf( - "decoding JWK Private Key JSON data: %s\n", err, - ) - } - - // Get the Key Type value. - kty, err := stringFromMap(jwk, "kty") - if err != nil { - return nil, fmt.Errorf("JWK Private Key type: %s", err) - } - - switch { - case kty == "EC": - // Call out to unmarshal EC private key. - return ecPrivateKeyFromMap(jwk) - case kty == "RSA": - // Call out to unmarshal RSA private key. - return rsaPrivateKeyFromMap(jwk) - default: - return nil, fmt.Errorf( - "JWK Private Key type not supported: %q\n", kty, - ) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/key_files.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/key_files.go deleted file mode 100644 index c526de54..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/key_files.go +++ /dev/null @@ -1,255 +0,0 @@ -package libtrust - -import ( - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "os" - "strings" -) - -var ( - // ErrKeyFileDoesNotExist indicates that the private key file does not exist. - ErrKeyFileDoesNotExist = errors.New("key file does not exist") -) - -func readKeyFileBytes(filename string) ([]byte, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - if os.IsNotExist(err) { - err = ErrKeyFileDoesNotExist - } else { - err = fmt.Errorf("unable to read key file %s: %s", filename, err) - } - - return nil, err - } - - return data, nil -} - -/* - Loading and Saving of Public and Private Keys in either PEM or JWK format. -*/ - -// LoadKeyFile opens the given filename and attempts to read a Private Key -// encoded in either PEM or JWK format (if .json or .jwk file extension). -func LoadKeyFile(filename string) (PrivateKey, error) { - contents, err := readKeyFileBytes(filename) - if err != nil { - return nil, err - } - - var key PrivateKey - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - key, err = UnmarshalPrivateKeyJWK(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode private key JWK: %s", err) - } - } else { - key, err = UnmarshalPrivateKeyPEM(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode private key PEM: %s", err) - } - } - - return key, nil -} - -// LoadPublicKeyFile opens the given filename and attempts to read a Public Key -// encoded in either PEM or JWK format (if .json or .jwk file extension). 
-func LoadPublicKeyFile(filename string) (PublicKey, error) { - contents, err := readKeyFileBytes(filename) - if err != nil { - return nil, err - } - - var key PublicKey - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - key, err = UnmarshalPublicKeyJWK(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode public key JWK: %s", err) - } - } else { - key, err = UnmarshalPublicKeyPEM(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode public key PEM: %s", err) - } - } - - return key, nil -} - -// SaveKey saves the given key to a file using the provided filename. -// This process will overwrite any existing file at the provided location. -func SaveKey(filename string, key PrivateKey) error { - var encodedKey []byte - var err error - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - // Encode in JSON Web Key format. - encodedKey, err = json.MarshalIndent(key, "", " ") - if err != nil { - return fmt.Errorf("unable to encode private key JWK: %s", err) - } - } else { - // Encode in PEM format. - pemBlock, err := key.PEMBlock() - if err != nil { - return fmt.Errorf("unable to encode private key PEM: %s", err) - } - encodedKey = pem.EncodeToMemory(pemBlock) - } - - err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600)) - if err != nil { - return fmt.Errorf("unable to write private key file %s: %s", filename, err) - } - - return nil -} - -// SavePublicKey saves the given public key to the file. -func SavePublicKey(filename string, key PublicKey) error { - var encodedKey []byte - var err error - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - // Encode in JSON Web Key format. - encodedKey, err = json.MarshalIndent(key, "", " ") - if err != nil { - return fmt.Errorf("unable to encode public key JWK: %s", err) - } - } else { - // Encode in PEM format. - pemBlock, err := key.PEMBlock() - if err != nil { - return fmt.Errorf("unable to encode public key PEM: %s", err) - } - encodedKey = pem.EncodeToMemory(pemBlock) - } - - err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644)) - if err != nil { - return fmt.Errorf("unable to write public key file %s: %s", filename, err) - } - - return nil -} - -// Public Key Set files - -type jwkSet struct { - Keys []json.RawMessage `json:"keys"` -} - -// LoadKeySetFile loads a key set -func LoadKeySetFile(filename string) ([]PublicKey, error) { - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - return loadJSONKeySetFile(filename) - } - - // Must be a PEM format file - return loadPEMKeySetFile(filename) -} - -func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) { - if len(data) == 0 { - // This is okay, just return an empty slice. 
- return []json.RawMessage{}, nil - } - - keySet := jwkSet{} - - err := json.Unmarshal(data, &keySet) - if err != nil { - return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err) - } - - return keySet.Keys, nil -} - -func loadJSONKeySetFile(filename string) ([]PublicKey, error) { - contents, err := readKeyFileBytes(filename) - if err != nil && err != ErrKeyFileDoesNotExist { - return nil, err - } - - return UnmarshalPublicKeyJWKSet(contents) -} - -func loadPEMKeySetFile(filename string) ([]PublicKey, error) { - data, err := readKeyFileBytes(filename) - if err != nil && err != ErrKeyFileDoesNotExist { - return nil, err - } - - return UnmarshalPublicKeyPEMBundle(data) -} - -// AddKeySetFile adds a key to a key set -func AddKeySetFile(filename string, key PublicKey) error { - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - return addKeySetJSONFile(filename, key) - } - - // Must be a PEM format file - return addKeySetPEMFile(filename, key) -} - -func addKeySetJSONFile(filename string, key PublicKey) error { - encodedKey, err := json.Marshal(key) - if err != nil { - return fmt.Errorf("unable to encode trusted client key: %s", err) - } - - contents, err := readKeyFileBytes(filename) - if err != nil && err != ErrKeyFileDoesNotExist { - return err - } - - rawEntries, err := loadJSONKeySetRaw(contents) - if err != nil { - return err - } - - rawEntries = append(rawEntries, json.RawMessage(encodedKey)) - entriesWrapper := jwkSet{Keys: rawEntries} - - encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ") - if err != nil { - return fmt.Errorf("unable to encode trusted client keys: %s", err) - } - - err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644)) - if err != nil { - return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err) - } - - return nil -} - -func addKeySetPEMFile(filename string, key PublicKey) error { - // Encode to PEM, open file for appending, write PEM. - file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644)) - if err != nil { - return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err) - } - defer file.Close() - - pemBlock, err := key.PEMBlock() - if err != nil { - return fmt.Errorf("unable to encoded trusted key: %s", err) - } - - _, err = file.Write(pem.EncodeToMemory(pemBlock)) - if err != nil { - return fmt.Errorf("unable to write trusted keys file: %s", err) - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/key_manager.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/key_manager.go deleted file mode 100644 index 9a98ae35..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/key_manager.go +++ /dev/null @@ -1,175 +0,0 @@ -package libtrust - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net" - "os" - "path" - "sync" -) - -// ClientKeyManager manages client keys on the filesystem -type ClientKeyManager struct { - key PrivateKey - clientFile string - clientDir string - - clientLock sync.RWMutex - clients []PublicKey - - configLock sync.Mutex - configs []*tls.Config -} - -// NewClientKeyManager loads a new manager from a set of key files -// and managed by the given private key. 
-func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) { - m := &ClientKeyManager{ - key: trustKey, - clientFile: clientFile, - clientDir: clientDir, - } - if err := m.loadKeys(); err != nil { - return nil, err - } - // TODO Start watching file and directory - - return m, nil -} - -func (c *ClientKeyManager) loadKeys() (err error) { - // Load authorized keys file - var clients []PublicKey - if c.clientFile != "" { - clients, err = LoadKeySetFile(c.clientFile) - if err != nil { - return fmt.Errorf("unable to load authorized keys: %s", err) - } - } - - // Add clients from authorized keys directory - files, err := ioutil.ReadDir(c.clientDir) - if err != nil && !os.IsNotExist(err) { - return fmt.Errorf("unable to open authorized keys directory: %s", err) - } - for _, f := range files { - if !f.IsDir() { - publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name())) - if err != nil { - return fmt.Errorf("unable to load authorized key file: %s", err) - } - clients = append(clients, publicKey) - } - } - - c.clientLock.Lock() - c.clients = clients - c.clientLock.Unlock() - - return nil -} - -// RegisterTLSConfig registers a tls configuration to manager -// such that any changes to the keys may be reflected in -// the tls client CA pool -func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error { - c.clientLock.RLock() - certPool, err := GenerateCACertPool(c.key, c.clients) - if err != nil { - return fmt.Errorf("CA pool generation error: %s", err) - } - c.clientLock.RUnlock() - - tlsConfig.ClientCAs = certPool - - c.configLock.Lock() - c.configs = append(c.configs, tlsConfig) - c.configLock.Unlock() - - return nil -} - -// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for -// libtrust identity authentication for the domain specified -func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) { - tlsConfig := newTLSConfig() - - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - if err := clients.RegisterTLSConfig(tlsConfig); err != nil { - return nil, err - } - - // Generate cert - ips, domains, err := parseAddr(addr) - if err != nil { - return nil, err - } - // add domain that it expects clients to use - domains = append(domains, domain) - x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips) - if err != nil { - return nil, fmt.Errorf("certificate generation error: %s", err) - } - tlsConfig.Certificates = []tls.Certificate{{ - Certificate: [][]byte{x509Cert.Raw}, - PrivateKey: trustKey.CryptoPrivateKey(), - Leaf: x509Cert, - }} - - return tlsConfig, nil -} - -// NewCertAuthTLSConfig creates a tls.Config for the server to use for -// certificate authentication -func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) { - tlsConfig := newTLSConfig() - - cert, err := tls.LoadX509KeyPair(certPath, keyPath) - if err != nil { - return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err) - } - tlsConfig.Certificates = []tls.Certificate{cert} - - // Verify client certificates against a CA? 
- if caPath != "" { - certPool := x509.NewCertPool() - file, err := ioutil.ReadFile(caPath) - if err != nil { - return nil, fmt.Errorf("Couldn't read CA certificate: %s", err) - } - certPool.AppendCertsFromPEM(file) - - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - tlsConfig.ClientCAs = certPool - } - - return tlsConfig, nil -} - -func newTLSConfig() *tls.Config { - return &tls.Config{ - NextProtos: []string{"http/1.1"}, - // Avoid fallback on insecure SSL protocols - MinVersion: tls.VersionTLS10, - } -} - -// parseAddr parses an address into an array of IPs and domains -func parseAddr(addr string) ([]net.IP, []string, error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, nil, err - } - var domains []string - var ips []net.IP - ip := net.ParseIP(host) - if ip != nil { - ips = []net.IP{ip} - } else { - domains = []string{host} - } - return ips, domains, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/rsa_key.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/rsa_key.go deleted file mode 100644 index dac4cacf..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/rsa_key.go +++ /dev/null @@ -1,427 +0,0 @@ -package libtrust - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" -) - -/* - * RSA DSA PUBLIC KEY - */ - -// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms. -type rsaPublicKey struct { - *rsa.PublicKey - extended map[string]interface{} -} - -func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey { - return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}} -} - -// KeyType returns the JWK key type for RSA keys, i.e., "RSA". -func (k *rsaPublicKey) KeyType() string { - return "RSA" -} - -// KeyID returns a distinct identifier which is unique to this Public Key. -func (k *rsaPublicKey) KeyID() string { - return keyIDFromCryptoKey(k) -} - -func (k *rsaPublicKey) String() string { - return fmt.Sprintf("RSA Public Key <%s>", k.KeyID()) -} - -// Verify verifyies the signature of the data in the io.Reader using this Public Key. -// The alg parameter should be the name of the JWA digital signature algorithm -// which was used to produce the signature and should be supported by this -// public key. Returns a nil error if the signature is valid. -func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error { - // Verify the signature of the given date, return non-nil error if valid. - sigAlg, err := rsaSignatureAlgorithmByName(alg) - if err != nil { - return fmt.Errorf("unable to verify Signature: %s", err) - } - - hasher := sigAlg.HashID().New() - _, err = io.Copy(hasher, data) - if err != nil { - return fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature) - if err != nil { - return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err) - } - - return nil -} - -// CryptoPublicKey returns the internal object which can be used as a -// crypto.PublicKey for use with other standard library operations. 
The type -// is either *rsa.PublicKey or *ecdsa.PublicKey -func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return k.PublicKey -} - -func (k *rsaPublicKey) toMap() map[string]interface{} { - jwk := make(map[string]interface{}) - for k, v := range k.extended { - jwk[k] = v - } - jwk["kty"] = k.KeyType() - jwk["kid"] = k.KeyID() - jwk["n"] = joseBase64UrlEncode(k.N.Bytes()) - jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E)) - - return jwk -} - -// MarshalJSON serializes this Public Key using the JWK JSON serialization format for -// RSA keys. -func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Public Key to DER-encoded PKIX format. -func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) { - derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) - if err != nil { - return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err) - } - k.extended["kid"] = k.KeyID() // For display purposes. - return createPemBlock("PUBLIC KEY", derBytes, k.extended) -} - -func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) { - k.extended[field] = value -} - -func (k *rsaPublicKey) GetExtendedField(field string) interface{} { - v, ok := k.extended[field] - if !ok { - return nil - } - return v -} - -func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) { - // JWK key type (kty) has already been determined to be "RSA". - // Need to extract 'n', 'e', and 'kid' and check for - // consistency. - - // Get the modulus parameter N. - nB64Url, err := stringFromMap(jwk, "n") - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) - } - - n, err := parseRSAModulusParam(nB64Url) - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) - } - - // Get the public exponent E. - eB64Url, err := stringFromMap(jwk, "e") - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) - } - - e, err := parseRSAPublicExponentParam(eB64Url) - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) - } - - key := &rsaPublicKey{ - PublicKey: &rsa.PublicKey{N: n, E: e}, - } - - // Key ID is optional, but if it exists, it should match the key. - _, ok := jwk["kid"] - if ok { - kid, err := stringFromMap(jwk, "kid") - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err) - } - if kid != key.KeyID() { - return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid) - } - } - - if _, ok := jwk["d"]; ok { - return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent") - } - - key.extended = jwk - - return key, nil -} - -/* - * RSA DSA PRIVATE KEY - */ - -// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms. -type rsaPrivateKey struct { - rsaPublicKey - *rsa.PrivateKey -} - -func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey { - return &rsaPrivateKey{ - *fromRSAPublicKey(&cryptoPrivateKey.PublicKey), - cryptoPrivateKey, - } -} - -// PublicKey returns the Public Key data associated with this Private Key. -func (k *rsaPrivateKey) PublicKey() PublicKey { - return &k.rsaPublicKey -} - -func (k *rsaPrivateKey) String() string { - return fmt.Sprintf("RSA Private Key <%s>", k.KeyID()) -} - -// Sign signs the data read from the io.Reader using a signature algorithm supported -// by the RSA private key. 
If the specified hashing algorithm is supported by -// this key, that hash function is used to generate the signature otherwise the -// the default hashing algorithm for this key is used. Returns the signature -// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384", -// "RS512". -func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { - // Generate a signature of the data using the internal alg. - sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID) - hasher := sigAlg.HashID().New() - - _, err = io.Copy(hasher, data) - if err != nil { - return nil, "", fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash) - if err != nil { - return nil, "", fmt.Errorf("error producing signature: %s", err) - } - - alg = sigAlg.HeaderParam() - - return -} - -// CryptoPrivateKey returns the internal object which can be used as a -// crypto.PublicKey for use with other standard library operations. The type -// is either *rsa.PublicKey or *ecdsa.PublicKey -func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey { - return k.PrivateKey -} - -func (k *rsaPrivateKey) toMap() map[string]interface{} { - k.Precompute() // Make sure the precomputed values are stored. - jwk := k.rsaPublicKey.toMap() - - jwk["d"] = joseBase64UrlEncode(k.D.Bytes()) - jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes()) - jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes()) - jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes()) - jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes()) - jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes()) - - otherPrimes := k.Primes[2:] - - if len(otherPrimes) > 0 { - otherPrimesInfo := make([]interface{}, len(otherPrimes)) - for i, r := range otherPrimes { - otherPrimeInfo := make(map[string]string, 3) - otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes()) - crtVal := k.Precomputed.CRTValues[i] - otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes()) - otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes()) - otherPrimesInfo[i] = otherPrimeInfo - } - jwk["oth"] = otherPrimesInfo - } - - return jwk -} - -// MarshalJSON serializes this Private Key using the JWK JSON serialization format for -// RSA keys. -func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Private Key to DER-encoded PKIX format. -func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) { - derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey) - k.extended["keyID"] = k.KeyID() // For display purposes. - return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended) -} - -func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) { - // The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that - // only the private key exponent 'd' is REQUIRED, the others are just for - // signature/decryption optimizations and SHOULD be included when the JWK - // is produced. We MAY choose to accept a JWK which only includes 'd', but - // we're going to go ahead and not choose to accept it without the extra - // fields. Only the 'oth' field will be optional (for multi-prime keys). 
- privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err) - } - firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) - } - secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) - } - firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) - } - secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) - } - crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) - } - - var oth interface{} - if _, ok := jwk["oth"]; ok { - oth = jwk["oth"] - delete(jwk, "oth") - } - - // JWK key type (kty) has already been determined to be "RSA". - // Need to extract the public key information, then extract the private - // key values. - publicKey, err := rsaPublicKeyFromMap(jwk) - if err != nil { - return nil, err - } - - privateKey := &rsa.PrivateKey{ - PublicKey: *publicKey.PublicKey, - D: privateExponent, - Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor}, - Precomputed: rsa.PrecomputedValues{ - Dp: firstFactorCRT, - Dq: secondFactorCRT, - Qinv: crtCoeff, - }, - } - - if oth != nil { - // Should be an array of more JSON objects. - otherPrimesInfo, ok := oth.([]interface{}) - if !ok { - return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array") - } - numOtherPrimeFactors := len(otherPrimesInfo) - if numOtherPrimeFactors == 0 { - return nil, errors.New("JWK RSA Privake Key: Invalid other primes info: must be absent or non-empty") - } - otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors) - productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor) - crtValues := make([]rsa.CRTValue, numOtherPrimeFactors) - - for i, val := range otherPrimesInfo { - otherPrimeinfo, ok := val.(map[string]interface{}) - if !ok { - return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object") - } - - otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) - } - otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) - } - otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) - } - - crtValue := crtValues[i] - crtValue.Exp = otherFactorCRT - crtValue.Coeff = otherCrtCoeff - crtValue.R = productOfPrimes - otherPrimeFactors[i] = otherPrimeFactor - productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor) - } - - privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...) - privateKey.Precomputed.CRTValues = crtValues - } - - key := &rsaPrivateKey{ - rsaPublicKey: *publicKey, - PrivateKey: privateKey, - } - - return key, nil -} - -/* - * Key Generation Functions. 
- */ - -func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) { - k = new(rsaPrivateKey) - k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits) - if err != nil { - return nil, err - } - - k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey - k.extended = make(map[string]interface{}) - - return -} - -// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA. -func GenerateRSA2048PrivateKey() (PrivateKey, error) { - k, err := generateRSAPrivateKey(2048) - if err != nil { - return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err) - } - - return k, nil -} - -// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA. -func GenerateRSA3072PrivateKey() (PrivateKey, error) { - k, err := generateRSAPrivateKey(3072) - if err != nil { - return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err) - } - - return k, nil -} - -// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA. -func GenerateRSA4096PrivateKey() (PrivateKey, error) { - k, err := generateRSAPrivateKey(4096) - if err != nil { - return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err) - } - - return k, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/util.go b/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/util.go deleted file mode 100644 index 45dc3e18..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/docker/libtrust/util.go +++ /dev/null @@ -1,361 +0,0 @@ -package libtrust - -import ( - "bytes" - "crypto" - "crypto/elliptic" - "crypto/tls" - "crypto/x509" - "encoding/base32" - "encoding/base64" - "encoding/binary" - "encoding/pem" - "errors" - "fmt" - "math/big" - "net/url" - "os" - "path/filepath" - "strings" - "time" -) - -// LoadOrCreateTrustKey will load a PrivateKey from the specified path -func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) { - if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil { - return nil, err - } - - trustKey, err := LoadKeyFile(trustKeyPath) - if err == ErrKeyFileDoesNotExist { - trustKey, err = GenerateECP256PrivateKey() - if err != nil { - return nil, fmt.Errorf("error generating key: %s", err) - } - - if err := SaveKey(trustKeyPath, trustKey); err != nil { - return nil, fmt.Errorf("error saving key file: %s", err) - } - - dir, file := filepath.Split(trustKeyPath) - if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil { - return nil, fmt.Errorf("error saving public key file: %s", err) - } - } else if err != nil { - return nil, fmt.Errorf("error loading key file: %s", err) - } - return trustKey, nil -} - -// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity -// based authentication from the specified dockerUrl, the rootConfigPath and -// the server name to which it is connecting. -// If trustUnknownHosts is true it will automatically add the host to the -// known-hosts.json in rootConfigPath. 
-func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) { - tlsConfig := newTLSConfig() - - trustKeyPath := filepath.Join(rootConfigPath, "key.json") - knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json") - - u, err := url.Parse(dockerUrl) - if err != nil { - return nil, fmt.Errorf("unable to parse machine url") - } - - if u.Scheme == "unix" { - return nil, nil - } - - addr := u.Host - proto := "tcp" - - trustKey, err := LoadOrCreateTrustKey(trustKeyPath) - if err != nil { - return nil, fmt.Errorf("unable to load trust key: %s", err) - } - - knownHosts, err := LoadKeySetFile(knownHostsPath) - if err != nil { - return nil, fmt.Errorf("could not load trusted hosts file: %s", err) - } - - allowedHosts, err := FilterByHosts(knownHosts, addr, false) - if err != nil { - return nil, fmt.Errorf("error filtering hosts: %s", err) - } - - certPool, err := GenerateCACertPool(trustKey, allowedHosts) - if err != nil { - return nil, fmt.Errorf("Could not create CA pool: %s", err) - } - - tlsConfig.ServerName = serverName - tlsConfig.RootCAs = certPool - - x509Cert, err := GenerateSelfSignedClientCert(trustKey) - if err != nil { - return nil, fmt.Errorf("certificate generation error: %s", err) - } - - tlsConfig.Certificates = []tls.Certificate{{ - Certificate: [][]byte{x509Cert.Raw}, - PrivateKey: trustKey.CryptoPrivateKey(), - Leaf: x509Cert, - }} - - tlsConfig.InsecureSkipVerify = true - - testConn, err := tls.Dial(proto, addr, tlsConfig) - if err != nil { - return nil, fmt.Errorf("tls Handshake error: %s", err) - } - - opts := x509.VerifyOptions{ - Roots: tlsConfig.RootCAs, - CurrentTime: time.Now(), - DNSName: tlsConfig.ServerName, - Intermediates: x509.NewCertPool(), - } - - certs := testConn.ConnectionState().PeerCertificates - for i, cert := range certs { - if i == 0 { - continue - } - opts.Intermediates.AddCert(cert) - } - - if _, err := certs[0].Verify(opts); err != nil { - if _, ok := err.(x509.UnknownAuthorityError); ok { - if trustUnknownHosts { - pubKey, err := FromCryptoPublicKey(certs[0].PublicKey) - if err != nil { - return nil, fmt.Errorf("error extracting public key from cert: %s", err) - } - - pubKey.AddExtendedField("hosts", []string{addr}) - - if err := AddKeySetFile(knownHostsPath, pubKey); err != nil { - return nil, fmt.Errorf("error adding machine to known hosts: %s", err) - } - } else { - return nil, fmt.Errorf("unable to connect. unknown host: %s", addr) - } - } - } - - testConn.Close() - tlsConfig.InsecureSkipVerify = false - - return tlsConfig, nil -} - -// joseBase64UrlEncode encodes the given data using the standard base64 url -// encoding format but with all trailing '=' characters ommitted in accordance -// with the jose specification. -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlEncode(b []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") -} - -// joseBase64UrlDecode decodes the given string using the standard base64 url -// decoder but first adds the appropriate number of trailing '=' characters in -// accordance with the jose specification. 
-// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlDecode(s string) ([]byte, error) { - switch len(s) % 4 { - case 0: - case 2: - s += "==" - case 3: - s += "=" - default: - return nil, errors.New("illegal base64url string") - } - return base64.URLEncoding.DecodeString(s) -} - -func keyIDEncode(b []byte) string { - s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=") - var buf bytes.Buffer - var i int - for i = 0; i < len(s)/4-1; i++ { - start := i * 4 - end := start + 4 - buf.WriteString(s[start:end] + ":") - } - buf.WriteString(s[i*4:]) - return buf.String() -} - -func keyIDFromCryptoKey(pubKey PublicKey) string { - // Generate and return a 'libtrust' fingerprint of the public key. - // For an RSA key this should be: - // SHA256(DER encoded ASN1) - // Then truncated to 240 bits and encoded into 12 base32 groups like so: - // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP - derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) - if err != nil { - return "" - } - hasher := crypto.SHA256.New() - hasher.Write(derBytes) - return keyIDEncode(hasher.Sum(nil)[:30]) -} - -func stringFromMap(m map[string]interface{}, key string) (string, error) { - val, ok := m[key] - if !ok { - return "", fmt.Errorf("%q value not specified", key) - } - - str, ok := val.(string) - if !ok { - return "", fmt.Errorf("%q value must be a string", key) - } - delete(m, key) - - return str, nil -} - -func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) { - curveByteLen := (curve.Params().BitSize + 7) >> 3 - - cBytes, err := joseBase64UrlDecode(cB64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - cByteLength := len(cBytes) - if cByteLength != curveByteLen { - return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen) - } - return new(big.Int).SetBytes(cBytes), nil -} - -func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) { - dBytes, err := joseBase64UrlDecode(dB64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - - // The length of this octet string MUST be ceiling(log-base-2(n)/8) - // octets (where n is the order of the curve). This is because the private - // key d must be in the interval [1, n-1] so the bitlength of d should be - // no larger than the bitlength of n-1. The easiest way to find the octet - // length is to take bitlength(n-1), add 7 to force a carry, and shift this - // bit sequence right by 3, which is essentially dividing by 8 and adding - // 1 if there is any remainder. Thus, the private key value d should be - // output to (bitlength(n-1)+7)>>3 octets. - n := curve.Params().N - octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 - dByteLength := len(dBytes) - - if dByteLength != octetLength { - return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength) - } - - return new(big.Int).SetBytes(dBytes), nil -} - -func parseRSAModulusParam(nB64Url string) (*big.Int, error) { - nBytes, err := joseBase64UrlDecode(nB64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - - return new(big.Int).SetBytes(nBytes), nil -} - -func serializeRSAPublicExponentParam(e int) []byte { - // We MUST use the minimum number of octets to represent E. 
- // E is supposed to be 65537 for performance and security reasons - // and is what golang's rsa package generates, but it might be - // different if imported from some other generator. - buf := make([]byte, 4) - binary.BigEndian.PutUint32(buf, uint32(e)) - var i int - for i = 0; i < 8; i++ { - if buf[i] != 0 { - break - } - } - return buf[i:] -} - -func parseRSAPublicExponentParam(eB64Url string) (int, error) { - eBytes, err := joseBase64UrlDecode(eB64Url) - if err != nil { - return 0, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - // Only the minimum number of bytes were used to represent E, but - // binary.BigEndian.Uint32 expects at least 4 bytes, so we need - // to add zero padding if necassary. - byteLen := len(eBytes) - buf := make([]byte, 4-byteLen, 4) - eBytes = append(buf, eBytes...) - - return int(binary.BigEndian.Uint32(eBytes)), nil -} - -func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) { - b64Url, err := stringFromMap(m, key) - if err != nil { - return nil, err - } - - paramBytes, err := joseBase64UrlDecode(b64Url) - if err != nil { - return nil, fmt.Errorf("invaled base64 URL encoding: %s", err) - } - - return new(big.Int).SetBytes(paramBytes), nil -} - -func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) { - pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}} - for k, v := range headers { - switch val := v.(type) { - case string: - pemBlock.Headers[k] = val - case []string: - if k == "hosts" { - pemBlock.Headers[k] = strings.Join(val, ",") - } else { - // Return error, non-encodable type - } - default: - // Return error, non-encodable type - } - } - - return pemBlock, nil -} - -func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) { - cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err) - } - - pubKey, err := FromCryptoPublicKey(cryptoPublicKey) - if err != nil { - return nil, err - } - - addPEMHeadersToKey(pemBlock, pubKey) - - return pubKey, nil -} - -func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) { - for key, value := range pemBlock.Headers { - var safeVal interface{} - if key == "hosts" { - safeVal = strings.Split(value, ",") - } else { - safeVal = value - } - pubKey.AddExtendedField(key, safeVal) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/internal/commandinfo.go b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/internal/commandinfo.go deleted file mode 100644 index ce78eff6..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/internal/commandinfo.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2014 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package internal - -import ( - "strings" -) - -const ( - WatchState = 1 << iota - MultiState - SubscribeState - MonitorState -) - -type CommandInfo struct { - Set, Clear int -} - -var commandInfos = map[string]CommandInfo{ - "WATCH": {Set: WatchState}, - "UNWATCH": {Clear: WatchState}, - "MULTI": {Set: MultiState}, - "EXEC": {Clear: WatchState | MultiState}, - "DISCARD": {Clear: WatchState | MultiState}, - "PSUBSCRIBE": {Set: SubscribeState}, - "SUBSCRIBE": {Set: SubscribeState}, - "MONITOR": {Set: MonitorState}, -} - -func LookupCommandInfo(commandName string) CommandInfo { - return commandInfos[strings.ToUpper(commandName)] -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/conn.go b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/conn.go deleted file mode 100644 index ac0e971c..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/conn.go +++ /dev/null @@ -1,455 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "net" - "strconv" - "sync" - "time" -) - -// conn is the low-level implementation of Conn -type conn struct { - - // Shared - mu sync.Mutex - pending int - err error - conn net.Conn - - // Read - readTimeout time.Duration - br *bufio.Reader - - // Write - writeTimeout time.Duration - bw *bufio.Writer - - // Scratch space for formatting argument length. - // '*' or '$', length, "\r\n" - lenScratch [32]byte - - // Scratch space for formatting integers and floats. - numScratch [40]byte -} - -// Dial connects to the Redis server at the given network and address. -func Dial(network, address string) (Conn, error) { - dialer := xDialer{} - return dialer.Dial(network, address) -} - -// DialTimeout acts like Dial but takes timeouts for establishing the -// connection to the server, writing a command and reading a reply. -func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) { - netDialer := net.Dialer{Timeout: connectTimeout} - dialer := xDialer{ - NetDial: netDialer.Dial, - ReadTimeout: readTimeout, - WriteTimeout: writeTimeout, - } - return dialer.Dial(network, address) -} - -// A Dialer specifies options for connecting to a Redis server. -type xDialer struct { - // NetDial specifies the dial function for creating TCP connections. If - // NetDial is nil, then net.Dial is used. - NetDial func(network, addr string) (net.Conn, error) - - // ReadTimeout specifies the timeout for reading a single command - // reply. If ReadTimeout is zero, then no timeout is used. - ReadTimeout time.Duration - - // WriteTimeout specifies the timeout for writing a single command. If - // WriteTimeout is zero, then no timeout is used. - WriteTimeout time.Duration -} - -// Dial connects to the Redis server at address on the named network. 
-func (d *xDialer) Dial(network, address string) (Conn, error) { - dial := d.NetDial - if dial == nil { - dial = net.Dial - } - netConn, err := dial(network, address) - if err != nil { - return nil, err - } - return &conn{ - conn: netConn, - bw: bufio.NewWriter(netConn), - br: bufio.NewReader(netConn), - readTimeout: d.ReadTimeout, - writeTimeout: d.WriteTimeout, - }, nil -} - -// NewConn returns a new Redigo connection for the given net connection. -func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn { - return &conn{ - conn: netConn, - bw: bufio.NewWriter(netConn), - br: bufio.NewReader(netConn), - readTimeout: readTimeout, - writeTimeout: writeTimeout, - } -} - -func (c *conn) Close() error { - c.mu.Lock() - err := c.err - if c.err == nil { - c.err = errors.New("redigo: closed") - err = c.conn.Close() - } - c.mu.Unlock() - return err -} - -func (c *conn) fatal(err error) error { - c.mu.Lock() - if c.err == nil { - c.err = err - // Close connection to force errors on subsequent calls and to unblock - // other reader or writer. - c.conn.Close() - } - c.mu.Unlock() - return err -} - -func (c *conn) Err() error { - c.mu.Lock() - err := c.err - c.mu.Unlock() - return err -} - -func (c *conn) writeLen(prefix byte, n int) error { - c.lenScratch[len(c.lenScratch)-1] = '\n' - c.lenScratch[len(c.lenScratch)-2] = '\r' - i := len(c.lenScratch) - 3 - for { - c.lenScratch[i] = byte('0' + n%10) - i -= 1 - n = n / 10 - if n == 0 { - break - } - } - c.lenScratch[i] = prefix - _, err := c.bw.Write(c.lenScratch[i:]) - return err -} - -func (c *conn) writeString(s string) error { - c.writeLen('$', len(s)) - c.bw.WriteString(s) - _, err := c.bw.WriteString("\r\n") - return err -} - -func (c *conn) writeBytes(p []byte) error { - c.writeLen('$', len(p)) - c.bw.Write(p) - _, err := c.bw.WriteString("\r\n") - return err -} - -func (c *conn) writeInt64(n int64) error { - return c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10)) -} - -func (c *conn) writeFloat64(n float64) error { - return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64)) -} - -func (c *conn) writeCommand(cmd string, args []interface{}) (err error) { - c.writeLen('*', 1+len(args)) - err = c.writeString(cmd) - for _, arg := range args { - if err != nil { - break - } - switch arg := arg.(type) { - case string: - err = c.writeString(arg) - case []byte: - err = c.writeBytes(arg) - case int: - err = c.writeInt64(int64(arg)) - case int64: - err = c.writeInt64(arg) - case float64: - err = c.writeFloat64(arg) - case bool: - if arg { - err = c.writeString("1") - } else { - err = c.writeString("0") - } - case nil: - err = c.writeString("") - default: - var buf bytes.Buffer - fmt.Fprint(&buf, arg) - err = c.writeBytes(buf.Bytes()) - } - } - return err -} - -type protocolError string - -func (pe protocolError) Error() string { - return fmt.Sprintf("redigo: %s (possible server error or unsupported concurrent read by application)", string(pe)) -} - -func (c *conn) readLine() ([]byte, error) { - p, err := c.br.ReadSlice('\n') - if err == bufio.ErrBufferFull { - return nil, protocolError("long response line") - } - if err != nil { - return nil, err - } - i := len(p) - 2 - if i < 0 || p[i] != '\r' { - return nil, protocolError("bad response line terminator") - } - return p[:i], nil -} - -// parseLen parses bulk string and array lengths. 
-func parseLen(p []byte) (int, error) { - if len(p) == 0 { - return -1, protocolError("malformed length") - } - - if p[0] == '-' && len(p) == 2 && p[1] == '1' { - // handle $-1 and $-1 null replies. - return -1, nil - } - - var n int - for _, b := range p { - n *= 10 - if b < '0' || b > '9' { - return -1, protocolError("illegal bytes in length") - } - n += int(b - '0') - } - - return n, nil -} - -// parseInt parses an integer reply. -func parseInt(p []byte) (interface{}, error) { - if len(p) == 0 { - return 0, protocolError("malformed integer") - } - - var negate bool - if p[0] == '-' { - negate = true - p = p[1:] - if len(p) == 0 { - return 0, protocolError("malformed integer") - } - } - - var n int64 - for _, b := range p { - n *= 10 - if b < '0' || b > '9' { - return 0, protocolError("illegal bytes in length") - } - n += int64(b - '0') - } - - if negate { - n = -n - } - return n, nil -} - -var ( - okReply interface{} = "OK" - pongReply interface{} = "PONG" -) - -func (c *conn) readReply() (interface{}, error) { - line, err := c.readLine() - if err != nil { - return nil, err - } - if len(line) == 0 { - return nil, protocolError("short response line") - } - switch line[0] { - case '+': - switch { - case len(line) == 3 && line[1] == 'O' && line[2] == 'K': - // Avoid allocation for frequent "+OK" response. - return okReply, nil - case len(line) == 5 && line[1] == 'P' && line[2] == 'O' && line[3] == 'N' && line[4] == 'G': - // Avoid allocation in PING command benchmarks :) - return pongReply, nil - default: - return string(line[1:]), nil - } - case '-': - return Error(string(line[1:])), nil - case ':': - return parseInt(line[1:]) - case '$': - n, err := parseLen(line[1:]) - if n < 0 || err != nil { - return nil, err - } - p := make([]byte, n) - _, err = io.ReadFull(c.br, p) - if err != nil { - return nil, err - } - if line, err := c.readLine(); err != nil { - return nil, err - } else if len(line) != 0 { - return nil, protocolError("bad bulk string format") - } - return p, nil - case '*': - n, err := parseLen(line[1:]) - if n < 0 || err != nil { - return nil, err - } - r := make([]interface{}, n) - for i := range r { - r[i], err = c.readReply() - if err != nil { - return nil, err - } - } - return r, nil - } - return nil, protocolError("unexpected response line") -} - -func (c *conn) Send(cmd string, args ...interface{}) error { - c.mu.Lock() - c.pending += 1 - c.mu.Unlock() - if c.writeTimeout != 0 { - c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) - } - if err := c.writeCommand(cmd, args); err != nil { - return c.fatal(err) - } - return nil -} - -func (c *conn) Flush() error { - if c.writeTimeout != 0 { - c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) - } - if err := c.bw.Flush(); err != nil { - return c.fatal(err) - } - return nil -} - -func (c *conn) Receive() (reply interface{}, err error) { - if c.readTimeout != 0 { - c.conn.SetReadDeadline(time.Now().Add(c.readTimeout)) - } - if reply, err = c.readReply(); err != nil { - return nil, c.fatal(err) - } - // When using pub/sub, the number of receives can be greater than the - // number of sends. To enable normal use of the connection after - // unsubscribing from all channels, we do not decrement pending to a - // negative value. - // - // The pending field is decremented after the reply is read to handle the - // case where Receive is called before Send. 
- c.mu.Lock() - if c.pending > 0 { - c.pending -= 1 - } - c.mu.Unlock() - if err, ok := reply.(Error); ok { - return nil, err - } - return -} - -func (c *conn) Do(cmd string, args ...interface{}) (interface{}, error) { - c.mu.Lock() - pending := c.pending - c.pending = 0 - c.mu.Unlock() - - if cmd == "" && pending == 0 { - return nil, nil - } - - if c.writeTimeout != 0 { - c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) - } - - if cmd != "" { - c.writeCommand(cmd, args) - } - - if err := c.bw.Flush(); err != nil { - return nil, c.fatal(err) - } - - if c.readTimeout != 0 { - c.conn.SetReadDeadline(time.Now().Add(c.readTimeout)) - } - - if cmd == "" { - reply := make([]interface{}, pending) - for i := range reply { - r, e := c.readReply() - if e != nil { - return nil, c.fatal(e) - } - reply[i] = r - } - return reply, nil - } - - var err error - var reply interface{} - for i := 0; i <= pending; i++ { - var e error - if reply, e = c.readReply(); e != nil { - return nil, c.fatal(e) - } - if e, ok := reply.(Error); ok && err == nil { - err = e - } - } - return reply, err -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/doc.go b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/doc.go deleted file mode 100644 index 1ae6f0cc..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/doc.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -// Package redis is a client for the Redis database. -// -// The Redigo FAQ (https://github.com/garyburd/redigo/wiki/FAQ) contains more -// documentation about this package. -// -// Connections -// -// The Conn interface is the primary interface for working with Redis. -// Applications create connections by calling the Dial, DialWithTimeout or -// NewConn functions. In the future, functions will be added for creating -// sharded and other types of connections. -// -// The application must call the connection Close method when the application -// is done with the connection. -// -// Executing Commands -// -// The Conn interface has a generic method for executing Redis commands: -// -// Do(commandName string, args ...interface{}) (reply interface{}, err error) -// -// The Redis command reference (http://redis.io/commands) lists the available -// commands. 
An example of using the Redis APPEND command is: -// -// n, err := conn.Do("APPEND", "key", "value") -// -// The Do method converts command arguments to binary strings for transmission -// to the server as follows: -// -// Go Type Conversion -// []byte Sent as is -// string Sent as is -// int, int64 strconv.FormatInt(v) -// float64 strconv.FormatFloat(v, 'g', -1, 64) -// bool true -> "1", false -> "0" -// nil "" -// all other types fmt.Print(v) -// -// Redis command reply types are represented using the following Go types: -// -// Redis type Go type -// error redis.Error -// integer int64 -// simple string string -// bulk string []byte or nil if value not present. -// array []interface{} or nil if value not present. -// -// Use type assertions or the reply helper functions to convert from -// interface{} to the specific Go type for the command result. -// -// Pipelining -// -// Connections support pipelining using the Send, Flush and Receive methods. -// -// Send(commandName string, args ...interface{}) error -// Flush() error -// Receive() (reply interface{}, err error) -// -// Send writes the command to the connection's output buffer. Flush flushes the -// connection's output buffer to the server. Receive reads a single reply from -// the server. The following example shows a simple pipeline. -// -// c.Send("SET", "foo", "bar") -// c.Send("GET", "foo") -// c.Flush() -// c.Receive() // reply from SET -// v, err = c.Receive() // reply from GET -// -// The Do method combines the functionality of the Send, Flush and Receive -// methods. The Do method starts by writing the command and flushing the output -// buffer. Next, the Do method receives all pending replies including the reply -// for the command just sent by Do. If any of the received replies is an error, -// then Do returns the error. If there are no errors, then Do returns the last -// reply. If the command argument to the Do method is "", then the Do method -// will flush the output buffer and receive pending replies without sending a -// command. -// -// Use the Send and Do methods to implement pipelined transactions. -// -// c.Send("MULTI") -// c.Send("INCR", "foo") -// c.Send("INCR", "bar") -// r, err := c.Do("EXEC") -// fmt.Println(r) // prints [1, 1] -// -// Concurrency -// -// Connections do not support concurrent calls to the write methods (Send, -// Flush) or concurrent calls to the read method (Receive). Connections do -// allow a concurrent reader and writer. -// -// Because the Do method combines the functionality of Send, Flush and Receive, -// the Do method cannot be called concurrently with the other methods. -// -// For full concurrent access to Redis, use the thread-safe Pool to get and -// release connections from within a goroutine. -// -// Publish and Subscribe -// -// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers. -// -// c.Send("SUBSCRIBE", "example") -// c.Flush() -// for { -// reply, err := c.Receive() -// if err != nil { -// return err -// } -// // process pushed message -// } -// -// The PubSubConn type wraps a Conn with convenience methods for implementing -// subscribers. The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods -// send and flush a subscription management command. The receive method -// converts a pushed message to convenient types for use in a type switch. 
-// -// psc := redis.PubSubConn{c} -// psc.Subscribe("example") -// for { -// switch v := psc.Receive().(type) { -// case redis.Message: -// fmt.Printf("%s: message: %s\n", v.Channel, v.Data) -// case redis.Subscription: -// fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count) -// case error: -// return v -// } -// } -// -// Reply Helpers -// -// The Bool, Int, Bytes, String, Strings and Values functions convert a reply -// to a value of a specific type. To allow convenient wrapping of calls to the -// connection Do and Receive methods, the functions take a second argument of -// type error. If the error is non-nil, then the helper function returns the -// error. If the error is nil, the function converts the reply to the specified -// type: -// -// exists, err := redis.Bool(c.Do("EXISTS", "foo")) -// if err != nil { -// // handle error return from c.Do or type conversion error. -// } -// -// The Scan function converts elements of a array reply to Go types: -// -// var value1 int -// var value2 string -// reply, err := redis.Values(c.Do("MGET", "key1", "key2")) -// if err != nil { -// // handle error -// } -// if _, err := redis.Scan(reply, &value1, &value2); err != nil { -// // handle error -// } -package redis diff --git a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/log.go b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/log.go deleted file mode 100644 index 129b86d6..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/log.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "bytes" - "fmt" - "log" -) - -// NewLoggingConn returns a logging wrapper around a connection. -func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn { - if prefix != "" { - prefix = prefix + "." 
- } - return &loggingConn{conn, logger, prefix} -} - -type loggingConn struct { - Conn - logger *log.Logger - prefix string -} - -func (c *loggingConn) Close() error { - err := c.Conn.Close() - var buf bytes.Buffer - fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err) - c.logger.Output(2, buf.String()) - return err -} - -func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) { - const chop = 32 - switch v := v.(type) { - case []byte: - if len(v) > chop { - fmt.Fprintf(buf, "%q...", v[:chop]) - } else { - fmt.Fprintf(buf, "%q", v) - } - case string: - if len(v) > chop { - fmt.Fprintf(buf, "%q...", v[:chop]) - } else { - fmt.Fprintf(buf, "%q", v) - } - case []interface{}: - if len(v) == 0 { - buf.WriteString("[]") - } else { - sep := "[" - fin := "]" - if len(v) > chop { - v = v[:chop] - fin = "...]" - } - for _, vv := range v { - buf.WriteString(sep) - c.printValue(buf, vv) - sep = ", " - } - buf.WriteString(fin) - } - default: - fmt.Fprint(buf, v) - } -} - -func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) { - var buf bytes.Buffer - fmt.Fprintf(&buf, "%s%s(", c.prefix, method) - if method != "Receive" { - buf.WriteString(commandName) - for _, arg := range args { - buf.WriteString(", ") - c.printValue(&buf, arg) - } - } - buf.WriteString(") -> (") - if method != "Send" { - c.printValue(&buf, reply) - buf.WriteString(", ") - } - fmt.Fprintf(&buf, "%v)", err) - c.logger.Output(3, buf.String()) -} - -func (c *loggingConn) Do(commandName string, args ...interface{}) (interface{}, error) { - reply, err := c.Conn.Do(commandName, args...) - c.print("Do", commandName, args, reply, err) - return reply, err -} - -func (c *loggingConn) Send(commandName string, args ...interface{}) error { - err := c.Conn.Send(commandName, args...) - c.print("Send", commandName, args, nil, err) - return err -} - -func (c *loggingConn) Receive() (interface{}, error) { - reply, err := c.Conn.Receive() - c.print("Receive", "", nil, reply, err) - return reply, err -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/pool.go b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/pool.go deleted file mode 100644 index 9daf2e33..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/pool.go +++ /dev/null @@ -1,389 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "bytes" - "container/list" - "crypto/rand" - "crypto/sha1" - "errors" - "io" - "strconv" - "sync" - "time" - - "github.com/garyburd/redigo/internal" -) - -var nowFunc = time.Now // for testing - -// ErrPoolExhausted is returned from a pool connection method (Do, Send, -// Receive, Flush, Err) when the maximum number of database connections in the -// pool has been reached. 
-var ErrPoolExhausted = errors.New("redigo: connection pool exhausted") - -var ( - errPoolClosed = errors.New("redigo: connection pool closed") - errConnClosed = errors.New("redigo: connection closed") -) - -// Pool maintains a pool of connections. The application calls the Get method -// to get a connection from the pool and the connection's Close method to -// return the connection's resources to the pool. -// -// The following example shows how to use a pool in a web application. The -// application creates a pool at application startup and makes it available to -// request handlers using a global variable. -// -// func newPool(server, password string) *redis.Pool { -// return &redis.Pool{ -// MaxIdle: 3, -// IdleTimeout: 240 * time.Second, -// Dial: func () (redis.Conn, error) { -// c, err := redis.Dial("tcp", server) -// if err != nil { -// return nil, err -// } -// if _, err := c.Do("AUTH", password); err != nil { -// c.Close() -// return nil, err -// } -// return c, err -// }, -// TestOnBorrow: func(c redis.Conn, t time.Time) error { -// _, err := c.Do("PING") -// return err -// }, -// } -// } -// -// var ( -// pool *redis.Pool -// redisServer = flag.String("redisServer", ":6379", "") -// redisPassword = flag.String("redisPassword", "", "") -// ) -// -// func main() { -// flag.Parse() -// pool = newPool(*redisServer, *redisPassword) -// ... -// } -// -// A request handler gets a connection from the pool and closes the connection -// when the handler is done: -// -// func serveHome(w http.ResponseWriter, r *http.Request) { -// conn := pool.Get() -// defer conn.Close() -// .... -// } -// -type Pool struct { - - // Dial is an application supplied function for creating and configuring a - // connection - Dial func() (Conn, error) - - // TestOnBorrow is an optional application supplied function for checking - // the health of an idle connection before the connection is used again by - // the application. Argument t is the time that the connection was returned - // to the pool. If the function returns an error, then the connection is - // closed. - TestOnBorrow func(c Conn, t time.Time) error - - // Maximum number of idle connections in the pool. - MaxIdle int - - // Maximum number of connections allocated by the pool at a given time. - // When zero, there is no limit on the number of connections in the pool. - MaxActive int - - // Close connections after remaining idle for this duration. If the value - // is zero, then idle connections are not closed. Applications should set - // the timeout to a value less than the server's timeout. - IdleTimeout time.Duration - - // If Wait is true and the pool is at the MaxIdle limit, then Get() waits - // for a connection to be returned to the pool before returning. - Wait bool - - // mu protects fields defined below. - mu sync.Mutex - cond *sync.Cond - closed bool - active int - - // Stack of idleConn with most recently used at the front. - idle list.List -} - -type idleConn struct { - c Conn - t time.Time -} - -// NewPool creates a new pool. This function is deprecated. Applications should -// initialize the Pool fields directly as shown in example. -func NewPool(newFn func() (Conn, error), maxIdle int) *Pool { - return &Pool{Dial: newFn, MaxIdle: maxIdle} -} - -// Get gets a connection. The application must close the returned connection. -// This method always returns a valid connection so that applications can defer -// error handling to the first use of the connection. 
If there is an error -// getting an underlying connection, then the connection Err, Do, Send, Flush -// and Receive methods return that error. -func (p *Pool) Get() Conn { - c, err := p.get() - if err != nil { - return errorConnection{err} - } - return &pooledConnection{p: p, c: c} -} - -// ActiveCount returns the number of active connections in the pool. -func (p *Pool) ActiveCount() int { - p.mu.Lock() - active := p.active - p.mu.Unlock() - return active -} - -// Close releases the resources used by the pool. -func (p *Pool) Close() error { - p.mu.Lock() - idle := p.idle - p.idle.Init() - p.closed = true - p.active -= idle.Len() - if p.cond != nil { - p.cond.Broadcast() - } - p.mu.Unlock() - for e := idle.Front(); e != nil; e = e.Next() { - e.Value.(idleConn).c.Close() - } - return nil -} - -// release decrements the active count and signals waiters. The caller must -// hold p.mu during the call. -func (p *Pool) release() { - p.active -= 1 - if p.cond != nil { - p.cond.Signal() - } -} - -// get prunes stale connections and returns a connection from the idle list or -// creates a new connection. -func (p *Pool) get() (Conn, error) { - p.mu.Lock() - - // Prune stale connections. - - if timeout := p.IdleTimeout; timeout > 0 { - for i, n := 0, p.idle.Len(); i < n; i++ { - e := p.idle.Back() - if e == nil { - break - } - ic := e.Value.(idleConn) - if ic.t.Add(timeout).After(nowFunc()) { - break - } - p.idle.Remove(e) - p.release() - p.mu.Unlock() - ic.c.Close() - p.mu.Lock() - } - } - - for { - - // Get idle connection. - - for i, n := 0, p.idle.Len(); i < n; i++ { - e := p.idle.Front() - if e == nil { - break - } - ic := e.Value.(idleConn) - p.idle.Remove(e) - test := p.TestOnBorrow - p.mu.Unlock() - if test == nil || test(ic.c, ic.t) == nil { - return ic.c, nil - } - ic.c.Close() - p.mu.Lock() - p.release() - } - - // Check for pool closed before dialing a new connection. - - if p.closed { - p.mu.Unlock() - return nil, errors.New("redigo: get on closed pool") - } - - // Dial new connection if under limit. - - if p.MaxActive == 0 || p.active < p.MaxActive { - dial := p.Dial - p.active += 1 - p.mu.Unlock() - c, err := dial() - if err != nil { - p.mu.Lock() - p.release() - p.mu.Unlock() - c = nil - } - return c, err - } - - if !p.Wait { - p.mu.Unlock() - return nil, ErrPoolExhausted - } - - if p.cond == nil { - p.cond = sync.NewCond(&p.mu) - } - p.cond.Wait() - } -} - -func (p *Pool) put(c Conn, forceClose bool) error { - err := c.Err() - p.mu.Lock() - if !p.closed && err == nil && !forceClose { - p.idle.PushFront(idleConn{t: nowFunc(), c: c}) - if p.idle.Len() > p.MaxIdle { - c = p.idle.Remove(p.idle.Back()).(idleConn).c - } else { - c = nil - } - } - - if c == nil { - if p.cond != nil { - p.cond.Signal() - } - p.mu.Unlock() - return nil - } - - p.release() - p.mu.Unlock() - return c.Close() -} - -type pooledConnection struct { - p *Pool - c Conn - state int -} - -var ( - sentinel []byte - sentinelOnce sync.Once -) - -func initSentinel() { - p := make([]byte, 64) - if _, err := rand.Read(p); err == nil { - sentinel = p - } else { - h := sha1.New() - io.WriteString(h, "Oops, rand failed. 
Use time instead.") - io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10)) - sentinel = h.Sum(nil) - } -} - -func (pc *pooledConnection) Close() error { - c := pc.c - if _, ok := c.(errorConnection); ok { - return nil - } - pc.c = errorConnection{errConnClosed} - - if pc.state&internal.MultiState != 0 { - c.Send("DISCARD") - pc.state &^= (internal.MultiState | internal.WatchState) - } else if pc.state&internal.WatchState != 0 { - c.Send("UNWATCH") - pc.state &^= internal.WatchState - } - if pc.state&internal.SubscribeState != 0 { - c.Send("UNSUBSCRIBE") - c.Send("PUNSUBSCRIBE") - // To detect the end of the message stream, ask the server to echo - // a sentinel value and read until we see that value. - sentinelOnce.Do(initSentinel) - c.Send("ECHO", sentinel) - c.Flush() - for { - p, err := c.Receive() - if err != nil { - break - } - if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) { - pc.state &^= internal.SubscribeState - break - } - } - } - c.Do("") - pc.p.put(c, pc.state != 0) - return nil -} - -func (pc *pooledConnection) Err() error { - return pc.c.Err() -} - -func (pc *pooledConnection) Do(commandName string, args ...interface{}) (reply interface{}, err error) { - ci := internal.LookupCommandInfo(commandName) - pc.state = (pc.state | ci.Set) &^ ci.Clear - return pc.c.Do(commandName, args...) -} - -func (pc *pooledConnection) Send(commandName string, args ...interface{}) error { - ci := internal.LookupCommandInfo(commandName) - pc.state = (pc.state | ci.Set) &^ ci.Clear - return pc.c.Send(commandName, args...) -} - -func (pc *pooledConnection) Flush() error { - return pc.c.Flush() -} - -func (pc *pooledConnection) Receive() (reply interface{}, err error) { - return pc.c.Receive() -} - -type errorConnection struct{ err error } - -func (ec errorConnection) Do(string, ...interface{}) (interface{}, error) { return nil, ec.err } -func (ec errorConnection) Send(string, ...interface{}) error { return ec.err } -func (ec errorConnection) Err() error { return ec.err } -func (ec errorConnection) Close() error { return ec.err } -func (ec errorConnection) Flush() error { return ec.err } -func (ec errorConnection) Receive() (interface{}, error) { return nil, ec.err } diff --git a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/pubsub.go b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/pubsub.go deleted file mode 100644 index f0790429..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/pubsub.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "errors" -) - -// Subscription represents a subscribe or unsubscribe notification. -type Subscription struct { - - // Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe" - Kind string - - // The channel that was changed. - Channel string - - // The current number of subscriptions for connection. 
- Count int -} - -// Message represents a message notification. -type Message struct { - - // The originating channel. - Channel string - - // The message data. - Data []byte -} - -// PMessage represents a pmessage notification. -type PMessage struct { - - // The matched pattern. - Pattern string - - // The originating channel. - Channel string - - // The message data. - Data []byte -} - -// PubSubConn wraps a Conn with convenience methods for subscribers. -type PubSubConn struct { - Conn Conn -} - -// Close closes the connection. -func (c PubSubConn) Close() error { - return c.Conn.Close() -} - -// Subscribe subscribes the connection to the specified channels. -func (c PubSubConn) Subscribe(channel ...interface{}) error { - c.Conn.Send("SUBSCRIBE", channel...) - return c.Conn.Flush() -} - -// PSubscribe subscribes the connection to the given patterns. -func (c PubSubConn) PSubscribe(channel ...interface{}) error { - c.Conn.Send("PSUBSCRIBE", channel...) - return c.Conn.Flush() -} - -// Unsubscribe unsubscribes the connection from the given channels, or from all -// of them if none is given. -func (c PubSubConn) Unsubscribe(channel ...interface{}) error { - c.Conn.Send("UNSUBSCRIBE", channel...) - return c.Conn.Flush() -} - -// PUnsubscribe unsubscribes the connection from the given patterns, or from all -// of them if none is given. -func (c PubSubConn) PUnsubscribe(channel ...interface{}) error { - c.Conn.Send("PUNSUBSCRIBE", channel...) - return c.Conn.Flush() -} - -// Receive returns a pushed message as a Subscription, Message, PMessage or -// error. The return value is intended to be used directly in a type switch as -// illustrated in the PubSubConn example. -func (c PubSubConn) Receive() interface{} { - reply, err := Values(c.Conn.Receive()) - if err != nil { - return err - } - - var kind string - reply, err = Scan(reply, &kind) - if err != nil { - return err - } - - switch kind { - case "message": - var m Message - if _, err := Scan(reply, &m.Channel, &m.Data); err != nil { - return err - } - return m - case "pmessage": - var pm PMessage - if _, err := Scan(reply, &pm.Pattern, &pm.Channel, &pm.Data); err != nil { - return err - } - return pm - case "subscribe", "psubscribe", "unsubscribe", "punsubscribe": - s := Subscription{Kind: kind} - if _, err := Scan(reply, &s.Channel, &s.Count); err != nil { - return err - } - return s - } - return errors.New("redigo: unknown pubsub notification") -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/redis.go b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/redis.go deleted file mode 100644 index c90a48ed..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/redis.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -// Error represents an error returned in a command reply. 
-type Error string - -func (err Error) Error() string { return string(err) } - -// Conn represents a connection to a Redis server. -type Conn interface { - // Close closes the connection. - Close() error - - // Err returns a non-nil value if the connection is broken. The returned - // value is either the first non-nil value returned from the underlying - // network connection or a protocol parsing error. Applications should - // close broken connections. - Err() error - - // Do sends a command to the server and returns the received reply. - Do(commandName string, args ...interface{}) (reply interface{}, err error) - - // Send writes the command to the client's output buffer. - Send(commandName string, args ...interface{}) error - - // Flush flushes the output buffer to the Redis server. - Flush() error - - // Receive receives a single reply from the Redis server - Receive() (reply interface{}, err error) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/reply.go b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/reply.go deleted file mode 100644 index 5648f930..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/reply.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "errors" - "fmt" - "strconv" -) - -// ErrNil indicates that a reply value is nil. -var ErrNil = errors.New("redigo: nil returned") - -// Int is a helper that converts a command reply to an integer. If err is not -// equal to nil, then Int returns 0, err. Otherwise, Int converts the -// reply to an int as follows: -// -// Reply type Result -// integer int(reply), nil -// bulk string parsed reply, nil -// nil 0, ErrNil -// other 0, error -func Int(reply interface{}, err error) (int, error) { - if err != nil { - return 0, err - } - switch reply := reply.(type) { - case int64: - x := int(reply) - if int64(x) != reply { - return 0, strconv.ErrRange - } - return x, nil - case []byte: - n, err := strconv.ParseInt(string(reply), 10, 0) - return int(n), err - case nil: - return 0, ErrNil - case Error: - return 0, reply - } - return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply) -} - -// Int64 is a helper that converts a command reply to 64 bit integer. If err is -// not equal to nil, then Int returns 0, err. 
Otherwise, Int64 converts the -// reply to an int64 as follows: -// -// Reply type Result -// integer reply, nil -// bulk string parsed reply, nil -// nil 0, ErrNil -// other 0, error -func Int64(reply interface{}, err error) (int64, error) { - if err != nil { - return 0, err - } - switch reply := reply.(type) { - case int64: - return reply, nil - case []byte: - n, err := strconv.ParseInt(string(reply), 10, 64) - return n, err - case nil: - return 0, ErrNil - case Error: - return 0, reply - } - return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply) -} - -var errNegativeInt = errors.New("redigo: unexpected value for Uint64") - -// Uint64 is a helper that converts a command reply to 64 bit integer. If err is -// not equal to nil, then Int returns 0, err. Otherwise, Int64 converts the -// reply to an int64 as follows: -// -// Reply type Result -// integer reply, nil -// bulk string parsed reply, nil -// nil 0, ErrNil -// other 0, error -func Uint64(reply interface{}, err error) (uint64, error) { - if err != nil { - return 0, err - } - switch reply := reply.(type) { - case int64: - if reply < 0 { - return 0, errNegativeInt - } - return uint64(reply), nil - case []byte: - n, err := strconv.ParseUint(string(reply), 10, 64) - return n, err - case nil: - return 0, ErrNil - case Error: - return 0, reply - } - return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply) -} - -// Float64 is a helper that converts a command reply to 64 bit float. If err is -// not equal to nil, then Float64 returns 0, err. Otherwise, Float64 converts -// the reply to an int as follows: -// -// Reply type Result -// bulk string parsed reply, nil -// nil 0, ErrNil -// other 0, error -func Float64(reply interface{}, err error) (float64, error) { - if err != nil { - return 0, err - } - switch reply := reply.(type) { - case []byte: - n, err := strconv.ParseFloat(string(reply), 64) - return n, err - case nil: - return 0, ErrNil - case Error: - return 0, reply - } - return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply) -} - -// String is a helper that converts a command reply to a string. If err is not -// equal to nil, then String returns "", err. Otherwise String converts the -// reply to a string as follows: -// -// Reply type Result -// bulk string string(reply), nil -// simple string reply, nil -// nil "", ErrNil -// other "", error -func String(reply interface{}, err error) (string, error) { - if err != nil { - return "", err - } - switch reply := reply.(type) { - case []byte: - return string(reply), nil - case string: - return reply, nil - case nil: - return "", ErrNil - case Error: - return "", reply - } - return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply) -} - -// Bytes is a helper that converts a command reply to a slice of bytes. If err -// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts -// the reply to a slice of bytes as follows: -// -// Reply type Result -// bulk string reply, nil -// simple string []byte(reply), nil -// nil nil, ErrNil -// other nil, error -func Bytes(reply interface{}, err error) ([]byte, error) { - if err != nil { - return nil, err - } - switch reply := reply.(type) { - case []byte: - return reply, nil - case string: - return []byte(reply), nil - case nil: - return nil, ErrNil - case Error: - return nil, reply - } - return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply) -} - -// Bool is a helper that converts a command reply to a boolean. 
If err is not -// equal to nil, then Bool returns false, err. Otherwise Bool converts the -// reply to boolean as follows: -// -// Reply type Result -// integer value != 0, nil -// bulk string strconv.ParseBool(reply) -// nil false, ErrNil -// other false, error -func Bool(reply interface{}, err error) (bool, error) { - if err != nil { - return false, err - } - switch reply := reply.(type) { - case int64: - return reply != 0, nil - case []byte: - return strconv.ParseBool(string(reply)) - case nil: - return false, ErrNil - case Error: - return false, reply - } - return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply) -} - -// MultiBulk is deprecated. Use Values. -func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) } - -// Values is a helper that converts an array command reply to a []interface{}. -// If err is not equal to nil, then Values returns nil, err. Otherwise, Values -// converts the reply as follows: -// -// Reply type Result -// array reply, nil -// nil nil, ErrNil -// other nil, error -func Values(reply interface{}, err error) ([]interface{}, error) { - if err != nil { - return nil, err - } - switch reply := reply.(type) { - case []interface{}: - return reply, nil - case nil: - return nil, ErrNil - case Error: - return nil, reply - } - return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply) -} - -// Strings is a helper that converts an array command reply to a []string. If -// err is not equal to nil, then Strings returns nil, err. Nil array items are -// converted to "" in the output slice. Strings returns an error if an array -// item is not a bulk string or nil. -func Strings(reply interface{}, err error) ([]string, error) { - if err != nil { - return nil, err - } - switch reply := reply.(type) { - case []interface{}: - result := make([]string, len(reply)) - for i := range reply { - if reply[i] == nil { - continue - } - p, ok := reply[i].([]byte) - if !ok { - return nil, fmt.Errorf("redigo: unexpected element type for Strings, got type %T", reply[i]) - } - result[i] = string(p) - } - return result, nil - case nil: - return nil, ErrNil - case Error: - return nil, reply - } - return nil, fmt.Errorf("redigo: unexpected type for Strings, got type %T", reply) -} - -// Ints is a helper that converts an array command reply to a []int. If -// err is not equal to nil, then Ints returns nil, err. -func Ints(reply interface{}, err error) ([]int, error) { - var ints []int - if reply == nil { - return ints, ErrNil - } - values, err := Values(reply, err) - if err != nil { - return ints, err - } - if err := ScanSlice(values, &ints); err != nil { - return ints, err - } - return ints, nil -} - -// StringMap is a helper that converts an array of strings (alternating key, value) -// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format. -// Requires an even number of values in result. 
-func StringMap(result interface{}, err error) (map[string]string, error) { - values, err := Values(result, err) - if err != nil { - return nil, err - } - if len(values)%2 != 0 { - return nil, errors.New("redigo: StringMap expects even number of values result") - } - m := make(map[string]string, len(values)/2) - for i := 0; i < len(values); i += 2 { - key, okKey := values[i].([]byte) - value, okValue := values[i+1].([]byte) - if !okKey || !okValue { - return nil, errors.New("redigo: ScanMap key not a bulk string value") - } - m[string(key)] = string(value) - } - return m, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/scan.go b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/scan.go deleted file mode 100644 index 8c9cfa18..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/scan.go +++ /dev/null @@ -1,513 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "sync" -) - -func ensureLen(d reflect.Value, n int) { - if n > d.Cap() { - d.Set(reflect.MakeSlice(d.Type(), n, n)) - } else { - d.SetLen(n) - } -} - -func cannotConvert(d reflect.Value, s interface{}) error { - return fmt.Errorf("redigo: Scan cannot convert from %s to %s", - reflect.TypeOf(s), d.Type()) -} - -func convertAssignBytes(d reflect.Value, s []byte) (err error) { - switch d.Type().Kind() { - case reflect.Float32, reflect.Float64: - var x float64 - x, err = strconv.ParseFloat(string(s), d.Type().Bits()) - d.SetFloat(x) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - var x int64 - x, err = strconv.ParseInt(string(s), 10, d.Type().Bits()) - d.SetInt(x) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - var x uint64 - x, err = strconv.ParseUint(string(s), 10, d.Type().Bits()) - d.SetUint(x) - case reflect.Bool: - var x bool - x, err = strconv.ParseBool(string(s)) - d.SetBool(x) - case reflect.String: - d.SetString(string(s)) - case reflect.Slice: - if d.Type().Elem().Kind() != reflect.Uint8 { - err = cannotConvert(d, s) - } else { - d.SetBytes(s) - } - default: - err = cannotConvert(d, s) - } - return -} - -func convertAssignInt(d reflect.Value, s int64) (err error) { - switch d.Type().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - d.SetInt(s) - if d.Int() != s { - err = strconv.ErrRange - d.SetInt(0) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - if s < 0 { - err = strconv.ErrRange - } else { - x := uint64(s) - d.SetUint(x) - if d.Uint() != x { - err = strconv.ErrRange - d.SetUint(0) - } - } - case reflect.Bool: - d.SetBool(s != 0) - default: - err = cannotConvert(d, s) - } - return -} - -func convertAssignValue(d reflect.Value, s interface{}) (err error) { - switch s := s.(type) { - case []byte: - err = convertAssignBytes(d, s) - 
case int64: - err = convertAssignInt(d, s) - default: - err = cannotConvert(d, s) - } - return err -} - -func convertAssignValues(d reflect.Value, s []interface{}) error { - if d.Type().Kind() != reflect.Slice { - return cannotConvert(d, s) - } - ensureLen(d, len(s)) - for i := 0; i < len(s); i++ { - if err := convertAssignValue(d.Index(i), s[i]); err != nil { - return err - } - } - return nil -} - -func convertAssign(d interface{}, s interface{}) (err error) { - // Handle the most common destination types using type switches and - // fall back to reflection for all other types. - switch s := s.(type) { - case nil: - // ingore - case []byte: - switch d := d.(type) { - case *string: - *d = string(s) - case *int: - *d, err = strconv.Atoi(string(s)) - case *bool: - *d, err = strconv.ParseBool(string(s)) - case *[]byte: - *d = s - case *interface{}: - *d = s - case nil: - // skip value - default: - if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { - err = cannotConvert(d, s) - } else { - err = convertAssignBytes(d.Elem(), s) - } - } - case int64: - switch d := d.(type) { - case *int: - x := int(s) - if int64(x) != s { - err = strconv.ErrRange - x = 0 - } - *d = x - case *bool: - *d = s != 0 - case *interface{}: - *d = s - case nil: - // skip value - default: - if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { - err = cannotConvert(d, s) - } else { - err = convertAssignInt(d.Elem(), s) - } - } - case []interface{}: - switch d := d.(type) { - case *[]interface{}: - *d = s - case *interface{}: - *d = s - case nil: - // skip value - default: - if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { - err = cannotConvert(d, s) - } else { - err = convertAssignValues(d.Elem(), s) - } - } - case Error: - err = s - default: - err = cannotConvert(reflect.ValueOf(d), s) - } - return -} - -// Scan copies from src to the values pointed at by dest. -// -// The values pointed at by dest must be an integer, float, boolean, string, -// []byte, interface{} or slices of these types. Scan uses the standard strconv -// package to convert bulk strings to numeric and boolean types. -// -// If a dest value is nil, then the corresponding src value is skipped. -// -// If a src element is nil, then the corresponding dest value is not modified. -// -// To enable easy use of Scan in a loop, Scan returns the slice of src -// following the copied values. -func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) { - if len(src) < len(dest) { - return nil, errors.New("redigo: Scan array short") - } - var err error - for i, d := range dest { - err = convertAssign(d, src[i]) - if err != nil { - break - } - } - return src[len(dest):], err -} - -type fieldSpec struct { - name string - index []int - //omitEmpty bool -} - -type structSpec struct { - m map[string]*fieldSpec - l []*fieldSpec -} - -func (ss *structSpec) fieldSpec(name []byte) *fieldSpec { - return ss.m[string(name)] -} - -func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) { - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - switch { - case f.PkgPath != "": - // Ignore unexported fields. - case f.Anonymous: - // TODO: Handle pointers. Requires change to decoder and - // protection against infinite recursion. 
- if f.Type.Kind() == reflect.Struct { - compileStructSpec(f.Type, depth, append(index, i), ss) - } - default: - fs := &fieldSpec{name: f.Name} - tag := f.Tag.Get("redis") - p := strings.Split(tag, ",") - if len(p) > 0 { - if p[0] == "-" { - continue - } - if len(p[0]) > 0 { - fs.name = p[0] - } - for _, s := range p[1:] { - switch s { - //case "omitempty": - // fs.omitempty = true - default: - panic(errors.New("redigo: unknown field flag " + s + " for type " + t.Name())) - } - } - } - d, found := depth[fs.name] - if !found { - d = 1 << 30 - } - switch { - case len(index) == d: - // At same depth, remove from result. - delete(ss.m, fs.name) - j := 0 - for i := 0; i < len(ss.l); i++ { - if fs.name != ss.l[i].name { - ss.l[j] = ss.l[i] - j += 1 - } - } - ss.l = ss.l[:j] - case len(index) < d: - fs.index = make([]int, len(index)+1) - copy(fs.index, index) - fs.index[len(index)] = i - depth[fs.name] = len(index) - ss.m[fs.name] = fs - ss.l = append(ss.l, fs) - } - } - } -} - -var ( - structSpecMutex sync.RWMutex - structSpecCache = make(map[reflect.Type]*structSpec) - defaultFieldSpec = &fieldSpec{} -) - -func structSpecForType(t reflect.Type) *structSpec { - - structSpecMutex.RLock() - ss, found := structSpecCache[t] - structSpecMutex.RUnlock() - if found { - return ss - } - - structSpecMutex.Lock() - defer structSpecMutex.Unlock() - ss, found = structSpecCache[t] - if found { - return ss - } - - ss = &structSpec{m: make(map[string]*fieldSpec)} - compileStructSpec(t, make(map[string]int), nil, ss) - structSpecCache[t] = ss - return ss -} - -var errScanStructValue = errors.New("redigo: ScanStruct value must be non-nil pointer to a struct") - -// ScanStruct scans alternating names and values from src to a struct. The -// HGETALL and CONFIG GET commands return replies in this format. -// -// ScanStruct uses exported field names to match values in the response. Use -// 'redis' field tag to override the name: -// -// Field int `redis:"myName"` -// -// Fields with the tag redis:"-" are ignored. -// -// Integer, float, boolean, string and []byte fields are supported. Scan uses the -// standard strconv package to convert bulk string values to numeric and -// boolean types. -// -// If a src element is nil, then the corresponding field is not modified. -func ScanStruct(src []interface{}, dest interface{}) error { - d := reflect.ValueOf(dest) - if d.Kind() != reflect.Ptr || d.IsNil() { - return errScanStructValue - } - d = d.Elem() - if d.Kind() != reflect.Struct { - return errScanStructValue - } - ss := structSpecForType(d.Type()) - - if len(src)%2 != 0 { - return errors.New("redigo: ScanStruct expects even number of values in values") - } - - for i := 0; i < len(src); i += 2 { - s := src[i+1] - if s == nil { - continue - } - name, ok := src[i].([]byte) - if !ok { - return errors.New("redigo: ScanStruct key not a bulk string value") - } - fs := ss.fieldSpec(name) - if fs == nil { - continue - } - if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { - return err - } - } - return nil -} - -var ( - errScanSliceValue = errors.New("redigo: ScanSlice dest must be non-nil pointer to a struct") -) - -// ScanSlice scans src to the slice pointed to by dest. The elements the dest -// slice must be integer, float, boolean, string, struct or pointer to struct -// values. -// -// Struct fields must be integer, float, boolean or string values. All struct -// fields are used unless a subset is specified using fieldNames. 
-func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error { - d := reflect.ValueOf(dest) - if d.Kind() != reflect.Ptr || d.IsNil() { - return errScanSliceValue - } - d = d.Elem() - if d.Kind() != reflect.Slice { - return errScanSliceValue - } - - isPtr := false - t := d.Type().Elem() - if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { - isPtr = true - t = t.Elem() - } - - if t.Kind() != reflect.Struct { - ensureLen(d, len(src)) - for i, s := range src { - if s == nil { - continue - } - if err := convertAssignValue(d.Index(i), s); err != nil { - return err - } - } - return nil - } - - ss := structSpecForType(t) - fss := ss.l - if len(fieldNames) > 0 { - fss = make([]*fieldSpec, len(fieldNames)) - for i, name := range fieldNames { - fss[i] = ss.m[name] - if fss[i] == nil { - return errors.New("redigo: ScanSlice bad field name " + name) - } - } - } - - if len(fss) == 0 { - return errors.New("redigo: ScanSlice no struct fields") - } - - n := len(src) / len(fss) - if n*len(fss) != len(src) { - return errors.New("redigo: ScanSlice length not a multiple of struct field count") - } - - ensureLen(d, n) - for i := 0; i < n; i++ { - d := d.Index(i) - if isPtr { - if d.IsNil() { - d.Set(reflect.New(t)) - } - d = d.Elem() - } - for j, fs := range fss { - s := src[i*len(fss)+j] - if s == nil { - continue - } - if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { - return err - } - } - } - return nil -} - -// Args is a helper for constructing command arguments from structured values. -type Args []interface{} - -// Add returns the result of appending value to args. -func (args Args) Add(value ...interface{}) Args { - return append(args, value...) -} - -// AddFlat returns the result of appending the flattened value of v to args. -// -// Maps are flattened by appending the alternating keys and map values to args. -// -// Slices are flattened by appending the slice elements to args. -// -// Structs are flattened by appending the alternating names and values of -// exported fields to args. If v is a nil struct pointer, then nothing is -// appended. The 'redis' field tag overrides struct field names. See ScanStruct -// for more information on the use of the 'redis' field tag. -// -// Other types are appended to args as is. 
-func (args Args) AddFlat(v interface{}) Args { - rv := reflect.ValueOf(v) - switch rv.Kind() { - case reflect.Struct: - args = flattenStruct(args, rv) - case reflect.Slice: - for i := 0; i < rv.Len(); i++ { - args = append(args, rv.Index(i).Interface()) - } - case reflect.Map: - for _, k := range rv.MapKeys() { - args = append(args, k.Interface(), rv.MapIndex(k).Interface()) - } - case reflect.Ptr: - if rv.Type().Elem().Kind() == reflect.Struct { - if !rv.IsNil() { - args = flattenStruct(args, rv.Elem()) - } - } else { - args = append(args, v) - } - default: - args = append(args, v) - } - return args -} - -func flattenStruct(args Args, v reflect.Value) Args { - ss := structSpecForType(v.Type()) - for _, fs := range ss.l { - fv := v.FieldByIndex(fs.index) - args = append(args, fs.name, fv.Interface()) - } - return args -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/script.go b/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/script.go deleted file mode 100644 index 78605a90..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/garyburd/redigo/redis/script.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "crypto/sha1" - "encoding/hex" - "io" - "strings" -) - -// Script encapsulates the source, hash and key count for a Lua script. See -// http://redis.io/commands/eval for information on scripts in Redis. -type Script struct { - keyCount int - src string - hash string -} - -// NewScript returns a new script object. If keyCount is greater than or equal -// to zero, then the count is automatically inserted in the EVAL command -// argument list. If keyCount is less than zero, then the application supplies -// the count as the first value in the keysAndArgs argument to the Do, Send and -// SendHash methods. -func NewScript(keyCount int, src string) *Script { - h := sha1.New() - io.WriteString(h, src) - return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))} -} - -func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} { - var args []interface{} - if s.keyCount < 0 { - args = make([]interface{}, 1+len(keysAndArgs)) - args[0] = spec - copy(args[1:], keysAndArgs) - } else { - args = make([]interface{}, 2+len(keysAndArgs)) - args[0] = spec - args[1] = s.keyCount - copy(args[2:], keysAndArgs) - } - return args -} - -// Do evaluates the script. Under the covers, Do optimistically evaluates the -// script using the EVALSHA command. If the command fails because the script is -// not loaded, then Do evaluates the script using the EVAL command (thus -// causing the script to load). -func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) { - v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...) - if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") { - v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...) 
- } - return v, err -} - -// SendHash evaluates the script without waiting for the reply. The script is -// evaluated with the EVALSHA command. The application must ensure that the -// script is loaded by a previous call to Send, Do or Load methods. -func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error { - return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...) -} - -// Send evaluates the script without waiting for the reply. -func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error { - return c.Send("EVAL", s.args(s.src, keysAndArgs)...) -} - -// Load loads the script without evaluating it. -func (s *Script) Load(c Conn) error { - _, err := c.Do("SCRIPT", "LOAD", s.src) - return err -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 1b1b1921..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,31 +0,0 @@ -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/Makefile deleted file mode 100644 index e2e0651a..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
-# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C testdata - protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto - make diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/clone.go deleted file mode 100644 index e98ddec9..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/clone.go +++ /dev/null @@ -1,223 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(pb Message) Message { - in := reflect.ValueOf(pb) - if in.IsNil() { - return pb - } - - out := reflect.New(in.Type().Elem()) - // out is empty so a merge is a deep copy. - mergeStruct(out.Elem(), in.Elem()) - return out.Interface().(Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") - } - if in.IsNil() { - // Merging nil into non-nil is a quiet no-op - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := in.Addr().Interface().(extendableProto); ok { - emOut := out.Addr().Interface().(extendableProto) - mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap()) - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. 
- if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index f94b9f41..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,868 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" - "os" - "reflect" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - // x, n already 0 - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - // x, err already 0 - - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. 
-func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. 
-func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - return UnmarshalMerge(buf, pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -func (p *Buffer) DecodeGroup(pb Message) error { - typ, base, err := getbase(pb) - if err != nil { - return err - } - return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. 
-func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - ext := e.ExtensionMap()[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - e.ExtensionMap()[int32(tag)] = ext - } - continue - } - } - // Maybe it's a oneof? - if prop.oneofUnmarshaler != nil { - m := structPointer_Interface(base, st).(Message) - // First return value indicates whether tag is a oneof field. - ok, err = prop.oneofUnmarshaler(m, tag, wire, o) - if err == ErrInternalBadWireType { - // Map the error to something more descriptive. - // Do the formatting here to save generated code space. - err = fmt.Errorf("bad wiretype for oneof field in %T", m) - } - if ok { - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. - return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. -const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. 
-func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. -func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - fin := o.index + nb - if fin < o.index { - return errOverflow - } - - y := *v - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). -func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. 
-func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. -func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. - keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. - for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. 
- tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. -func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). -func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - - return err -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index eb7e0474..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,1331 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "fmt" - "reflect" - "sort" -) - -// RequiredNotSetError is the error returned if Marshal is called with -// a protocol buffer struct whose required fields have not -// all been initialized. It is also the error returned if Unmarshal is -// called with an encoded protocol buffer that does not include all the -// required fields. -// -// When printed, RequiredNotSetError reports the first unset required field in a -// message. If the field cannot be precisely determined, it is reported as -// "{Unknown}". -type RequiredNotSetError struct { - field string -} - -func (e *RequiredNotSetError) Error() string { - return fmt.Sprintf("proto: required field %q not set", e.field) -} - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. 
-func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - return sizeVarint(x) -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -func sizeFixed64(x uint64) int { - return 8 -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -func sizeFixed32(x uint64) int { - return 4 -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? 
- if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - var state errorState - if err != nil && !state.shouldContinue(err, nil) { - return nil, err - } - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - var state errorState - err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) - } - return err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - if err != nil { - return err - } - p.buf = append(p.buf, data...) - return nil - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - stats.Encode++ - } - - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - stats.Size++ - } - - return -} - -// Individual type encoders. - -// Encode a bool. -func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v && !p.oneof { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. -func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += len(p.tagcode) - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. -func (o *Buffer) enc_map(p *Properties, base structPointer) error { - v := *structPointer_ExtMap(base, p.field) - if err := encodeExtensionMap(v); err != nil { - return err - } - // Fast-path for common cases: zero or one extensions. - if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) 
- } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := *structPointer_ExtMap(base, p.field) - return sizeExtensionMap(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? - - /* - A map defined as - map map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil { - return err - } - return nil - } - - // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - - // The only illegal map entry values are nil message pointers. - if val.Kind() == reflect.Ptr && val.IsNil() { - return errors.New("proto: map has nil element") - } - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) - if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. - - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. 
-func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. - // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - } - } - - // Do oneof fields. - if prop.oneofMarshaler != nil { - m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { - return err - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - // Factor in any oneof fields. - if prop.oneofSizer != nil { - m := structPointer_Interface(base, prop.stype).(Message) - n += prop.oneofSizer(m) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. 
-func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. - reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/equal.go deleted file mode 100644 index f5db1def..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/equal.go +++ /dev/null @@ -1,276 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal (a "bytes" field, - although represented by []byte, is not a repeated field) - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. 
- - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - b1, ok := f1.Interface().(raw) - if ok { - b2 := f2.Interface().(raw) - // RawMessage - if !bytes.Equal(b1.Bytes(), b2.Bytes()) { - return false - } - continue - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - if !bytes.Equal(u1, u2) { - return false - } - - return true -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// em1 and em2 are extension maps. -func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - continue - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/extensions.go deleted file mode 100644 index 054f4f1d..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/extensions.go +++ /dev/null @@ -1,399 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base extendableProto, id int32, b []byte) { - base.ExtensionMap()[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - // Check the extended type. - if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b { - return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. 
-type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m. -func encodeExtensionMap(m map[int32]Extension) error { - for k, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - m[k] = e - } - return nil -} - -func sizeExtensionMap(m map[int32]Extension) (n int) { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - n += props.size(props, toStructPointer(x)) - } - return -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb extendableProto, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - _, ok := pb.ExtensionMap()[extension.Field] - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb extendableProto, extension *ExtensionDesc) { - // TODO: Check types, field numbers, etc.? - delete(pb.ExtensionMap(), extension.Field) -} - -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. -func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { - if err := checkExtensionTypes(pb, extension); err != nil { - return nil, err - } - - emap := pb.ExtensionMap() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. 
- return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - - t := reflect.TypeOf(extension.ExtensionType) - - props := extensionProperties(extension) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. - value := reflect.New(t).Elem() - - for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { - return nil, err - } - - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { - return nil, err - } - - if o.index >= len(o.buf) { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, ok := pb.(extendableProto) - if !ok { - err = errors.New("proto: not an extendable proto") - return - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// SetExtension sets the specified extension of pb to the specified value. 
-func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { - if err := checkExtensionTypes(pb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index 0de8f8df..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,894 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Getters are only generated for message and oneof fields. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. 
-Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - 
} - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. -type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // write point - - // pools of basic types to amortize allocation. - bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. -func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. 
-func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. 
-func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. 
- switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. -func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. 
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. 
- if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion1 = true diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index e25e01e6..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,280 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. 
-// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - if ms.find(pb) != nil { - return true - } - return false -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { - if err := encodeExtensionMap(m); err != nil { - return nil, err - } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. - ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. - msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. 
-// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - if i > 0 { - b.WriteByte(',') - } - - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. - return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index 749919d2..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,479 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. 
All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build appengine - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "math" - "reflect" -) - -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} - -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. 
- if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) -} - -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} -} - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { - v reflect.Value -} - -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) -} - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. 
-// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value -} - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() -} - -// Set sets p to point at a newly allocated word with bits set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} -} - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value -} - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} -} - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. 
-type word32Slice struct { - v reflect.Value -} - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } -} - -func (p word32Slice) Len() int { - return p.v.Len() -} - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} -} - -// word64 is like word32 but for 64-bit values. -type word64 struct { - v reflect.Value -} - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") -} - -func word64_IsNil(p word64) bool { - return p.v.IsNil() -} - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} -} - -// word64Val is like word32Val but for 64-bit values. 
-type word64Val struct { - v reflect.Value -} - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) - return - } - panic("unreachable") -} - -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} -} - -type word64Slice struct { - v reflect.Value -} - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) - } -} - -func (p word64Slice) Len() int { - return p.v.Len() -} - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") -} - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index e9be0fe9..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,266 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !appengine - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "unsafe" -) - -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. - -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. -func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != ^field(0) -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringVal returns the address of a string field in the struct. 
-func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil -} - -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] -} - -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p -} - -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x -} - -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p -} - -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Slice is a slice of 32-bit values. -type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. 
-func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// word64 is like word32 but for 64-bit values. -type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] -} - -func word64_IsNil(p word64) bool { - return *p == nil -} - -func word64_Get(p word64) uint64 { - return **p -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x -} - -func word64Val_Get(p word64Val) uint64 { - return *p -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 - -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } - -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/properties.go deleted file mode 100644 index 4fe2ec22..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/properties.go +++ /dev/null @@ -1,846 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// A oneofMarshaler does the marshaling for all oneof fields in a message. -type oneofMarshaler func(Message, *Buffer) error - -// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. -type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) - -// A oneofSizer does the sizing for all oneof fields in a message. -type oneofSizer func(Message) int - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto - - oneofMarshaler oneofMarshaler - oneofUnmarshaler oneofUnmarshaler - oneofSizer oneofSizer - stype reflect.Type - - // OneofTypes contains information about the oneof fields in this message. 
- // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. -type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field; set for []byte only - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - def_uint64 uint64 - - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool - - mtype reflect.Type // set for map types only - mkeyprop *Properties // set for map types only - mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder -} - -// String formats the properties in the protobuf struct field tag style. -func (p *Properties) String() string { - s := p.Wire - s = "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. 
- if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint - case "fixed32": - p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 - case "fixed64": - p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 - case "zigzag32": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 - case "zigzag64": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break - } - } - } -} - -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// Initialize the fields for encoding and decoding. 
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil - - switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - case reflect.Int32: - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.String: - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: - p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } - } - - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 
reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_byte - p.dec = (*Buffer).dec_slice_byte - p.size = size_slice_byte - // This is a []byte, which is either a bytes field, - // or the value of a map field. In the latter case, - // we always encode an empty []byte, so we should not - // use the proto3 enc/size funcs. - // f == nil iff this is the key/value of a map field. - if p.proto3 && f != nil { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break - case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte - } - } - - case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map - - p.mtype = t1 - p.mkeyprop = &Properties{} - p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.mvalprop = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. 
- vtype = reflect.PtrTo(vtype) - } - p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() -) - -// isMarshaler reports whether type t implements Marshaler. -func isMarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isMarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isMarshaler") - } - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isUnmarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isUnmarshaler") - } - return t.Implements(unmarshalerType) -} - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if f != nil { - p.field = toField(f) - } - if tag == "" { - return - } - p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - if collectStats { - stats.Chit++ - } - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } - return prop - } - if collectStats { - stats.Cmiss++ - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. 
- propertiesMap[t] = prop - - // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) - prop.unrecField = invalidField - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - if f.Name == "XXX_extensions" { // special case - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } - if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } - oneof := f.Tag.Get("protobuf_oneof") != "" // special case - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { - var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() - prop.stype = t - - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// Return the Properties object for the x[0]'th field of the structure. -func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. 
- -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). -var ( - protoTypes = make(map[string]reflect.Type) - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypes[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] } - -// MessageType returns the message type (pointer to struct) for a named message. -func MessageType(name string) reflect.Type { return protoTypes[name] } diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index 37c95357..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,849 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. - -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - gtNewline = []byte(">\n") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Printf("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' 
|| ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. -func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. 
- if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. - keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.mkeyprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.mvalprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } - - // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if pv.Type().Implements(extendableProtoType) { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeRaw writes an uninterpreted raw message. 
-func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Interface().([]byte))); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else if err := tm.writeStruct(w, v); err != nil { - return err - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. 
- switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep := pv.Interface().(extendableProto) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m := ep.ExtensionMap() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
- if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. 
-func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/text_parser.go deleted file mode 100644 index b5e1c8e1..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/golang/protobuf/proto/text_parser.go +++ /dev/null @@ -1,871 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. 
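The two vendored files removed in this stretch of the diff, text.go and text_parser.go, are the writing and parsing halves of the protocol buffer text format. For orientation, here is a minimal sketch of the public API they back (MarshalTextString and UnmarshalText are both visible in the deleted source; the generated package configpb and its Config message are hypothetical):

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "example.com/myapp/configpb" // hypothetical generated proto package
)

func main() {
	msg := &pb.Config{Name: proto.String("kube-monkey")}

	// MarshalTextString (text.go) renders the message in the text
	// format, e.g. `name: "kube-monkey"`.
	text := proto.MarshalTextString(msg)
	fmt.Println(text)

	// UnmarshalText (text_parser.go) reverses the operation, resetting
	// out before parsing.
	var out pb.Config
	if err := proto.UnmarshalText(text, &out); err != nil {
		log.Fatalf("text parse failed: %v", err)
	}
}
```

CompactTextString, also deleted above, produces the same output on a single line via the compactTextMarshaler variant.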
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") -) - -func unquoteC(s string, quote rune) (string, error) { 
- // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - base := 8 - ss := s[:2] - s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) - if err != nil { - return "", "", err - } - return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b - } - s = s[n:] - return string(bs), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. 
- messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. - for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(extendableProto) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - sv.Field(oop.Field).Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // Technically the "key" and "value" could come in any order, - // but in practice they won't. 
- - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - if err := p.consumeToken("key"); err != nil { - return err - } - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.mkeyprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - if err := p.consumeToken("value"); err != nil { - return err - } - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.mvalprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - if err := p.consumeToken(terminator); err != nil { - return err - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } else if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. -func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. 
- for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // Either "true", "false", 1 or 0. - switch tok.value { - case "true", "1": - fv.SetBool(true) - return nil - case "false", "0": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. 
-func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err - } - pb.Reset() - v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/.travis.yml deleted file mode 100644 index d87d4657..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - 1.2 - - tip diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/LICENSE deleted file mode 100644 index 0e5fb872..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/README.md b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/README.md deleted file mode 100644 index c60a31b0..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/README.md +++ /dev/null @@ -1,7 +0,0 @@ -context -======= -[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) - -gorilla/context is a general purpose registry for global request variables. 
- -Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/context.go b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/context.go deleted file mode 100644 index 81cb128b..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/context.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context - -import ( - "net/http" - "sync" - "time" -) - -var ( - mutex sync.RWMutex - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) -) - -// Set stores a value for a given key in a given request. -func Set(r *http.Request, key, val interface{}) { - mutex.Lock() - if data[r] == nil { - data[r] = make(map[interface{}]interface{}) - datat[r] = time.Now().Unix() - } - data[r][key] = val - mutex.Unlock() -} - -// Get returns a value stored for a given key in a given request. -func Get(r *http.Request, key interface{}) interface{} { - mutex.RLock() - if ctx := data[r]; ctx != nil { - value := ctx[key] - mutex.RUnlock() - return value - } - mutex.RUnlock() - return nil -} - -// GetOk returns stored value and presence state like multi-value return of map access. -func GetOk(r *http.Request, key interface{}) (interface{}, bool) { - mutex.RLock() - if _, ok := data[r]; ok { - value, ok := data[r][key] - mutex.RUnlock() - return value, ok - } - mutex.RUnlock() - return nil, false -} - -// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. -func GetAll(r *http.Request) map[interface{}]interface{} { - mutex.RLock() - if context, ok := data[r]; ok { - result := make(map[interface{}]interface{}, len(context)) - for k, v := range context { - result[k] = v - } - mutex.RUnlock() - return result - } - mutex.RUnlock() - return nil -} - -// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if -// the request was registered. -func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { - mutex.RLock() - context, ok := data[r] - result := make(map[interface{}]interface{}, len(context)) - for k, v := range context { - result[k] = v - } - mutex.RUnlock() - return result, ok -} - -// Delete removes a value stored for a given key in a given request. -func Delete(r *http.Request, key interface{}) { - mutex.Lock() - if data[r] != nil { - delete(data[r], key) - } - mutex.Unlock() -} - -// Clear removes all values stored for a given request. -// -// This is usually called by a handler wrapper to clean up request -// variables at the end of a request lifetime. See ClearHandler(). -func Clear(r *http.Request) { - mutex.Lock() - clear(r) - mutex.Unlock() -} - -// clear is Clear without the lock. -func clear(r *http.Request) { - delete(data, r) - delete(datat, r) -} - -// Purge removes request data stored for longer than maxAge, in seconds. -// It returns the amount of requests removed. -// -// If maxAge <= 0, all request data is removed. -// -// This is only used for sanity check: in case context cleaning was not -// properly set some request data can be kept forever, consuming an increasing -// amount of memory. In case this is detected, Purge() must be called -// periodically until the problem is fixed. 
-func Purge(maxAge int) int { - mutex.Lock() - count := 0 - if maxAge <= 0 { - count = len(data) - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) - } else { - min := time.Now().Unix() - int64(maxAge) - for r := range data { - if datat[r] < min { - clear(r) - count++ - } - } - } - mutex.Unlock() - return count -} - -// ClearHandler wraps an http.Handler and clears request values at the end -// of a request lifetime. -func ClearHandler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer Clear(r) - h.ServeHTTP(w, r) - }) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/doc.go b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/doc.go deleted file mode 100644 index 73c74003..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/context/doc.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package context stores values shared during a request lifetime. - -For example, a router can set variables extracted from the URL and later -application handlers can access those values, or it can be used to store -sessions values to be saved at the end of a request. There are several -others common uses. - -The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: - - http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 - -Here's the basic usage: first define the keys that you will need. The key -type is interface{} so a key can be of any type that supports equality. -Here we define a key using a custom int type to avoid name collisions: - - package foo - - import ( - "github.com/gorilla/context" - ) - - type key int - - const MyKey key = 0 - -Then set a variable. Variables are bound to an http.Request object, so you -need a request instance to set a value: - - context.Set(r, MyKey, "bar") - -The application can later access the variable using the same key you provided: - - func MyHandler(w http.ResponseWriter, r *http.Request) { - // val is "bar". - val := context.Get(r, foo.MyKey) - - // returns ("bar", true) - val, ok := context.GetOk(r, foo.MyKey) - // ... - } - -And that's all about the basic usage. We discuss some other ideas below. - -Any type can be stored in the context. To enforce a given type, make the key -private and wrap Get() and Set() to accept and return values of a specific -type: - - type key int - - const mykey key = 0 - - // GetMyKey returns a value for this package from the request values. - func GetMyKey(r *http.Request) SomeType { - if rv := context.Get(r, mykey); rv != nil { - return rv.(SomeType) - } - return nil - } - - // SetMyKey sets a value for this package in the request values. - func SetMyKey(r *http.Request, val SomeType) { - context.Set(r, mykey, val) - } - -Variables must be cleared at the end of a request, to remove all values -that were stored. This can be done in an http.Handler, after a request was -served. Just call Clear() passing the request: - - context.Clear(r) - -...or use ClearHandler(), which conveniently wraps an http.Handler to clear -variables at the end of a request lifetime. - -The Routers from the packages gorilla/mux and gorilla/pat call Clear() -so if you are using either of them you don't need to clear the context manually. 
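The package comment being deleted here ends by stressing that request-scoped values must be cleared, because they live in a package-global map keyed by *http.Request; Purge is only a backstop for handlers that forget. A minimal sketch of the ClearHandler wiring it recommends, assuming a plain net/http server (the port and handler are illustrative):

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/context"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// ClearHandler drops all context values for r once the request
	// has been served, so the global map cannot grow without bound.
	log.Fatal(http.ListenAndServe(":8080", context.ClearHandler(mux)))
}
```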
-*/ -package context diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/.travis.yml deleted file mode 100644 index 354b7f8b..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go - -go: - - 1.1 - - 1.2 - - 1.3 - - 1.4 - - tip diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/LICENSE deleted file mode 100644 index 66ea3c8a..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2013 The Gorilla Handlers Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/README.md b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/README.md deleted file mode 100644 index a340abe0..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/README.md +++ /dev/null @@ -1,52 +0,0 @@ -gorilla/handlers -================ -[![GoDoc](https://godoc.org/github.com/gorilla/handlers?status.svg)](https://godoc.org/github.com/gorilla/handlers) [![Build Status](https://travis-ci.org/gorilla/handlers.svg?branch=master)](https://travis-ci.org/gorilla/handlers) - -Package handlers is a collection of handlers (aka "HTTP middleware") for use -with Go's `net/http` package (or any framework supporting `http.Handler`), including: - -* `LoggingHandler` for logging HTTP requests in the Apache [Common Log - Format](http://httpd.apache.org/docs/2.2/logs.html#common). -* `CombinedLoggingHandler` for logging HTTP requests in the Apache [Combined Log - Format](http://httpd.apache.org/docs/2.2/logs.html#combined) commonly used by - both Apache and nginx. -* `CompressHandler` for gzipping responses. -* `ContentTypeHandler` for validating requests against a list of accepted - content types. 
-* `MethodHandler` for matching HTTP methods against handlers in a - `map[string]http.Handler` -* `ProxyHeaders` for populating `r.RemoteAddr` and `r.URL.Scheme` based on the - `X-Forwarded-For`, `X-Real-IP`, `X-Forwarded-Proto` and RFC7239 `Forwarded` - headers when running a Go server behind a HTTP reverse proxy. -* `CanonicalHost` for re-directing to the preferred host when handling multiple - domains (i.e. multiple CNAME aliases). - -Other handlers are documented [on the Gorilla -website](http://www.gorillatoolkit.org/pkg/handlers). - -## Example - -A simple example using `handlers.LoggingHandler` and `handlers.CompressHandler`: - -```go -import ( - "net/http" - "github.com/gorilla/handlers" -) - -func main() { - r := http.NewServeMux() - - // Only log requests to our admin dashboard to stdout - r.Handle("/admin", handlers.LoggingHandler(os.Stdout, http.HandlerFunc(ShowAdminDashboard))) - r.HandleFunc("/", ShowIndex) - - // Wrap our server with our gzip handler to gzip compress all responses. - http.ListenAndServe(":8000", handlers.CompressHandler(r)) -} -``` - -## License - -BSD licensed. See the included LICENSE file for details. - diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/canonical.go b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/canonical.go deleted file mode 100644 index 3961695c..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/canonical.go +++ /dev/null @@ -1,71 +0,0 @@ -package handlers - -import ( - "net/http" - "net/url" - "strings" -) - -type canonical struct { - h http.Handler - domain string - code int -} - -// CanonicalHost is HTTP middleware that re-directs requests to the canonical -// domain. It accepts a domain and a status code (e.g. 301 or 302) and -// re-directs clients to this domain. The existing request path is maintained. -// -// Note: If the provided domain is considered invalid by url.Parse or otherwise -// returns an empty scheme or host, clients are not re-directed. -// not re-directed. -// -// Example: -// -// r := mux.NewRouter() -// canonical := handlers.CanonicalHost("http://www.gorillatoolkit.org", 302) -// r.HandleFunc("/route", YourHandler) -// -// log.Fatal(http.ListenAndServe(":7000", canonical(r))) -// -func CanonicalHost(domain string, code int) func(h http.Handler) http.Handler { - fn := func(h http.Handler) http.Handler { - return canonical{h, domain, code} - } - - return fn -} - -func (c canonical) ServeHTTP(w http.ResponseWriter, r *http.Request) { - dest, err := url.Parse(c.domain) - if err != nil { - // Call the next handler if the provided domain fails to parse. - c.h.ServeHTTP(w, r) - return - } - - if dest.Scheme == "" || dest.Host == "" { - // Call the next handler if the scheme or host are empty. - // Note that url.Parse won't fail on in this case. - c.h.ServeHTTP(w, r) - return - } - - if !strings.EqualFold(cleanHost(r.Host), dest.Host) { - // Re-build the destination URL - dest := dest.Scheme + "://" + dest.Host + r.URL.Path - http.Redirect(w, r, dest, c.code) - } - - c.h.ServeHTTP(w, r) -} - -// cleanHost cleans invalid Host headers by stripping anything after '/' or ' '. -// This is backported from Go 1.5 (in response to issue #11206) and attempts to -// mitigate malformed Host headers that do not match the format in RFC7230. 
-func cleanHost(in string) string { - if i := strings.IndexAny(in, " /"); i != -1 { - return in[:i] - } - return in -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/compress.go b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/compress.go deleted file mode 100644 index 3d90e191..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/compress.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2013 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package handlers - -import ( - "compress/flate" - "compress/gzip" - "io" - "net/http" - "strings" -) - -type compressResponseWriter struct { - io.Writer - http.ResponseWriter - http.Hijacker -} - -func (w *compressResponseWriter) Header() http.Header { - return w.ResponseWriter.Header() -} - -func (w *compressResponseWriter) Write(b []byte) (int, error) { - h := w.ResponseWriter.Header() - if h.Get("Content-Type") == "" { - h.Set("Content-Type", http.DetectContentType(b)) - } - - return w.Writer.Write(b) -} - -// CompressHandler gzip compresses HTTP responses for clients that support it -// via the 'Accept-Encoding' header. -func CompressHandler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - L: - for _, enc := range strings.Split(r.Header.Get("Accept-Encoding"), ",") { - switch strings.TrimSpace(enc) { - case "gzip": - w.Header().Set("Content-Encoding", "gzip") - w.Header().Add("Vary", "Accept-Encoding") - - gw := gzip.NewWriter(w) - defer gw.Close() - - h, hok := w.(http.Hijacker) - if !hok { /* w is not Hijacker... oh well... */ - h = nil - } - - w = &compressResponseWriter{ - Writer: gw, - ResponseWriter: w, - Hijacker: h, - } - - break L - case "deflate": - w.Header().Set("Content-Encoding", "deflate") - w.Header().Add("Vary", "Accept-Encoding") - - fw, _ := flate.NewWriter(w, flate.DefaultCompression) - defer fw.Close() - - h, hok := w.(http.Hijacker) - if !hok { /* w is not Hijacker... oh well... */ - h = nil - } - - w = &compressResponseWriter{ - Writer: fw, - ResponseWriter: w, - Hijacker: h, - } - - break L - } - } - - h.ServeHTTP(w, r) - }) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/doc.go b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/doc.go deleted file mode 100644 index 944e5a8a..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -/* -Package handlers is a collection of handlers (aka "HTTP middleware") for use -with Go's net/http package (or any framework supporting http.Handler). - -The package includes handlers for logging in standardised formats, compressing -HTTP responses, validating content types and other useful tools for manipulating -requests and responses. -*/ -package handlers diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/handlers.go b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/handlers.go deleted file mode 100644 index c3c20e5b..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/handlers.go +++ /dev/null @@ -1,378 +0,0 @@ -// Copyright 2013 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
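handlers.go, whose deletion begins with the license header above, opens with MethodHandler: a map[string]http.Handler that dispatches on the request method, answers OPTIONS with an Allow list, and returns 405 for unlisted methods. A short usage sketch, with hypothetical listUsers and createUser handlers:

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/handlers"
)

func main() {
	// Hypothetical method-specific handlers.
	listUsers := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("users"))
	})
	createUser := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusCreated)
	})

	mux := http.NewServeMux()
	// Unlisted methods get a 405 with "Allow: GET, POST";
	// OPTIONS gets a 200 with the same Allow header.
	mux.Handle("/users", handlers.MethodHandler{
		"GET":  listUsers,
		"POST": createUser,
	})
	log.Fatal(http.ListenAndServe(":8080", mux))
}
```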
- -package handlers - -import ( - "bufio" - "fmt" - "io" - "net" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// MethodHandler is an http.Handler that dispatches to a handler whose key in the MethodHandler's -// map matches the name of the HTTP request's method, eg: GET -// -// If the request's method is OPTIONS and OPTIONS is not a key in the map then the handler -// responds with a status of 200 and sets the Allow header to a comma-separated list of -// available methods. -// -// If the request's method doesn't match any of its keys the handler responds with -// a status of 405, Method not allowed and sets the Allow header to a comma-separated list -// of available methods. -type MethodHandler map[string]http.Handler - -func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if handler, ok := h[req.Method]; ok { - handler.ServeHTTP(w, req) - } else { - allow := []string{} - for k := range h { - allow = append(allow, k) - } - sort.Strings(allow) - w.Header().Set("Allow", strings.Join(allow, ", ")) - if req.Method == "OPTIONS" { - w.WriteHeader(http.StatusOK) - } else { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - } - } -} - -// loggingHandler is the http.Handler implementation for LoggingHandlerTo and its friends -type loggingHandler struct { - writer io.Writer - handler http.Handler -} - -// combinedLoggingHandler is the http.Handler implementation for LoggingHandlerTo and its friends -type combinedLoggingHandler struct { - writer io.Writer - handler http.Handler -} - -func (h loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - t := time.Now() - logger := makeLogger(w) - url := *req.URL - h.handler.ServeHTTP(logger, req) - writeLog(h.writer, req, url, t, logger.Status(), logger.Size()) -} - -func (h combinedLoggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - t := time.Now() - logger := makeLogger(w) - url := *req.URL - h.handler.ServeHTTP(logger, req) - writeCombinedLog(h.writer, req, url, t, logger.Status(), logger.Size()) -} - -func makeLogger(w http.ResponseWriter) loggingResponseWriter { - var logger loggingResponseWriter = &responseLogger{w: w} - if _, ok := w.(http.Hijacker); ok { - logger = &hijackLogger{responseLogger{w: w}} - } - h, ok1 := logger.(http.Hijacker) - c, ok2 := w.(http.CloseNotifier) - if ok1 && ok2 { - return hijackCloseNotifier{logger, h, c} - } - if ok2 { - return &closeNotifyWriter{logger, c} - } - return logger -} - -type loggingResponseWriter interface { - http.ResponseWriter - http.Flusher - Status() int - Size() int -} - -// responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP status -// code and body size -type responseLogger struct { - w http.ResponseWriter - status int - size int -} - -func (l *responseLogger) Header() http.Header { - return l.w.Header() -} - -func (l *responseLogger) Write(b []byte) (int, error) { - if l.status == 0 { - // The status will be StatusOK if WriteHeader has not been called yet - l.status = http.StatusOK - } - size, err := l.w.Write(b) - l.size += size - return size, err -} - -func (l *responseLogger) WriteHeader(s int) { - l.w.WriteHeader(s) - l.status = s -} - -func (l *responseLogger) Status() int { - return l.status -} - -func (l *responseLogger) Size() int { - return l.size -} - -func (l *responseLogger) Flush() { - f, ok := l.w.(http.Flusher) - if ok { - f.Flush() - } -} - -type hijackLogger struct { - responseLogger -} - -func (l *hijackLogger) Hijack() (net.Conn, 
*bufio.ReadWriter, error) { - h := l.responseLogger.w.(http.Hijacker) - conn, rw, err := h.Hijack() - if err == nil && l.responseLogger.status == 0 { - // The status will be StatusSwitchingProtocols if there was no error and WriteHeader has not been called yet - l.responseLogger.status = http.StatusSwitchingProtocols - } - return conn, rw, err -} - -type closeNotifyWriter struct { - loggingResponseWriter - http.CloseNotifier -} - -type hijackCloseNotifier struct { - loggingResponseWriter - http.Hijacker - http.CloseNotifier -} - -const lowerhex = "0123456789abcdef" - -func appendQuoted(buf []byte, s string) []byte { - var runeTmp [utf8.UTFMax]byte - for width := 0; len(s) > 0; s = s[width:] { - r := rune(s[0]) - width = 1 - if r >= utf8.RuneSelf { - r, width = utf8.DecodeRuneInString(s) - } - if width == 1 && r == utf8.RuneError { - buf = append(buf, `\x`...) - buf = append(buf, lowerhex[s[0]>>4]) - buf = append(buf, lowerhex[s[0]&0xF]) - continue - } - if r == rune('"') || r == '\\' { // always backslashed - buf = append(buf, '\\') - buf = append(buf, byte(r)) - continue - } - if strconv.IsPrint(r) { - n := utf8.EncodeRune(runeTmp[:], r) - buf = append(buf, runeTmp[:n]...) - continue - } - switch r { - case '\a': - buf = append(buf, `\a`...) - case '\b': - buf = append(buf, `\b`...) - case '\f': - buf = append(buf, `\f`...) - case '\n': - buf = append(buf, `\n`...) - case '\r': - buf = append(buf, `\r`...) - case '\t': - buf = append(buf, `\t`...) - case '\v': - buf = append(buf, `\v`...) - default: - switch { - case r < ' ': - buf = append(buf, `\x`...) - buf = append(buf, lowerhex[s[0]>>4]) - buf = append(buf, lowerhex[s[0]&0xF]) - case r > utf8.MaxRune: - r = 0xFFFD - fallthrough - case r < 0x10000: - buf = append(buf, `\u`...) - for s := 12; s >= 0; s -= 4 { - buf = append(buf, lowerhex[r>>uint(s)&0xF]) - } - default: - buf = append(buf, `\U`...) - for s := 28; s >= 0; s -= 4 { - buf = append(buf, lowerhex[r>>uint(s)&0xF]) - } - } - } - } - return buf - -} - -// buildCommonLogLine builds a log entry for req in Apache Common Log Format. -// ts is the timestamp with which the entry should be logged. -// status and size are used to provide the response HTTP status and size. -func buildCommonLogLine(req *http.Request, url url.URL, ts time.Time, status int, size int) []byte { - username := "-" - if url.User != nil { - if name := url.User.Username(); name != "" { - username = name - } - } - - host, _, err := net.SplitHostPort(req.RemoteAddr) - - if err != nil { - host = req.RemoteAddr - } - - uri := url.RequestURI() - - buf := make([]byte, 0, 3*(len(host)+len(username)+len(req.Method)+len(uri)+len(req.Proto)+50)/2) - buf = append(buf, host...) - buf = append(buf, " - "...) - buf = append(buf, username...) - buf = append(buf, " ["...) - buf = append(buf, ts.Format("02/Jan/2006:15:04:05 -0700")...) - buf = append(buf, `] "`...) - buf = append(buf, req.Method...) - buf = append(buf, " "...) - buf = appendQuoted(buf, uri) - buf = append(buf, " "...) - buf = append(buf, req.Proto...) - buf = append(buf, `" `...) - buf = append(buf, strconv.Itoa(status)...) - buf = append(buf, " "...) - buf = append(buf, strconv.Itoa(size)...) - return buf -} - -// writeLog writes a log entry for req to w in Apache Common Log Format. -// ts is the timestamp with which the entry should be logged. -// status and size are used to provide the response HTTP status and size. 
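For reference, buildCommonLogLine above emits Apache Common Log Format (host, user, timestamp, quoted request line, status, size). A typical entry, with example values, looks like:

    127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326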
-func writeLog(w io.Writer, req *http.Request, url url.URL, ts time.Time, status, size int) { - buf := buildCommonLogLine(req, url, ts, status, size) - buf = append(buf, '\n') - w.Write(buf) -} - -// writeCombinedLog writes a log entry for req to w in Apache Combined Log Format. -// ts is the timestamp with which the entry should be logged. -// status and size are used to provide the response HTTP status and size. -func writeCombinedLog(w io.Writer, req *http.Request, url url.URL, ts time.Time, status, size int) { - buf := buildCommonLogLine(req, url, ts, status, size) - buf = append(buf, ` "`...) - buf = appendQuoted(buf, req.Referer()) - buf = append(buf, `" "`...) - buf = appendQuoted(buf, req.UserAgent()) - buf = append(buf, '"', '\n') - w.Write(buf) -} - -// CombinedLoggingHandler return a http.Handler that wraps h and logs requests to out in -// Apache Combined Log Format. -// -// See http://httpd.apache.org/docs/2.2/logs.html#combined for a description of this format. -// -// LoggingHandler always sets the ident field of the log to - -func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler { - return combinedLoggingHandler{out, h} -} - -// LoggingHandler return a http.Handler that wraps h and logs requests to out in -// Apache Common Log Format (CLF). -// -// See http://httpd.apache.org/docs/2.2/logs.html#common for a description of this format. -// -// LoggingHandler always sets the ident field of the log to - -func LoggingHandler(out io.Writer, h http.Handler) http.Handler { - return loggingHandler{out, h} -} - -// isContentType validates the Content-Type header -// is contentType. That is, its type and subtype match. -func isContentType(h http.Header, contentType string) bool { - ct := h.Get("Content-Type") - if i := strings.IndexRune(ct, ';'); i != -1 { - ct = ct[0:i] - } - return ct == contentType -} - -// ContentTypeHandler wraps and returns a http.Handler, validating the request content type -// is acompatible with the contentTypes list. -// It writes a HTTP 415 error if that fails. -// -// Only PUT, POST, and PATCH requests are considered. -func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !(r.Method == "PUT" || r.Method == "POST" || r.Method == "PATCH") { - h.ServeHTTP(w, r) - return - } - - for _, ct := range contentTypes { - if isContentType(r.Header, ct) { - h.ServeHTTP(w, r) - return - } - } - http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q", r.Header.Get("Content-Type"), contentTypes), http.StatusUnsupportedMediaType) - }) -} - -const ( - // HTTPMethodOverrideHeader is a commonly used - // http header to override a request method. - HTTPMethodOverrideHeader = "X-HTTP-Method-Override" - // HTTPMethodOverrideFormKey is a commonly used - // HTML form key to override a request method. - HTTPMethodOverrideFormKey = "_method" -) - -// HTTPMethodOverrideHandler wraps and returns a http.Handler which checks for the X-HTTP-Method-Override header -// or the _method form key, and overrides (if valid) request.Method with its value. -// -// This is especially useful for http clients that don't support many http verbs. -// It isn't secure to override e.g a GET to a POST, so only POST requests are considered. -// Likewise, the override method can only be a "write" method: PUT, PATCH or DELETE. -// -// Form method takes precedence over header method. 
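A minimal sketch of the logging middleware just removed, wrapping a standard mux with CombinedLoggingHandler; the route and address are illustrative:

    package main

    import (
        "net/http"
        "os"

        "github.com/gorilla/handlers"
    )

    func main() {
        api := http.NewServeMux()
        api.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
            w.Write([]byte("pong"))
        })
        // Every request is logged to stdout in Apache Combined Log Format.
        http.ListenAndServe(":8080", handlers.CombinedLoggingHandler(os.Stdout, api))
    }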
-func HTTPMethodOverrideHandler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method == "POST" { - om := r.FormValue(HTTPMethodOverrideFormKey) - if om == "" { - om = r.Header.Get(HTTPMethodOverrideHeader) - } - if om == "PUT" || om == "PATCH" || om == "DELETE" { - r.Method = om - } - } - h.ServeHTTP(w, r) - }) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/proxy_headers.go b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/proxy_headers.go deleted file mode 100644 index 268de9c6..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/handlers/proxy_headers.go +++ /dev/null @@ -1,113 +0,0 @@ -package handlers - -import ( - "net/http" - "regexp" - "strings" -) - -var ( - // De-facto standard header keys. - xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For") - xRealIP = http.CanonicalHeaderKey("X-Real-IP") - xForwardedProto = http.CanonicalHeaderKey("X-Forwarded-Scheme") -) - -var ( - // RFC7239 defines a new "Forwarded: " header designed to replace the - // existing use of X-Forwarded-* headers. - // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43 - forwarded = http.CanonicalHeaderKey("Forwarded") - // Allows for a sub-match of the first value after 'for=' to the next - // comma, semi-colon or space. The match is case-insensitive. - forRegex = regexp.MustCompile(`(?i)(?:for=)([^(;|,| )]+)`) - // Allows for a sub-match for the first instance of scheme (http|https) - // prefixed by 'proto='. The match is case-insensitive. - protoRegex = regexp.MustCompile(`(?i)(?:proto=)(https|http)`) -) - -// ProxyHeaders inspects common reverse proxy headers and sets the corresponding -// fields in the HTTP request struct. These are X-Forwarded-For and X-Real-IP -// for the remote (client) IP address, X-Forwarded-Proto for the scheme -// (http|https) and the RFC7239 Forwarded header, which may include both client -// IPs and schemes. -// -// NOTE: This middleware should only be used when behind a reverse -// proxy like nginx, HAProxy or Apache. Reverse proxies that don't (or are -// configured not to) strip these headers from client requests, or where these -// headers are accepted "as is" from a remote client (e.g. when Go is not behind -// a proxy), can manifest as a vulnerability if your application uses these -// headers for validating the 'trustworthiness' of a request. -func ProxyHeaders(h http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - // Set the remote IP with the value passed from the proxy. - if fwd := getIP(r); fwd != "" { - r.RemoteAddr = fwd - } - - // Set the scheme (proto) with the value passed from the proxy. - if scheme := getScheme(r); scheme != "" { - r.URL.Scheme = scheme - } - - // Call the next handler in the chain. - h.ServeHTTP(w, r) - } - - return http.HandlerFunc(fn) -} - -// getIP retrieves the IP from the X-Forwarded-For, X-Real-IP and RFC7239 -// Forwarded headers (in that order). -func getIP(r *http.Request) string { - var addr string - - if fwd := r.Header.Get(xForwardedFor); fwd != "" { - // Only grab the first (client) address. Note that '192.168.0.1, - // 10.1.1.1' is a valid key for X-Forwarded-For where addresses after - // the first may represent forwarding proxies earlier in the chain. 
- s := strings.Index(fwd, ", ") - if s == -1 { - s = len(fwd) - } - addr = fwd[:s] - } else if fwd := r.Header.Get(xRealIP); fwd != "" { - // X-Real-IP should only contain one IP address (the client making the - // request). - addr = fwd - } else if fwd := r.Header.Get(forwarded); fwd != "" { - // match should contain at least two elements if the protocol was - // specified in the Forwarded header. The first element will always be - // the 'for=' capture, which we ignore. In the case of multiple IP - // addresses (for=8.8.8.8, 8.8.4.4,172.16.1.20 is valid) we only - // extract the first, which should be the client IP. - if match := forRegex.FindStringSubmatch(fwd); len(match) > 1 { - // IPv6 addresses in Forwarded headers are quoted-strings. We strip - // these quotes. - addr = strings.Trim(match[1], `"`) - } - } - - return addr -} - -// getScheme retrieves the scheme from the X-Forwarded-Proto and RFC7239 -// Forwarded headers (in that order). -func getScheme(r *http.Request) string { - var scheme string - - // Retrieve the scheme from X-Forwarded-Proto. - if proto := r.Header.Get(xForwardedProto); proto != "" { - scheme = strings.ToLower(proto) - } else if proto := r.Header.Get(forwarded); proto != "" { - // match should contain at least two elements if the protocol was - // specified in the Forwarded header. The first element will always be - // the 'proto=' capture, which we ignore. In the case of multiple proto - // parameters (invalid) we only extract the first. - if match := protoRegex.FindStringSubmatch(proto); len(match) > 1 { - scheme = strings.ToLower(match[1]) - } - } - - return scheme -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/.travis.yml deleted file mode 100644 index d87d4657..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - 1.2 - - tip diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/LICENSE deleted file mode 100644 index 0e5fb872..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
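For context, a sketch of the ProxyHeaders middleware deleted above. As its doc comment warns, it is only safe behind a trusted reverse proxy that strips these headers from client requests; the handler body here is illustrative:

    package main

    import (
        "fmt"
        "net/http"

        "github.com/gorilla/handlers"
    )

    func main() {
        app := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            // Behind the proxy, RemoteAddr now reflects the client IP taken
            // from X-Forwarded-For / X-Real-IP / Forwarded, and URL.Scheme
            // reflects the forwarded protocol.
            fmt.Fprintf(w, "client: %s, scheme: %s", r.RemoteAddr, r.URL.Scheme)
        })
        http.ListenAndServe(":8080", handlers.ProxyHeaders(app))
    }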
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/README.md deleted file mode 100644 index e60301b0..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/README.md +++ /dev/null @@ -1,7 +0,0 @@ -mux -=== -[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) - -gorilla/mux is a powerful URL router and dispatcher. - -Read the full documentation here: http://www.gorillatoolkit.org/pkg/mux diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/doc.go deleted file mode 100644 index b2deed34..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/doc.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package gorilla/mux implements a request router and dispatcher. - -The name mux stands for "HTTP request multiplexer". Like the standard -http.ServeMux, mux.Router matches incoming requests against a list of -registered routes and calls a handler for the route that matches the URL -or other conditions. The main features are: - - * Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. - * URL hosts and paths can have variables with an optional regular - expression. - * Registered URLs can be built, or "reversed", which helps maintaining - references to resources. - * Routes can be used as subrouters: nested routes are only tested if the - parent route matches. This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. - * It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. - -Let's start registering a couple of URL paths and handlers: - - func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) - } - -Here we register three routes mapping URL paths to handlers. This is -equivalent to how http.HandleFunc() works: if an incoming request URL matches -one of the paths, the corresponding handler is called passing -(http.ResponseWriter, *http.Request) as parameters. - -Paths can have variables. They are defined using the format {name} or -{name:pattern}. If a regular expression pattern is not defined, the matched -variable will be anything until the next slash. 
For example: - - r := mux.NewRouter() - r.HandleFunc("/products/{key}", ProductHandler) - r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) - -The names are used to create a map of route variables which can be retrieved -calling mux.Vars(): - - vars := mux.Vars(request) - category := vars["category"] - -And this is all you need to know about the basic usage. More advanced options -are explained below. - -Routes can also be restricted to a domain or subdomain. Just define a host -pattern to be matched. They can also have variables: - - r := mux.NewRouter() - // Only matches if domain is "www.domain.com". - r.Host("www.domain.com") - // Matches a dynamic subdomain. - r.Host("{subdomain:[a-z]+}.domain.com") - -There are several other matchers that can be added. To match path prefixes: - - r.PathPrefix("/products/") - -...or HTTP methods: - - r.Methods("GET", "POST") - -...or URL schemes: - - r.Schemes("https") - -...or header values: - - r.Headers("X-Requested-With", "XMLHttpRequest") - -...or query values: - - r.Queries("key", "value") - -...or to use a custom matcher function: - - r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 - }) - -...and finally, it is possible to combine several matchers in a single route: - - r.HandleFunc("/products", ProductsHandler). - Host("www.domain.com"). - Methods("GET"). - Schemes("http") - -Setting the same matching conditions again and again can be boring, so we have -a way to group several routes that share the same requirements. -We call it "subrouting". - -For example, let's say we have several URLs that should only match when the -host is "www.domain.com". Create a route for that host and get a "subrouter" -from it: - - r := mux.NewRouter() - s := r.Host("www.domain.com").Subrouter() - -Then register routes in the subrouter: - - s.HandleFunc("/products/", ProductsHandler) - s.HandleFunc("/products/{key}", ProductHandler) - s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) - -The three URL paths we registered above will only be tested if the domain is -"www.domain.com", because the subrouter is tested first. This is not -only convenient, but also optimizes request matching. You can create -subrouters combining any attribute matchers accepted by a route. - -Subrouters can be used to create domain or path "namespaces": you define -subrouters in a central place and then parts of the app can register its -paths relatively to a given subrouter. - -There's one more thing about subroutes. When a subrouter has a path prefix, -the inner routes use it as base for their paths: - - r := mux.NewRouter() - s := r.PathPrefix("/products").Subrouter() - // "/products/" - s.HandleFunc("/", ProductsHandler) - // "/products/{key}/" - s.HandleFunc("/{key}/", ProductHandler) - // "/products/{key}/details" - s.HandleFunc("/{key}/details", ProductDetailsHandler) - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, -or "reversed". We define a name calling Name() on a route. For example: - - r := mux.NewRouter() - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") - -To build a URL, get the route and call the URL() method, passing a sequence of -key/value pairs for the route variables. 
For the previous route, we would do: - - url, err := r.Get("article").URL("category", "technology", "id", "42") - -...and the result will be a url.URL with the following path: - - "/articles/technology/42" - -This also works for host variables: - - r := mux.NewRouter() - r.Host("{subdomain}.domain.com"). - Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - - // url.String() will be "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") - -All variables defined in the route are required, and their values must -conform to the corresponding patterns. These requirements guarantee that a -generated URL will always match a registered route -- the only exception is -for explicitly defined "build-only" routes which never match. - -There's also a way to build only the URL host or path for a route: -use the methods URLHost() or URLPath() instead. For the previous route, -we would do: - - // "http://news.domain.com/" - host, err := r.Get("article").URLHost("subdomain", "news") - - // "/articles/technology/42" - path, err := r.Get("article").URLPath("category", "technology", "id", "42") - -And if you use subrouters, host and path defined separately can be built -as well: - - r := mux.NewRouter() - s := r.Host("{subdomain}.domain.com").Subrouter() - s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - - // "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") -*/ -package mux diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/mux.go deleted file mode 100644 index 5b5f8e7d..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/mux.go +++ /dev/null @@ -1,353 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "fmt" - "net/http" - "path" - - "github.com/gorilla/context" -) - -// NewRouter returns a new router instance. -func NewRouter() *Router { - return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} -} - -// Router registers routes to be matched and dispatches a handler. -// -// It implements the http.Handler interface, so it can be registered to serve -// requests: -// -// var router = mux.NewRouter() -// -// func main() { -// http.Handle("/", router) -// } -// -// Or, for Google App Engine, register it in a init() function: -// -// func init() { -// http.Handle("/", router) -// } -// -// This will send all incoming requests to the router. -type Router struct { - // Configurable Handler to be used when no route matches. - NotFoundHandler http.Handler - // Parent route, if this is a subrouter. - parent parentRoute - // Routes to be matched, in order. - routes []*Route - // Routes by name for URL building. - namedRoutes map[string]*Route - // See Router.StrictSlash(). This defines the flag for new routes. - strictSlash bool - // If true, do not clear the request context after handling the request - KeepContext bool -} - -// Match matches registered routes against the request. 
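The doc.go removed above summarizes the gorilla/mux API; a self-contained sketch of the basic pattern it describes (route template with variables, handler, mux.Vars), using illustrative names:

    package main

    import (
        "fmt"
        "net/http"

        "github.com/gorilla/mux"
    )

    func ArticleHandler(w http.ResponseWriter, r *http.Request) {
        vars := mux.Vars(r) // route variables extracted by the matched route
        fmt.Fprintf(w, "category=%s id=%s", vars["category"], vars["id"])
    }

    func main() {
        r := mux.NewRouter()
        r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
        http.ListenAndServe(":8080", r)
    }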
-func (r *Router) Match(req *http.Request, match *RouteMatch) bool { - for _, route := range r.routes { - if route.Match(req, match) { - return true - } - } - return false -} - -// ServeHTTP dispatches the handler registered in the matched route. -// -// When there is a match, the route variables can be retrieved calling -// mux.Vars(request). -func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { - // Clean path to canonical form and redirect. - if p := cleanPath(req.URL.Path); p != req.URL.Path { - - // Added 3 lines (Philip Schlump) - It was droping the query string and #whatever from query. - // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: - // http://code.google.com/p/go/issues/detail?id=5252 - url := *req.URL - url.Path = p - p = url.String() - - w.Header().Set("Location", p) - w.WriteHeader(http.StatusMovedPermanently) - return - } - var match RouteMatch - var handler http.Handler - if r.Match(req, &match) { - handler = match.Handler - setVars(req, match.Vars) - setCurrentRoute(req, match.Route) - } - if handler == nil { - handler = r.NotFoundHandler - if handler == nil { - handler = http.NotFoundHandler() - } - } - if !r.KeepContext { - defer context.Clear(req) - } - handler.ServeHTTP(w, req) -} - -// Get returns a route registered with the given name. -func (r *Router) Get(name string) *Route { - return r.getNamedRoutes()[name] -} - -// GetRoute returns a route registered with the given name. This method -// was renamed to Get() and remains here for backwards compatibility. -func (r *Router) GetRoute(name string) *Route { - return r.getNamedRoutes()[name] -} - -// StrictSlash defines the trailing slash behavior for new routes. The initial -// value is false. -// -// When true, if the route path is "/path/", accessing "/path" will redirect -// to the former and vice versa. In other words, your application will always -// see the path as specified in the route. -// -// When false, if the route path is "/path", accessing "/path/" will not match -// this route and vice versa. -// -// Special case: when a route sets a path prefix using the PathPrefix() method, -// strict slash is ignored for that route because the redirect behavior can't -// be determined from a prefix alone. However, any subrouters created from that -// route inherit the original StrictSlash setting. -func (r *Router) StrictSlash(value bool) *Router { - r.strictSlash = value - return r -} - -// ---------------------------------------------------------------------------- -// parentRoute -// ---------------------------------------------------------------------------- - -// getNamedRoutes returns the map where named routes are registered. -func (r *Router) getNamedRoutes() map[string]*Route { - if r.namedRoutes == nil { - if r.parent != nil { - r.namedRoutes = r.parent.getNamedRoutes() - } else { - r.namedRoutes = make(map[string]*Route) - } - } - return r.namedRoutes -} - -// getRegexpGroup returns regexp definitions from the parent route, if any. -func (r *Router) getRegexpGroup() *routeRegexpGroup { - if r.parent != nil { - return r.parent.getRegexpGroup() - } - return nil -} - -// ---------------------------------------------------------------------------- -// Route factories -// ---------------------------------------------------------------------------- - -// NewRoute registers an empty route. 
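A short sketch of the StrictSlash behavior documented above, with an illustrative route; the permanent (301) redirect matches what routeRegexpGroup.setMatch does further down in this diff:

    package main

    import (
        "net/http"

        "github.com/gorilla/mux"
    )

    func main() {
        r := mux.NewRouter().StrictSlash(true)
        // The route is registered with a trailing slash; with StrictSlash(true)
        // a request for "/products" is 301-redirected to "/products/", so the
        // handler always sees the path exactly as registered.
        r.HandleFunc("/products/", func(w http.ResponseWriter, req *http.Request) {
            w.Write([]byte("products"))
        })
        http.ListenAndServe(":8080", r)
    }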
-func (r *Router) NewRoute() *Route { - route := &Route{parent: r, strictSlash: r.strictSlash} - r.routes = append(r.routes, route) - return route -} - -// Handle registers a new route with a matcher for the URL path. -// See Route.Path() and Route.Handler(). -func (r *Router) Handle(path string, handler http.Handler) *Route { - return r.NewRoute().Path(path).Handler(handler) -} - -// HandleFunc registers a new route with a matcher for the URL path. -// See Route.Path() and Route.HandlerFunc(). -func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, - *http.Request)) *Route { - return r.NewRoute().Path(path).HandlerFunc(f) -} - -// Headers registers a new route with a matcher for request header values. -// See Route.Headers(). -func (r *Router) Headers(pairs ...string) *Route { - return r.NewRoute().Headers(pairs...) -} - -// Host registers a new route with a matcher for the URL host. -// See Route.Host(). -func (r *Router) Host(tpl string) *Route { - return r.NewRoute().Host(tpl) -} - -// MatcherFunc registers a new route with a custom matcher function. -// See Route.MatcherFunc(). -func (r *Router) MatcherFunc(f MatcherFunc) *Route { - return r.NewRoute().MatcherFunc(f) -} - -// Methods registers a new route with a matcher for HTTP methods. -// See Route.Methods(). -func (r *Router) Methods(methods ...string) *Route { - return r.NewRoute().Methods(methods...) -} - -// Path registers a new route with a matcher for the URL path. -// See Route.Path(). -func (r *Router) Path(tpl string) *Route { - return r.NewRoute().Path(tpl) -} - -// PathPrefix registers a new route with a matcher for the URL path prefix. -// See Route.PathPrefix(). -func (r *Router) PathPrefix(tpl string) *Route { - return r.NewRoute().PathPrefix(tpl) -} - -// Queries registers a new route with a matcher for URL query values. -// See Route.Queries(). -func (r *Router) Queries(pairs ...string) *Route { - return r.NewRoute().Queries(pairs...) -} - -// Schemes registers a new route with a matcher for URL schemes. -// See Route.Schemes(). -func (r *Router) Schemes(schemes ...string) *Route { - return r.NewRoute().Schemes(schemes...) -} - -// ---------------------------------------------------------------------------- -// Context -// ---------------------------------------------------------------------------- - -// RouteMatch stores information about a matched route. -type RouteMatch struct { - Route *Route - Handler http.Handler - Vars map[string]string -} - -type contextKey int - -const ( - varsKey contextKey = iota - routeKey -) - -// Vars returns the route variables for the current request, if any. -func Vars(r *http.Request) map[string]string { - if rv := context.Get(r, varsKey); rv != nil { - return rv.(map[string]string) - } - return nil -} - -// CurrentRoute returns the matched route for the current request, if any. -func CurrentRoute(r *http.Request) *Route { - if rv := context.Get(r, routeKey); rv != nil { - return rv.(*Route) - } - return nil -} - -func setVars(r *http.Request, val interface{}) { - context.Set(r, varsKey, val) -} - -func setCurrentRoute(r *http.Request, val interface{}) { - context.Set(r, routeKey, val) -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -// cleanPath returns the canonical path for p, eliminating . and .. elements. -// Borrowed from the net/http package. 
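The cleanPath helper defined just below normalizes the request path before the redirect in ServeHTTP above. A quick illustration of the same rules, as a hypothetical mirror built on path.Clean:

    package main

    import (
        "fmt"
        "path"
    )

    // canonical mirrors cleanPath: path.Clean plus restoring the
    // trailing slash that Clean strips (except for the root path).
    func canonical(p string) string {
        if p == "" {
            return "/"
        }
        if p[0] != '/' {
            p = "/" + p
        }
        np := path.Clean(p)
        if p[len(p)-1] == '/' && np != "/" {
            np += "/"
        }
        return np
    }

    func main() {
        fmt.Println(canonical("/a/b/../c//d/")) // prints "/a/c/d/"
    }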
-func cleanPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root; - // put the trailing slash back if necessary. - if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - return np -} - -// uniqueVars returns an error if two slices contain duplicated strings. -func uniqueVars(s1, s2 []string) error { - for _, v1 := range s1 { - for _, v2 := range s2 { - if v1 == v2 { - return fmt.Errorf("mux: duplicated route variable %q", v2) - } - } - } - return nil -} - -// mapFromPairs converts variadic string parameters to a string map. -func mapFromPairs(pairs ...string) (map[string]string, error) { - length := len(pairs) - if length%2 != 0 { - return nil, fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - } - m := make(map[string]string, length/2) - for i := 0; i < length; i += 2 { - m[pairs[i]] = pairs[i+1] - } - return m, nil -} - -// matchInArray returns true if the given string value is in the array. -func matchInArray(arr []string, value string) bool { - for _, v := range arr { - if v == value { - return true - } - } - return false -} - -// matchMap returns true if the given key/value pairs exist in a given map. -func matchMap(toCheck map[string]string, toMatch map[string][]string, - canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. - if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != "" { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. - valueExists := false - for _, value := range values { - if v == value { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/regexp.go deleted file mode 100644 index a6305483..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/regexp.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "bytes" - "fmt" - "net/http" - "net/url" - "regexp" - "strings" -) - -// newRouteRegexp parses a route template and returns a routeRegexp, -// used to match a host, a path or a query string. -// -// It will extract named variables, assemble a regexp to be matched, create -// a "reverse" template to build URLs and compile regexps to validate variable -// values used in URL building. -// -// Previously we accepted only Python-like identifiers for variable -// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that -// name and pattern can't be empty, and names can't contain a colon. -func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) { - // Check if it is well-formed. - idxs, errBraces := braceIndices(tpl) - if errBraces != nil { - return nil, errBraces - } - // Backup the original. - template := tpl - // Now let's parse it. 
- defaultPattern := "[^/]+" - if matchQuery { - defaultPattern = "[^?&]+" - matchPrefix = true - } else if matchHost { - defaultPattern = "[^.]+" - matchPrefix = false - } - // Only match strict slash if not matching - if matchPrefix || matchHost || matchQuery { - strictSlash = false - } - // Set a flag for strictSlash. - endSlash := false - if strictSlash && strings.HasSuffix(tpl, "/") { - tpl = tpl[:len(tpl)-1] - endSlash = true - } - varsN := make([]string, len(idxs)/2) - varsR := make([]*regexp.Regexp, len(idxs)/2) - pattern := bytes.NewBufferString("") - if !matchQuery { - pattern.WriteByte('^') - } - reverse := bytes.NewBufferString("") - var end int - var err error - for i := 0; i < len(idxs); i += 2 { - // Set all values we are interested in. - raw := tpl[end:idxs[i]] - end = idxs[i+1] - parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) - name := parts[0] - patt := defaultPattern - if len(parts) == 2 { - patt = parts[1] - } - // Name or pattern can't be empty. - if name == "" || patt == "" { - return nil, fmt.Errorf("mux: missing name or pattern in %q", - tpl[idxs[i]:end]) - } - // Build the regexp pattern. - fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt) - // Build the reverse template. - fmt.Fprintf(reverse, "%s%%s", raw) - // Append variable name and compiled pattern. - varsN[i/2] = name - varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) - if err != nil { - return nil, err - } - } - // Add the remaining. - raw := tpl[end:] - pattern.WriteString(regexp.QuoteMeta(raw)) - if strictSlash { - pattern.WriteString("[/]?") - } - if !matchPrefix { - pattern.WriteByte('$') - } - reverse.WriteString(raw) - if endSlash { - reverse.WriteByte('/') - } - // Compile full regexp. - reg, errCompile := regexp.Compile(pattern.String()) - if errCompile != nil { - return nil, errCompile - } - // Done! - return &routeRegexp{ - template: template, - matchHost: matchHost, - matchQuery: matchQuery, - strictSlash: strictSlash, - regexp: reg, - reverse: reverse.String(), - varsN: varsN, - varsR: varsR, - }, nil -} - -// routeRegexp stores a regexp to match a host or path and information to -// collect and validate route variables. -type routeRegexp struct { - // The unmodified template. - template string - // True for host match, false for path or query string match. - matchHost bool - // True for query string match, false for path and host match. - matchQuery bool - // The strictSlash value defined on the route, but disabled if PathPrefix was used. - strictSlash bool - // Expanded regexp. - regexp *regexp.Regexp - // Reverse template. - reverse string - // Variable names. - varsN []string - // Variable regexps (validators). - varsR []*regexp.Regexp -} - -// Match matches the regexp against the URL host or path. -func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { - if !r.matchHost { - if r.matchQuery { - return r.regexp.MatchString(req.URL.RawQuery) - } else { - return r.regexp.MatchString(req.URL.Path) - } - } - return r.regexp.MatchString(getHost(req)) -} - -// url builds a URL part using the given values. -func (r *routeRegexp) url(pairs ...string) (string, error) { - values, err := mapFromPairs(pairs...) - if err != nil { - return "", err - } - urlValues := make([]interface{}, len(r.varsN)) - for k, v := range r.varsN { - value, ok := values[v] - if !ok { - return "", fmt.Errorf("mux: missing route variable %q", v) - } - urlValues[k] = value - } - rv := fmt.Sprintf(r.reverse, urlValues...) 
- if !r.regexp.MatchString(rv) { - // The URL is checked against the full regexp, instead of checking - // individual variables. This is faster but to provide a good error - // message, we check individual regexps if the URL doesn't match. - for k, v := range r.varsN { - if !r.varsR[k].MatchString(values[v]) { - return "", fmt.Errorf( - "mux: variable %q doesn't match, expected %q", values[v], - r.varsR[k].String()) - } - } - } - return rv, nil -} - -// braceIndices returns the first level curly brace indices from a string. -// It returns an error in case of unbalanced braces. -func braceIndices(s string) ([]int, error) { - var level, idx int - idxs := make([]int, 0) - for i := 0; i < len(s); i++ { - switch s[i] { - case '{': - if level++; level == 1 { - idx = i - } - case '}': - if level--; level == 0 { - idxs = append(idxs, idx, i+1) - } else if level < 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - } - } - if level != 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - return idxs, nil -} - -// ---------------------------------------------------------------------------- -// routeRegexpGroup -// ---------------------------------------------------------------------------- - -// routeRegexpGroup groups the route matchers that carry variables. -type routeRegexpGroup struct { - host *routeRegexp - path *routeRegexp - queries []*routeRegexp -} - -// setMatch extracts the variables from the URL once a route matches. -func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { - // Store host variables. - if v.host != nil { - hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) - if hostVars != nil { - for k, v := range v.host.varsN { - m.Vars[v] = hostVars[k+1] - } - } - } - // Store path variables. - if v.path != nil { - pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) - if pathVars != nil { - for k, v := range v.path.varsN { - m.Vars[v] = pathVars[k+1] - } - // Check if we should redirect. - if v.path.strictSlash { - p1 := strings.HasSuffix(req.URL.Path, "/") - p2 := strings.HasSuffix(v.path.template, "/") - if p1 != p2 { - u, _ := url.Parse(req.URL.String()) - if p1 { - u.Path = u.Path[:len(u.Path)-1] - } else { - u.Path += "/" - } - m.Handler = http.RedirectHandler(u.String(), 301) - } - } - } - } - // Store query string variables. - rawQuery := req.URL.RawQuery - for _, q := range v.queries { - queryVars := q.regexp.FindStringSubmatch(rawQuery) - if queryVars != nil { - for k, v := range q.varsN { - m.Vars[v] = queryVars[k+1] - } - } - } -} - -// getHost tries its best to return the request host. -func getHost(r *http.Request) string { - if r.URL.IsAbs() { - return r.URL.Host - } - host := r.Host - // Slice off any port information. - if i := strings.Index(host, ":"); i != -1 { - host = host[:i] - } - return host - -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/route.go deleted file mode 100644 index c310e66b..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/gorilla/mux/route.go +++ /dev/null @@ -1,524 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "strings" -) - -// Route stores information to match a request and build URLs. 
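Conceptually, newRouteRegexp above turns a path template into an anchored regexp, with [^/]+ as the default variable pattern ([^.]+ for hosts, [^?&]+ for query values). A sketch of the equivalent match for one of the doc.go examples:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        // For the template "/articles/{category}/{id:[0-9]+}", the parser
        // derives (conceptually) this anchored pattern, one capture group
        // per route variable.
        re := regexp.MustCompile(`^/articles/([^/]+)/([0-9]+)$`)
        m := re.FindStringSubmatch("/articles/technology/42")
        fmt.Println(m[1], m[2]) // prints "technology 42"
    }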
-type Route struct { - // Parent where the route was registered (a Router). - parent parentRoute - // Request handler for the route. - handler http.Handler - // List of matchers. - matchers []matcher - // Manager for the variables from host and path. - regexp *routeRegexpGroup - // If true, when the path pattern is "/path/", accessing "/path" will - // redirect to the former and vice versa. - strictSlash bool - // If true, this route never matches: it is only used to build URLs. - buildOnly bool - // The name used to build URLs. - name string - // Error resulted from building a route. - err error -} - -// Match matches the route against the request. -func (r *Route) Match(req *http.Request, match *RouteMatch) bool { - if r.buildOnly || r.err != nil { - return false - } - // Match everything. - for _, m := range r.matchers { - if matched := m.Match(req, match); !matched { - return false - } - } - // Yay, we have a match. Let's collect some info about it. - if match.Route == nil { - match.Route = r - } - if match.Handler == nil { - match.Handler = r.handler - } - if match.Vars == nil { - match.Vars = make(map[string]string) - } - // Set variables. - if r.regexp != nil { - r.regexp.setMatch(req, match, r) - } - return true -} - -// ---------------------------------------------------------------------------- -// Route attributes -// ---------------------------------------------------------------------------- - -// GetError returns an error resulted from building the route, if any. -func (r *Route) GetError() error { - return r.err -} - -// BuildOnly sets the route to never match: it is only used to build URLs. -func (r *Route) BuildOnly() *Route { - r.buildOnly = true - return r -} - -// Handler -------------------------------------------------------------------- - -// Handler sets a handler for the route. -func (r *Route) Handler(handler http.Handler) *Route { - if r.err == nil { - r.handler = handler - } - return r -} - -// HandlerFunc sets a handler function for the route. -func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { - return r.Handler(http.HandlerFunc(f)) -} - -// GetHandler returns the handler for the route, if any. -func (r *Route) GetHandler() http.Handler { - return r.handler -} - -// Name ----------------------------------------------------------------------- - -// Name sets the name for the route, used to build URLs. -// If the name was registered already it will be overwritten. -func (r *Route) Name(name string) *Route { - if r.name != "" { - r.err = fmt.Errorf("mux: route already has name %q, can't set %q", - r.name, name) - } - if r.err == nil { - r.name = name - r.getNamedRoutes()[name] = r - } - return r -} - -// GetName returns the name for the route, if any. -func (r *Route) GetName() string { - return r.name -} - -// ---------------------------------------------------------------------------- -// Matchers -// ---------------------------------------------------------------------------- - -// matcher types try to match a request. -type matcher interface { - Match(*http.Request, *RouteMatch) bool -} - -// addMatcher adds a matcher to the route. -func (r *Route) addMatcher(m matcher) *Route { - if r.err == nil { - r.matchers = append(r.matchers, m) - } - return r -} - -// addRegexpMatcher adds a host or path matcher and builder to a route. 
-func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error { - if r.err != nil { - return r.err - } - r.regexp = r.getRegexpGroup() - if !matchHost && !matchQuery { - if len(tpl) == 0 || tpl[0] != '/' { - return fmt.Errorf("mux: path must start with a slash, got %q", tpl) - } - if r.regexp.path != nil { - tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl - } - } - rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash) - if err != nil { - return err - } - for _, q := range r.regexp.queries { - if err = uniqueVars(rr.varsN, q.varsN); err != nil { - return err - } - } - if matchHost { - if r.regexp.path != nil { - if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { - return err - } - } - r.regexp.host = rr - } else { - if r.regexp.host != nil { - if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { - return err - } - } - if matchQuery { - r.regexp.queries = append(r.regexp.queries, rr) - } else { - r.regexp.path = rr - } - } - r.addMatcher(rr) - return nil -} - -// Headers -------------------------------------------------------------------- - -// headerMatcher matches the request against header values. -type headerMatcher map[string]string - -func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMap(m, r.Header, true) -} - -// Headers adds a matcher for request header values. -// It accepts a sequence of key/value pairs to be matched. For example: -// -// r := mux.NewRouter() -// r.Headers("Content-Type", "application/json", -// "X-Requested-With", "XMLHttpRequest") -// -// The above route will only match if both request header values match. -// -// It the value is an empty string, it will match any value if the key is set. -func (r *Route) Headers(pairs ...string) *Route { - if r.err == nil { - var headers map[string]string - headers, r.err = mapFromPairs(pairs...) - return r.addMatcher(headerMatcher(headers)) - } - return r -} - -// Host ----------------------------------------------------------------------- - -// Host adds a matcher for the URL host. -// It accepts a template with zero or more URL variables enclosed by {}. -// Variables can define an optional regexp pattern to me matched: -// -// - {name} matches anything until the next dot. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter() -// r.Host("www.domain.com") -// r.Host("{subdomain}.domain.com") -// r.Host("{subdomain:[a-z]+}.domain.com") -// -// Variable names must be unique in a given route. They can be retrieved -// calling mux.Vars(request). -func (r *Route) Host(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, true, false, false) - return r -} - -// MatcherFunc ---------------------------------------------------------------- - -// MatcherFunc is the function signature used by custom matchers. -type MatcherFunc func(*http.Request, *RouteMatch) bool - -func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { - return m(r, match) -} - -// MatcherFunc adds a custom function to be used as request matcher. -func (r *Route) MatcherFunc(f MatcherFunc) *Route { - return r.addMatcher(f) -} - -// Methods -------------------------------------------------------------------- - -// methodMatcher matches the request against HTTP methods. -type methodMatcher []string - -func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.Method) -} - -// Methods adds a matcher for HTTP methods. 
-// It accepts a sequence of one or more methods to be matched, e.g.: -// "GET", "POST", "PUT". -func (r *Route) Methods(methods ...string) *Route { - for k, v := range methods { - methods[k] = strings.ToUpper(v) - } - return r.addMatcher(methodMatcher(methods)) -} - -// Path ----------------------------------------------------------------------- - -// Path adds a matcher for the URL path. -// It accepts a template with zero or more URL variables enclosed by {}. The -// template must start with a "/". -// Variables can define an optional regexp pattern to me matched: -// -// - {name} matches anything until the next slash. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter() -// r.Path("/products/").Handler(ProductsHandler) -// r.Path("/products/{key}").Handler(ProductsHandler) -// r.Path("/articles/{category}/{id:[0-9]+}"). -// Handler(ArticleHandler) -// -// Variable names must be unique in a given route. They can be retrieved -// calling mux.Vars(request). -func (r *Route) Path(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, false, false, false) - return r -} - -// PathPrefix ----------------------------------------------------------------- - -// PathPrefix adds a matcher for the URL path prefix. This matches if the given -// template is a prefix of the full URL path. See Route.Path() for details on -// the tpl argument. -// -// Note that it does not treat slashes specially ("/foobar/" will be matched by -// the prefix "/foo") so you may want to use a trailing slash here. -// -// Also note that the setting of Router.StrictSlash() has no effect on routes -// with a PathPrefix matcher. -func (r *Route) PathPrefix(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, false, true, false) - return r -} - -// Query ---------------------------------------------------------------------- - -// Queries adds a matcher for URL query values. -// It accepts a sequence of key/value pairs. Values may define variables. -// For example: -// -// r := mux.NewRouter() -// r.Queries("foo", "bar", "id", "{id:[0-9]+}") -// -// The above route will only match if the URL contains the defined queries -// values, e.g.: ?foo=bar&id=42. -// -// It the value is an empty string, it will match any value if the key is set. -// -// Variables can define an optional regexp pattern to me matched: -// -// - {name} matches anything until the next slash. -// -// - {name:pattern} matches the given regexp pattern. -func (r *Route) Queries(pairs ...string) *Route { - length := len(pairs) - if length%2 != 0 { - r.err = fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - return nil - } - for i := 0; i < length; i += 2 { - if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, true, true); r.err != nil { - return r - } - } - - return r -} - -// Schemes -------------------------------------------------------------------- - -// schemeMatcher matches the request against URL schemes. -type schemeMatcher []string - -func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.URL.Scheme) -} - -// Schemes adds a matcher for URL schemes. -// It accepts a sequence of schemes to be matched, e.g.: "http", "https". 
-func (r *Route) Schemes(schemes ...string) *Route { - for k, v := range schemes { - schemes[k] = strings.ToLower(v) - } - return r.addMatcher(schemeMatcher(schemes)) -} - -// Subrouter ------------------------------------------------------------------ - -// Subrouter creates a subrouter for the route. -// -// It will test the inner routes only if the parent route matched. For example: -// -// r := mux.NewRouter() -// s := r.Host("www.domain.com").Subrouter() -// s.HandleFunc("/products/", ProductsHandler) -// s.HandleFunc("/products/{key}", ProductHandler) -// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) -// -// Here, the routes registered in the subrouter won't be tested if the host -// doesn't match. -func (r *Route) Subrouter() *Router { - router := &Router{parent: r, strictSlash: r.strictSlash} - r.addMatcher(router) - return router -} - -// ---------------------------------------------------------------------------- -// URL building -// ---------------------------------------------------------------------------- - -// URL builds a URL for the route. -// -// It accepts a sequence of key/value pairs for the route variables. For -// example, given this route: -// -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") -// -// ...a URL for it can be built using: -// -// url, err := r.Get("article").URL("category", "technology", "id", "42") -// -// ...which will return an url.URL with the following path: -// -// "/articles/technology/42" -// -// This also works for host variables: -// -// r := mux.NewRouter() -// r.Host("{subdomain}.domain.com"). -// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") -// -// // url.String() will be "http://news.domain.com/articles/technology/42" -// url, err := r.Get("article").URL("subdomain", "news", -// "category", "technology", -// "id", "42") -// -// All variables defined in the route are required, and their values must -// conform to the corresponding patterns. -func (r *Route) URL(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil { - return nil, errors.New("mux: route doesn't have a host or path") - } - var scheme, host, path string - var err error - if r.regexp.host != nil { - // Set a default scheme. - scheme = "http" - if host, err = r.regexp.host.url(pairs...); err != nil { - return nil, err - } - } - if r.regexp.path != nil { - if path, err = r.regexp.path.url(pairs...); err != nil { - return nil, err - } - } - return &url.URL{ - Scheme: scheme, - Host: host, - Path: path, - }, nil -} - -// URLHost builds the host part of the URL for a route. See Route.URL(). -// -// The route must have a host defined. -func (r *Route) URLHost(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil || r.regexp.host == nil { - return nil, errors.New("mux: route doesn't have a host") - } - host, err := r.regexp.host.url(pairs...) - if err != nil { - return nil, err - } - return &url.URL{ - Scheme: "http", - Host: host, - }, nil -} - -// URLPath builds the path part of the URL for a route. See Route.URL(). -// -// The route must have a path defined. -func (r *Route) URLPath(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil || r.regexp.path == nil { - return nil, errors.New("mux: route doesn't have a path") - } - path, err := r.regexp.path.url(pairs...) 
- if err != nil { - return nil, err - } - return &url.URL{ - Path: path, - }, nil -} - -// ---------------------------------------------------------------------------- -// parentRoute -// ---------------------------------------------------------------------------- - -// parentRoute allows routes to know about parent host and path definitions. -type parentRoute interface { - getNamedRoutes() map[string]*Route - getRegexpGroup() *routeRegexpGroup -} - -// getNamedRoutes returns the map where named routes are registered. -func (r *Route) getNamedRoutes() map[string]*Route { - if r.parent == nil { - // During tests router is not always set. - r.parent = NewRouter() - } - return r.parent.getNamedRoutes() -} - -// getRegexpGroup returns regexp definitions from this route. -func (r *Route) getRegexpGroup() *routeRegexpGroup { - if r.regexp == nil { - if r.parent == nil { - // During tests router is not always set. - r.parent = NewRouter() - } - regexp := r.parent.getRegexpGroup() - if regexp == nil { - r.regexp = new(routeRegexpGroup) - } else { - // Copy. - r.regexp = &routeRegexpGroup{ - host: regexp.host, - path: regexp.path, - queries: regexp.queries, - } - } - } - return r.regexp -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/LICENSE deleted file mode 100644 index 5f0d1fb6..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2014 Alan Shreve - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/README.md deleted file mode 100644 index 7a950d17..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# mousetrap - -mousetrap is a tiny library that answers a single question. - -On a Windows machine, was the process invoked by someone double clicking on -the executable file while browsing in explorer? - -### Motivation - -Windows developers unfamiliar with command line tools will often "double-click" -the executable for a tool. Because most CLI tools print the help and then exit -when invoked without arguments, this is often very frustrating for those users. - -mousetrap provides a way to detect these invocations so that you can provide -more helpful behavior and instructions on how to run the CLI tool. 
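For context, a minimal sketch of the single mousetrap entry point described above; the message and pause are illustrative:

    package main

    import (
        "fmt"
        "os"
        "time"

        "github.com/inconshreveable/mousetrap"
    )

    func main() {
        // If a Windows user double-clicked the binary from Explorer, print
        // guidance and pause so the console window doesn't vanish instantly.
        if mousetrap.StartedByExplorer() {
            fmt.Println("This is a command-line tool; please run it from a terminal.")
            time.Sleep(5 * time.Second)
            os.Exit(1)
        }
        fmt.Println("running normally")
    }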
To see what -this looks like, both from an organizational and a technical perspective, see -https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/ - -### The interface - -The library exposes a single interface: - - func StartedByExplorer() (bool) diff --git a/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/trap_others.go deleted file mode 100644 index 9d2d8a4b..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/trap_others.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !windows - -package mousetrap - -// StartedByExplorer returns true if the program was invoked by the user -// double-clicking on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. It only can tell you -// whether it was launched from explorer.exe -// -// On non-Windows platforms, it always returns false. -func StartedByExplorer() bool { - return false -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/trap_windows.go deleted file mode 100644 index 336142a5..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/trap_windows.go +++ /dev/null @@ -1,98 +0,0 @@ -// +build windows -// +build !go1.4 - -package mousetrap - -import ( - "fmt" - "os" - "syscall" - "unsafe" -) - -const ( - // defined by the Win32 API - th32cs_snapprocess uintptr = 0x2 -) - -var ( - kernel = syscall.MustLoadDLL("kernel32.dll") - CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot") - Process32First = kernel.MustFindProc("Process32FirstW") - Process32Next = kernel.MustFindProc("Process32NextW") -) - -// ProcessEntry32 structure defined by the Win32 API -type processEntry32 struct { - dwSize uint32 - cntUsage uint32 - th32ProcessID uint32 - th32DefaultHeapID int - th32ModuleID uint32 - cntThreads uint32 - th32ParentProcessID uint32 - pcPriClassBase int32 - dwFlags uint32 - szExeFile [syscall.MAX_PATH]uint16 -} - -func getProcessEntry(pid int) (pe *processEntry32, err error) { - snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0)) - if snapshot == uintptr(syscall.InvalidHandle) { - err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1) - return - } - defer syscall.CloseHandle(syscall.Handle(snapshot)) - - var processEntry processEntry32 - processEntry.dwSize = uint32(unsafe.Sizeof(processEntry)) - ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32First: %v", e1) - return - } - - for { - if processEntry.th32ProcessID == uint32(pid) { - pe = &processEntry - return - } - - ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32Next: %v", e1) - return - } - } -} - -func getppid() (pid int, err error) { - pe, err := getProcessEntry(os.Getpid()) - if err != nil { - return - } - - pid = int(pe.th32ParentProcessID) - return -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. 
-// It does not guarantee that the program was run from a terminal. It only can tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - ppid, err := getppid() - if err != nil { - return false - } - - pe, err := getProcessEntry(ppid) - if err != nil { - return false - } - - name := syscall.UTF16ToString(pe.szExeFile[:]) - return name == "explorer.exe" -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go deleted file mode 100644 index 9a28e57c..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build windows -// +build go1.4 - -package mousetrap - -import ( - "os" - "syscall" - "unsafe" -) - -func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { - snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) - if err != nil { - return nil, err - } - defer syscall.CloseHandle(snapshot) - var procEntry syscall.ProcessEntry32 - procEntry.Size = uint32(unsafe.Sizeof(procEntry)) - if err = syscall.Process32First(snapshot, &procEntry); err != nil { - return nil, err - } - for { - if procEntry.ProcessID == uint32(pid) { - return &procEntry, nil - } - err = syscall.Process32Next(snapshot, &procEntry) - if err != nil { - return nil, err - } - } -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. It only can tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - pe, err := getProcessEntry(os.Getppid()) - if err != nil { - return false - } - return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/.travis.yml deleted file mode 100644 index 7f3fe9a9..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go - -go: - - 1.4 - -script: - - go test diff --git a/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/LICENSE deleted file mode 100644 index f9c841a5..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/README.md deleted file mode 100644 index 659d6885..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# mapstructure - -mapstructure is a Go library for decoding generic map values to structures -and vice versa, while providing helpful error handling. - -This library is most useful when decoding values from some data stream (JSON, -Gob, etc.) where you don't _quite_ know the structure of the underlying data -until you read a part of it. You can therefore read a `map[string]interface{}` -and use this library to decode it into the proper underlying native Go -structure. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/mapstructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). - -The `Decode` function has examples associated with it there. - -## But Why?! - -Go offers fantastic standard libraries for decoding formats such as JSON. -The standard method is to have a struct pre-created, and populate that struct -from the bytes of the encoded format. This is great, but the problem is if -you have configuration or an encoding that changes slightly depending on -specific fields. For example, consider this JSON: - -```json -{ - "type": "person", - "name": "Mitchell" -} -``` - -Perhaps we can't populate a specific structure without first reading -the "type" field from the JSON. We could always do two passes over the -decoding of the JSON (reading the "type" first, and the rest later). -However, it is much simpler to just decode this into a `map[string]interface{}` -structure, read the "type" key, then use something like this library -to decode it into the proper structure. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/decode_hooks.go deleted file mode 100644 index aa91f76c..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ /dev/null @@ -1,151 +0,0 @@ -package mapstructure - -import ( - "errors" - "reflect" - "strconv" - "strings" - "time" -) - -// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns -// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. -func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { - // Create variables here so we can reference them with the reflect pkg - var f1 DecodeHookFuncType - var f2 DecodeHookFuncKind - - // Fill in the variables into this interface and the rest is done - // automatically using the reflect package. 
- potential := []interface{}{f1, f2} - - v := reflect.ValueOf(h) - vt := v.Type() - for _, raw := range potential { - pt := reflect.ValueOf(raw).Type() - if vt.ConvertibleTo(pt) { - return v.Convert(pt).Interface() - } - } - - return nil -} - -// DecodeHookExec executes the given decode hook. This should be used -// since it'll naturally degrade to the older backwards compatible DecodeHookFunc -// that took reflect.Kind instead of reflect.Type. -func DecodeHookExec( - raw DecodeHookFunc, - from reflect.Type, to reflect.Type, - data interface{}) (interface{}, error) { - // Build our arguments that reflect expects - argVals := make([]reflect.Value, 3) - argVals[0] = reflect.ValueOf(from) - argVals[1] = reflect.ValueOf(to) - argVals[2] = reflect.ValueOf(data) - - switch f := typedDecodeHook(raw).(type) { - case DecodeHookFuncType: - return f(from, to, data) - case DecodeHookFuncKind: - return f(from.Kind(), to.Kind(), data) - default: - return nil, errors.New("invalid decode hook signature") - } -} - -// ComposeDecodeHookFunc creates a single DecodeHookFunc that -// automatically composes multiple DecodeHookFuncs. -// -// The composed funcs are called in order, with the result of the -// previous transformation. -func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - var err error - for _, f1 := range fs { - data, err = DecodeHookExec(f1, f, t, data) - if err != nil { - return nil, err - } - - // Modify the from kind to be correct with the new data - f = reflect.ValueOf(data).Type() - } - - return data, nil - } -} - -// StringToSliceHookFunc returns a DecodeHookFunc that converts -// string to []string by splitting on the given sep. -func StringToSliceHookFunc(sep string) DecodeHookFunc { - return func( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - if f != reflect.String || t != reflect.Slice { - return data, nil - } - - raw := data.(string) - if raw == "" { - return []string{}, nil - } - - return strings.Split(raw, sep), nil - } -} - -// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts -// strings to time.Duration. 
-func StringToTimeDurationHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Duration(5)) { - return data, nil - } - - // Convert it by parsing - return time.ParseDuration(data.(string)) - } -} - -func WeaklyTypedHook( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - dataVal := reflect.ValueOf(data) - switch t { - case reflect.String: - switch f { - case reflect.Bool: - if dataVal.Bool() { - return "1", nil - } else { - return "0", nil - } - case reflect.Float32: - return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil - case reflect.Int: - return strconv.FormatInt(dataVal.Int(), 10), nil - case reflect.Slice: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - if elemKind == reflect.Uint8 { - return string(dataVal.Interface().([]uint8)), nil - } - case reflect.Uint: - return strconv.FormatUint(dataVal.Uint(), 10), nil - } - } - - return data, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/error.go deleted file mode 100644 index 47a99e5a..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/error.go +++ /dev/null @@ -1,50 +0,0 @@ -package mapstructure - -import ( - "errors" - "fmt" - "sort" - "strings" -) - -// Error implements the error interface and can represents multiple -// errors that occur in the course of a single decode. -type Error struct { - Errors []string -} - -func (e *Error) Error() string { - points := make([]string, len(e.Errors)) - for i, err := range e.Errors { - points[i] = fmt.Sprintf("* %s", err) - } - - sort.Strings(points) - return fmt.Sprintf( - "%d error(s) decoding:\n\n%s", - len(e.Errors), strings.Join(points, "\n")) -} - -// WrappedErrors implements the errwrap.Wrapper interface to make this -// return value more useful with the errwrap and go-multierror libraries. -func (e *Error) WrappedErrors() []error { - if e == nil { - return nil - } - - result := make([]error, len(e.Errors)) - for i, e := range e.Errors { - result[i] = errors.New(e) - } - - return result -} - -func appendErrors(errors []string, err error) []string { - switch e := err.(type) { - case *Error: - return append(errors, e.Errors...) - default: - return append(errors, e.Error()) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/mapstructure.go deleted file mode 100644 index d3cb4e8f..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ /dev/null @@ -1,746 +0,0 @@ -// The mapstructure package exposes functionality to convert an -// abitrary map[string]interface{} into a native Go structure. -// -// The Go structure can be arbitrarily complex, containing slices, -// other structs, etc. and the decoder will properly decode nested -// maps and so on into the proper structures in the native Go struct. -// See the examples to see what the decoder is capable of. -package mapstructure - -import ( - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" -) - -// DecodeHookFunc is the callback function that can be used for -// data transformations. See "DecodeHook" in the DecoderConfig -// struct. 
-// -// The type should be DecodeHookFuncType or DecodeHookFuncKind. -// Either is accepted. Types are a superset of Kinds (Types can return -// Kinds) and are generally a richer thing to use, but Kinds are simpler -// if you only need those. -// -// The reason DecodeHookFunc is multi-typed is for backwards compatibility: -// we started with Kinds and then realized Types were the better solution, -// but have a promise to not break backwards compat so we now support -// both. -type DecodeHookFunc interface{} - -type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) -type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) - -// DecoderConfig is the configuration that is used to create a new decoder -// and allows customization of various aspects of decoding. -type DecoderConfig struct { - // DecodeHook, if set, will be called before any decoding and any - // type conversion (if WeaklyTypedInput is on). This lets you modify - // the values before they're set down onto the resulting struct. - // - // If an error is returned, the entire decode will fail with that - // error. - DecodeHook DecodeHookFunc - - // If ErrorUnused is true, then it is an error for there to exist - // keys in the original map that were unused in the decoding process - // (extra keys). - ErrorUnused bool - - // ZeroFields, if set to true, will zero fields before writing them. - // For example, a map will be emptied before decoded values are put in - // it. If this is false, a map will be merged. - ZeroFields bool - - // If WeaklyTypedInput is true, the decoder will make the following - // "weak" conversions: - // - // - bools to string (true = "1", false = "0") - // - numbers to string (base 10) - // - bools to int/uint (true = 1, false = 0) - // - strings to int/uint (base implied by prefix) - // - int to bool (true if value != 0) - // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, - // FALSE, false, False. Anything else is an error) - // - empty array = empty map and vice versa - // - negative numbers to overflowed uint values (base 10) - // - WeaklyTypedInput bool - - // Metadata is the struct that will contain extra metadata about - // the decoding. If this is nil, then no metadata will be tracked. - Metadata *Metadata - - // Result is a pointer to the struct that will contain the decoded - // value. - Result interface{} - - // The tag name that mapstructure reads for field names. This - // defaults to "mapstructure" - TagName string -} - -// A Decoder takes a raw interface value and turns it into structured -// data, keeping track of rich error information along the way in case -// anything goes wrong. Unlike the basic top-level Decode method, you can -// more finely control how the Decoder behaves using the DecoderConfig -// structure. The top-level Decode method is just a convenience that sets -// up the most basic Decoder. -type Decoder struct { - config *DecoderConfig -} - -// Metadata contains information about decoding a structure that -// is tedious or difficult to get otherwise. -type Metadata struct { - // Keys are the keys of the structure which were successfully decoded - Keys []string - - // Unused is a slice of keys that were found in the raw value but - // weren't decoded since there was no matching field in the result interface - Unused []string -} - -// Decode takes a map and uses reflection to convert it into the -// given Go native structure. val must be a pointer to a struct. 
-func Decode(m interface{}, rawVal interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: rawVal, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(m) -} - -// WeakDecode is the same as Decode but is shorthand to enable -// WeaklyTypedInput. See DecoderConfig for more info. -func WeakDecode(input, output interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: output, - WeaklyTypedInput: true, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// NewDecoder returns a new decoder for the given configuration. Once -// a decoder has been returned, the same configuration must not be used -// again. -func NewDecoder(config *DecoderConfig) (*Decoder, error) { - val := reflect.ValueOf(config.Result) - if val.Kind() != reflect.Ptr { - return nil, errors.New("result must be a pointer") - } - - val = val.Elem() - if !val.CanAddr() { - return nil, errors.New("result must be addressable (a pointer)") - } - - if config.Metadata != nil { - if config.Metadata.Keys == nil { - config.Metadata.Keys = make([]string, 0) - } - - if config.Metadata.Unused == nil { - config.Metadata.Unused = make([]string, 0) - } - } - - if config.TagName == "" { - config.TagName = "mapstructure" - } - - result := &Decoder{ - config: config, - } - - return result, nil -} - -// Decode decodes the given raw interface to the target pointer specified -// by the configuration. -func (d *Decoder) Decode(raw interface{}) error { - return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem()) -} - -// Decodes an unknown data type into a specific reflection value. -func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error { - if data == nil { - // If the data is nil, then we don't set anything. - return nil - } - - dataVal := reflect.ValueOf(data) - if !dataVal.IsValid() { - // If the data value is invalid, then we just set the value - // to be the zero value. - val.Set(reflect.Zero(val.Type())) - return nil - } - - if d.config.DecodeHook != nil { - // We have a DecodeHook, so let's pre-process the data. - var err error - data, err = DecodeHookExec( - d.config.DecodeHook, - dataVal.Type(), val.Type(), data) - if err != nil { - return err - } - } - - var err error - dataKind := getKind(val) - switch dataKind { - case reflect.Bool: - err = d.decodeBool(name, data, val) - case reflect.Interface: - err = d.decodeBasic(name, data, val) - case reflect.String: - err = d.decodeString(name, data, val) - case reflect.Int: - err = d.decodeInt(name, data, val) - case reflect.Uint: - err = d.decodeUint(name, data, val) - case reflect.Float32: - err = d.decodeFloat(name, data, val) - case reflect.Struct: - err = d.decodeStruct(name, data, val) - case reflect.Map: - err = d.decodeMap(name, data, val) - case reflect.Ptr: - err = d.decodePtr(name, data, val) - case reflect.Slice: - err = d.decodeSlice(name, data, val) - default: - // If we reached this point then we weren't able to decode it - return fmt.Errorf("%s: unsupported type: %s", name, dataKind) - } - - // If we reached here, then we successfully decoded SOMETHING, so - // mark the key as used if we're tracking metadata. - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - - return err -} - -// This decodes a basic type (bool, int, string, etc.) and sets the -// value to "data" of that type. 
-func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataValType := dataVal.Type() - if !dataValType.AssignableTo(val.Type()) { - return fmt.Errorf( - "'%s' expected type '%s', got '%s'", - name, val.Type(), dataValType) - } - - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - - converted := true - switch { - case dataKind == reflect.String: - val.SetString(dataVal.String()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetString("1") - } else { - val.SetString("0") - } - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatInt(dataVal.Int(), 10)) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) - case dataKind == reflect.Slice && d.config.WeaklyTypedInput: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - switch { - case elemKind == reflect.Uint8: - val.SetString(string(dataVal.Interface().([]uint8))) - default: - converted = false - } - default: - converted = false - } - - if !converted { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Int: - val.SetInt(dataVal.Int()) - case dataKind == reflect.Uint: - val.SetInt(int64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetInt(int64(dataVal.Float())) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetInt(1) - } else { - val.SetInt(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) - if err == nil { - val.SetInt(i) - } else { - return fmt.Errorf("cannot parse '%s' as int: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Int: - i := dataVal.Int() - if i < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %d overflows uint", - name, i) - } - val.SetUint(uint64(i)) - case dataKind == reflect.Uint: - val.SetUint(dataVal.Uint()) - case dataKind == reflect.Float32: - f := dataVal.Float() - if f < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %f overflows uint", - name, f) - } - val.SetUint(uint64(f)) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetUint(1) - } else { - val.SetUint(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) - if err == nil { - val.SetUint(i) - } else { - return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - 
name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Bool: - val.SetBool(dataVal.Bool()) - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Int() != 0) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Uint() != 0) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Float() != 0) - case dataKind == reflect.String && d.config.WeaklyTypedInput: - b, err := strconv.ParseBool(dataVal.String()) - if err == nil { - val.SetBool(b) - } else if dataVal.String() == "" { - val.SetBool(false) - } else { - return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Int: - val.SetFloat(float64(dataVal.Int())) - case dataKind == reflect.Uint: - val.SetFloat(float64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetFloat(float64(dataVal.Float())) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetFloat(1) - } else { - val.SetFloat(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) - if err == nil { - val.SetFloat(f) - } else { - return fmt.Errorf("cannot parse '%s' as float: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { - valType := val.Type() - valKeyType := valType.Key() - valElemType := valType.Elem() - - // By default we overwrite keys in the current map - valMap := val - - // If the map is nil or we're purposely zeroing fields, make a new map - if valMap.IsNil() || d.config.ZeroFields { - // Make a new map to hold our result - mapType := reflect.MapOf(valKeyType, valElemType) - valMap = reflect.MakeMap(mapType) - } - - // Check input type - dataVal := reflect.Indirect(reflect.ValueOf(data)) - if dataVal.Kind() != reflect.Map { - // Accept empty array/slice instead of an empty map in weakly typed mode - if d.config.WeaklyTypedInput && - (dataVal.Kind() == reflect.Slice || dataVal.Kind() == reflect.Array) && - dataVal.Len() == 0 { - val.Set(valMap) - return nil - } else { - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) - } - } - - // Accumulate errors - errors := make([]string, 0) - - for _, k := range dataVal.MapKeys() { - fieldName := fmt.Sprintf("%s[%s]", name, k) - - // First decode the key into the proper type - currentKey := reflect.Indirect(reflect.New(valKeyType)) - if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { - errors = appendErrors(errors, err) - continue - } - - // Next decode the data into the proper type - v := dataVal.MapIndex(k).Interface() - currentVal := reflect.Indirect(reflect.New(valElemType)) - if err := d.decode(fieldName, v, currentVal); err != nil { - errors = appendErrors(errors, err) - continue - } - - valMap.SetMapIndex(currentKey, 
currentVal) - } - - // Set the built up map to the value - val.Set(valMap) - - // If we had errors, return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. - valType := val.Type() - valElemType := valType.Elem() - realVal := reflect.New(valElemType) - if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { - return err - } - - val.Set(realVal) - return nil -} - -func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataValKind := dataVal.Kind() - valType := val.Type() - valElemType := valType.Elem() - sliceType := reflect.SliceOf(valElemType) - - // Check input type - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - // Accept empty map instead of array/slice in weakly typed mode - if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 { - val.Set(reflect.MakeSlice(sliceType, 0, 0)) - return nil - } else { - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - } - } - - // Make a new slice to hold our result, same size as the original data. - valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) - - // Accumulate any errors - errors := make([]string, 0) - - for i := 0; i < dataVal.Len(); i++ { - currentData := dataVal.Index(i).Interface() - currentField := valSlice.Index(i) - - fieldName := fmt.Sprintf("%s[%d]", name, i) - if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) - } - } - - // Finally, set the value to the slice we built up - val.Set(valSlice) - - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - - // If the type of the value to write to and the data match directly, - // then we just set it directly instead of recursing into the structure. - if dataVal.Type() == val.Type() { - val.Set(dataVal) - return nil - } - - dataValKind := dataVal.Kind() - if dataValKind != reflect.Map { - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind) - } - - dataValType := dataVal.Type() - if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { - return fmt.Errorf( - "'%s' needs a map with string keys, has '%s' keys", - name, dataValType.Key().Kind()) - } - - dataValKeys := make(map[reflect.Value]struct{}) - dataValKeysUnused := make(map[interface{}]struct{}) - for _, dataValKey := range dataVal.MapKeys() { - dataValKeys[dataValKey] = struct{}{} - dataValKeysUnused[dataValKey.Interface()] = struct{}{} - } - - errors := make([]string, 0) - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = val - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. 
- fields := make(map[*reflect.StructField]reflect.Value) - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - - if fieldType.Anonymous { - fieldKind := fieldType.Type.Kind() - if fieldKind != reflect.Struct { - errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type: %s", fieldType.Name, fieldKind)) - continue - } - - // We have an embedded field. We "squash" the fields down - // if specified in the tag. - squash := false - tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - } - - if squash { - structs = append(structs, val.FieldByName(fieldType.Name)) - continue - } - } - - // Normal struct field, store it away - fields[&fieldType] = structVal.Field(i) - } - } - - for fieldType, field := range fields { - fieldName := fieldType.Name - - tagValue := fieldType.Tag.Get(d.config.TagName) - tagValue = strings.SplitN(tagValue, ",", 2)[0] - if tagValue != "" { - fieldName = tagValue - } - - rawMapKey := reflect.ValueOf(fieldName) - rawMapVal := dataVal.MapIndex(rawMapKey) - if !rawMapVal.IsValid() { - // Do a slower search by iterating over each key and - // doing case-insensitive search. - for dataValKey, _ := range dataValKeys { - mK, ok := dataValKey.Interface().(string) - if !ok { - // Not a string key - continue - } - - if strings.EqualFold(mK, fieldName) { - rawMapKey = dataValKey - rawMapVal = dataVal.MapIndex(dataValKey) - break - } - } - - if !rawMapVal.IsValid() { - // There was no matching key in the map for the value in - // the struct. Just ignore. - continue - } - } - - // Delete the key we're using from the unused map so we stop tracking - delete(dataValKeysUnused, rawMapKey.Interface()) - - if !field.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. - if !field.CanSet() { - continue - } - - // If the name is empty string, then we're at the root, and we - // don't dot-join the fields. 
- if name != "" { - fieldName = fmt.Sprintf("%s.%s", name, fieldName) - } - - if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil { - errors = appendErrors(errors, err) - } - } - - if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { - keys := make([]string, 0, len(dataValKeysUnused)) - for rawKey, _ := range dataValKeysUnused { - keys = append(keys, rawKey.(string)) - } - sort.Strings(keys) - - err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) - } - - if len(errors) > 0 { - return &Error{errors} - } - - // Add the unused keys to the list of unused keys if we're tracking metadata - if d.config.Metadata != nil { - for rawKey, _ := range dataValKeysUnused { - key := rawKey.(string) - if name != "" { - key = fmt.Sprintf("%s.%s", name, key) - } - - d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) - } - } - - return nil -} - -func getKind(val reflect.Value) reflect.Kind { - kind := val.Kind() - - switch { - case kind >= reflect.Int && kind <= reflect.Int64: - return reflect.Int - case kind >= reflect.Uint && kind <= reflect.Uint64: - return reflect.Uint - case kind >= reflect.Float32 && kind <= reflect.Float64: - return reflect.Float32 - default: - return kind - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/.gitignore b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/.gitignore deleted file mode 100644 index 5cdbab79..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*~ -*.pyc -test-env* -junk/ \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/.travis.yml deleted file mode 100644 index bf37e41f..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go -sudo: false - -go: - - 1.1.2 - - 1.2.2 - - 1.3.3 - - 1.4.2 - - 1.5.1 - - tip - -script: - - test -z "$(go fmt ./...)" - - go test diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/COPYING b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/COPYING deleted file mode 100644 index 8c27c67f..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/COPYING +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/ - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/README.md b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/README.md deleted file mode 100644 index 3855a394..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/README.md +++ /dev/null @@ -1,139 +0,0 @@ -Swift -===== - -This package provides an easy to use library for interfacing with -Swift / Openstack Object Storage / Rackspace cloud files from the Go -Language - -See here for package docs - - http://godoc.org/github.com/ncw/swift - -[![Build Status](https://api.travis-ci.org/ncw/swift.svg?branch=master)](https://travis-ci.org/ncw/swift) [![GoDoc](https://godoc.org/github.com/ncw/swift?status.svg)](https://godoc.org/github.com/ncw/swift) - -Install -------- - -Use go to install the library - - go get github.com/ncw/swift - -Usage ------ - -See here for full package docs - -- http://godoc.org/github.com/ncw/swift - -Here is a short example from the docs - - import "github.com/ncw/swift" - - // Create a connection - c := swift.Connection{ - UserName: "user", - ApiKey: "key", - AuthUrl: "auth_url", - Domain: "domain", // Name of the domain (v3 auth only) - Tenant: "tenant", // Name of the tenant (v2 auth only) - } - // Authenticate - err := c.Authenticate() - if err != nil { - panic(err) - } - // List all the containers - containers, err := c.ContainerNames(nil) - fmt.Println(containers) - // etc... - -Additions ---------- - -The `rs` sub project contains a wrapper for the Rackspace specific CDN Management interface. - -Testing -------- - -To run the tests you can either use an embedded fake Swift server -either use a real Openstack Swift server or a Rackspace Cloud files account. - -When using a real Swift server, you need to set these environment variables -before running the tests - - export SWIFT_API_USER='user' - export SWIFT_API_KEY='key' - export SWIFT_AUTH_URL='https://url.of.auth.server/v1.0' - -And optionally these if using v2 authentication - - export SWIFT_TENANT='TenantName' - export SWIFT_TENANT_ID='TenantId' - -And optionally these if using v3 authentication - - export SWIFT_TENANT='TenantName' - export SWIFT_TENANT_ID='TenantId' - export SWIFT_API_DOMAIN_ID='domain id' - export SWIFT_API_DOMAIN='domain name' - -And optionally these if using v3 trust - - export SWIFT_TRUST_ID='TrustId' - -And optionally this if you want to skip server certificate validation - - export SWIFT_AUTH_INSECURE=1 - -And optionally this to configure the connect channel timeout, in seconds - - export SWIFT_CONNECTION_CHANNEL_TIMEOUT=60 - -And optionally this to configure the data channel timeout, in seconds - - export SWIFT_DATA_CHANNEL_TIMEOUT=60 - -Then run the tests with `go test` - -License -------- - -This is free software under the terms of MIT license (check COPYING file -included in this package). - -Contact and support -------------------- - -The project website is at: - -- https://github.com/ncw/swift - -There you can file bug reports, ask for help or contribute patches. 
- -Authors -------- - -- Nick Craig-Wood - -Contributors ------------- - -- Brian "bojo" Jones -- Janika Liiv -- Yamamoto, Hirotaka -- Stephen -- platformpurple -- Paul Querna -- Livio Soares -- thesyncim -- lsowen -- Sylvain Baubeau -- Chris Kastorff -- Dai HaoJun -- Hua Wang -- Fabian Ruff -- Arturo Reuschenbach Puncernau -- Petr Kotek -- Stefan Majewsky -- Cezar Sa Espinola -- Sam Gunaratne diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/auth.go b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/auth.go deleted file mode 100644 index 316dc7fe..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/auth.go +++ /dev/null @@ -1,320 +0,0 @@ -package swift - -import ( - "bytes" - "encoding/json" - "net/http" - "net/url" - "strings" -) - -// Auth defines the operations needed to authenticate with swift -// -// This encapsulates the different authentication schemes in use -type Authenticator interface { - // Request creates an http.Request for the auth - return nil if not needed - Request(*Connection) (*http.Request, error) - // Response parses the http.Response - Response(resp *http.Response) error - // The public storage URL - set Internal to true to read - // internal/service net URL - StorageUrl(Internal bool) string - // The access token - Token() string - // The CDN url if available - CdnUrl() string -} - -type CustomEndpointAuthenticator interface { - StorageUrlForEndpoint(endpointType EndpointType) string -} - -type EndpointType string - -const ( - // Use public URL as storage URL - EndpointTypePublic = EndpointType("public") - - // Use internal URL as storage URL - EndpointTypeInternal = EndpointType("internal") - - // Use admin URL as storage URL - EndpointTypeAdmin = EndpointType("admin") -) - -// newAuth - create a new Authenticator from the AuthUrl -// -// A hint for AuthVersion can be provided -func newAuth(c *Connection) (Authenticator, error) { - AuthVersion := c.AuthVersion - if AuthVersion == 0 { - if strings.Contains(c.AuthUrl, "v3") { - AuthVersion = 3 - } else if strings.Contains(c.AuthUrl, "v2") { - AuthVersion = 2 - } else if strings.Contains(c.AuthUrl, "v1") { - AuthVersion = 1 - } else { - return nil, newErrorf(500, "Can't find AuthVersion in AuthUrl - set explicitly") - } - } - switch AuthVersion { - case 1: - return &v1Auth{}, nil - case 2: - return &v2Auth{ - // Guess as to whether using API key or - // password it will try both eventually so - // this is just an optimization. 
- useApiKey: len(c.ApiKey) >= 32, - }, nil - case 3: - return &v3Auth{}, nil - } - return nil, newErrorf(500, "Auth Version %d not supported", AuthVersion) -} - -// ------------------------------------------------------------ - -// v1 auth -type v1Auth struct { - Headers http.Header // V1 auth: the authentication headers so extensions can access them -} - -// v1 Authentication - make request -func (auth *v1Auth) Request(c *Connection) (*http.Request, error) { - req, err := http.NewRequest("GET", c.AuthUrl, nil) - if err != nil { - return nil, err - } - req.Header.Set("User-Agent", c.UserAgent) - req.Header.Set("X-Auth-Key", c.ApiKey) - req.Header.Set("X-Auth-User", c.UserName) - return req, nil -} - -// v1 Authentication - read response -func (auth *v1Auth) Response(resp *http.Response) error { - auth.Headers = resp.Header - return nil -} - -// v1 Authentication - read storage url -func (auth *v1Auth) StorageUrl(Internal bool) string { - storageUrl := auth.Headers.Get("X-Storage-Url") - if Internal { - newUrl, err := url.Parse(storageUrl) - if err != nil { - return storageUrl - } - newUrl.Host = "snet-" + newUrl.Host - storageUrl = newUrl.String() - } - return storageUrl -} - -// v1 Authentication - read auth token -func (auth *v1Auth) Token() string { - return auth.Headers.Get("X-Auth-Token") -} - -// v1 Authentication - read cdn url -func (auth *v1Auth) CdnUrl() string { - return auth.Headers.Get("X-CDN-Management-Url") -} - -// ------------------------------------------------------------ - -// v2 Authentication -type v2Auth struct { - Auth *v2AuthResponse - Region string - useApiKey bool // if set will use API key not Password - useApiKeyOk bool // if set won't change useApiKey any more - notFirst bool // set after first run -} - -// v2 Authentication - make request -func (auth *v2Auth) Request(c *Connection) (*http.Request, error) { - auth.Region = c.Region - // Toggle useApiKey if not first run and not OK yet - if auth.notFirst && !auth.useApiKeyOk { - auth.useApiKey = !auth.useApiKey - } - auth.notFirst = true - // Create a V2 auth request for the body of the connection - var v2i interface{} - if !auth.useApiKey { - // Normal swift authentication - v2 := v2AuthRequest{} - v2.Auth.PasswordCredentials.UserName = c.UserName - v2.Auth.PasswordCredentials.Password = c.ApiKey - v2.Auth.Tenant = c.Tenant - v2.Auth.TenantId = c.TenantId - v2i = v2 - } else { - // Rackspace special with API Key - v2 := v2AuthRequestRackspace{} - v2.Auth.ApiKeyCredentials.UserName = c.UserName - v2.Auth.ApiKeyCredentials.ApiKey = c.ApiKey - v2.Auth.Tenant = c.Tenant - v2.Auth.TenantId = c.TenantId - v2i = v2 - } - body, err := json.Marshal(v2i) - if err != nil { - return nil, err - } - url := c.AuthUrl - if !strings.HasSuffix(url, "/") { - url += "/" - } - url += "tokens" - req, err := http.NewRequest("POST", url, bytes.NewBuffer(body)) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("User-Agent", c.UserAgent) - return req, nil -} - -// v2 Authentication - read response -func (auth *v2Auth) Response(resp *http.Response) error { - auth.Auth = new(v2AuthResponse) - err := readJson(resp, auth.Auth) - // If successfully read Auth then no need to toggle useApiKey any more - if err == nil { - auth.useApiKeyOk = true - } - return err -} - -// Finds the Endpoint Url of "type" from the v2AuthResponse using the -// Region if set or defaulting to the first one if not -// -// Returns "" if not found -func (auth *v2Auth) endpointUrl(Type string, endpointType 
EndpointType) string { - for _, catalog := range auth.Auth.Access.ServiceCatalog { - if catalog.Type == Type { - for _, endpoint := range catalog.Endpoints { - if auth.Region == "" || (auth.Region == endpoint.Region) { - switch endpointType { - case EndpointTypeInternal: - return endpoint.InternalUrl - case EndpointTypePublic: - return endpoint.PublicUrl - case EndpointTypeAdmin: - return endpoint.AdminUrl - default: - return "" - } - } - } - } - } - return "" -} - -// v2 Authentication - read storage url -// -// If Internal is true then it reads the private (internal / service -// net) URL. -func (auth *v2Auth) StorageUrl(Internal bool) string { - endpointType := EndpointTypePublic - if Internal { - endpointType = EndpointTypeInternal - } - return auth.StorageUrlForEndpoint(endpointType) -} - -// v2 Authentication - read storage url -// -// Use the indicated endpointType to choose a URL. -func (auth *v2Auth) StorageUrlForEndpoint(endpointType EndpointType) string { - return auth.endpointUrl("object-store", endpointType) -} - -// v2 Authentication - read auth token -func (auth *v2Auth) Token() string { - return auth.Auth.Access.Token.Id -} - -// v2 Authentication - read cdn url -func (auth *v2Auth) CdnUrl() string { - return auth.endpointUrl("rax:object-cdn", EndpointTypePublic) -} - -// ------------------------------------------------------------ - -// V2 Authentication request -// -// http://docs.openstack.org/developer/keystone/api_curl_examples.html -// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html -// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html -type v2AuthRequest struct { - Auth struct { - PasswordCredentials struct { - UserName string `json:"username"` - Password string `json:"password"` - } `json:"passwordCredentials"` - Tenant string `json:"tenantName,omitempty"` - TenantId string `json:"tenantId,omitempty"` - } `json:"auth"` -} - -// V2 Authentication request - Rackspace variant -// -// http://docs.openstack.org/developer/keystone/api_curl_examples.html -// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html -// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html -type v2AuthRequestRackspace struct { - Auth struct { - ApiKeyCredentials struct { - UserName string `json:"username"` - ApiKey string `json:"apiKey"` - } `json:"RAX-KSKEY:apiKeyCredentials"` - Tenant string `json:"tenantName,omitempty"` - TenantId string `json:"tenantId,omitempty"` - } `json:"auth"` -} - -// V2 Authentication reply -// -// http://docs.openstack.org/developer/keystone/api_curl_examples.html -// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html -// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html -type v2AuthResponse struct { - Access struct { - ServiceCatalog []struct { - Endpoints []struct { - InternalUrl string - PublicUrl string - AdminUrl string - Region string - TenantId string - } - Name string - Type string - } - Token struct { - Expires string - Id string - Tenant struct { - Id string - Name string - } - } - User struct { - DefaultRegion string `json:"RAX-AUTH:defaultRegion"` - Id string - Name string - Roles []struct { - Description string - Id string - Name string - TenantId string - } - } - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/auth_v3.go 
b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/auth_v3.go deleted file mode 100644 index 21e96718..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/auth_v3.go +++ /dev/null @@ -1,227 +0,0 @@ -package swift - -import ( - "bytes" - "encoding/json" - "net/http" - "strings" -) - -const ( - v3AuthMethodToken = "token" - v3AuthMethodPassword = "password" - v3CatalogTypeObjectStore = "object-store" -) - -// V3 Authentication request -// http://docs.openstack.org/developer/keystone/api_curl_examples.html -// http://developer.openstack.org/api-ref-identity-v3.html -type v3AuthRequest struct { - Auth struct { - Identity struct { - Methods []string `json:"methods"` - Password *v3AuthPassword `json:"password,omitempty"` - Token *v3AuthToken `json:"token,omitempty"` - } `json:"identity"` - Scope *v3Scope `json:"scope,omitempty"` - } `json:"auth"` -} - -type v3Scope struct { - Project *v3Project `json:"project,omitempty"` - Domain *v3Domain `json:"domain,omitempty"` - Trust *v3Trust `json:"OS-TRUST:trust,omitempty"` -} - -type v3Domain struct { - Id string `json:"id,omitempty"` - Name string `json:"name,omitempty"` -} - -type v3Project struct { - Name string `json:"name,omitempty"` - Id string `json:"id,omitempty"` - Domain *v3Domain `json:"domain,omitempty"` -} - -type v3Trust struct { - Id string `json:"id"` -} - -type v3User struct { - Domain *v3Domain `json:"domain,omitempty"` - Id string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - Password string `json:"password,omitempty"` -} - -type v3AuthToken struct { - Id string `json:"id"` -} - -type v3AuthPassword struct { - User v3User `json:"user"` -} - -// V3 Authentication response -type v3AuthResponse struct { - Token struct { - Expires_At, Issued_At string - Methods []string - Roles []struct { - Id, Name string - Links struct { - Self string - } - } - - Project struct { - Domain struct { - Id, Name string - } - Id, Name string - } - - Catalog []struct { - Id, Namem, Type string - Endpoints []struct { - Id, Region_Id, Url, Region string - Interface EndpointType - } - } - - User struct { - Id, Name string - Domain struct { - Id, Name string - Links struct { - Self string - } - } - } - - Audit_Ids []string - } -} - -type v3Auth struct { - Region string - Auth *v3AuthResponse - Headers http.Header -} - -func (auth *v3Auth) Request(c *Connection) (*http.Request, error) { - auth.Region = c.Region - - var v3i interface{} - - v3 := v3AuthRequest{} - - if c.UserName == "" { - v3.Auth.Identity.Methods = []string{v3AuthMethodToken} - v3.Auth.Identity.Token = &v3AuthToken{Id: c.ApiKey} - } else { - v3.Auth.Identity.Methods = []string{v3AuthMethodPassword} - v3.Auth.Identity.Password = &v3AuthPassword{ - User: v3User{ - Name: c.UserName, - Password: c.ApiKey, - }, - } - - var domain *v3Domain - - if c.Domain != "" { - domain = &v3Domain{Name: c.Domain} - } else if c.DomainId != "" { - domain = &v3Domain{Id: c.DomainId} - } - v3.Auth.Identity.Password.User.Domain = domain - } - - if c.TrustId != "" { - v3.Auth.Scope = &v3Scope{Trust: &v3Trust{Id: c.TrustId}} - } else if c.TenantId != "" || c.Tenant != "" { - - v3.Auth.Scope = &v3Scope{Project: &v3Project{}} - - if c.TenantId != "" { - v3.Auth.Scope.Project.Id = c.TenantId - } else if c.Tenant != "" { - v3.Auth.Scope.Project.Name = c.Tenant - switch { - case c.TenantDomain != "": - v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.TenantDomain} - case c.TenantDomainId != "": - v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.TenantDomainId} - case 
c.Domain != "": - v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.Domain} - case c.DomainId != "": - v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.DomainId} - default: - v3.Auth.Scope.Project.Domain = &v3Domain{Name: "Default"} - } - } - } - - v3i = v3 - - body, err := json.Marshal(v3i) - - if err != nil { - return nil, err - } - - url := c.AuthUrl - if !strings.HasSuffix(url, "/") { - url += "/" - } - url += "auth/tokens" - req, err := http.NewRequest("POST", url, bytes.NewBuffer(body)) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("User-Agent", c.UserAgent) - return req, nil -} - -func (auth *v3Auth) Response(resp *http.Response) error { - auth.Auth = &v3AuthResponse{} - auth.Headers = resp.Header - err := readJson(resp, auth.Auth) - return err -} - -func (auth *v3Auth) endpointUrl(Type string, endpointType EndpointType) string { - for _, catalog := range auth.Auth.Token.Catalog { - if catalog.Type == Type { - for _, endpoint := range catalog.Endpoints { - if endpoint.Interface == endpointType && (auth.Region == "" || (auth.Region == endpoint.Region)) { - return endpoint.Url - } - } - } - } - return "" -} - -func (auth *v3Auth) StorageUrl(Internal bool) string { - endpointType := EndpointTypePublic - if Internal { - endpointType = EndpointTypeInternal - } - return auth.StorageUrlForEndpoint(endpointType) -} - -func (auth *v3Auth) StorageUrlForEndpoint(endpointType EndpointType) string { - return auth.endpointUrl("object-store", endpointType) -} - -func (auth *v3Auth) Token() string { - return auth.Headers.Get("X-Subject-Token") -} - -func (auth *v3Auth) CdnUrl() string { - return "" -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/compatibility_1_0.go b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/compatibility_1_0.go deleted file mode 100644 index 7b69a757..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/compatibility_1_0.go +++ /dev/null @@ -1,28 +0,0 @@ -// Go 1.0 compatibility functions - -// +build !go1.1 - -package swift - -import ( - "log" - "net/http" - "time" -) - -// Cancel the request - doesn't work under < go 1.1 -func cancelRequest(transport http.RoundTripper, req *http.Request) { - log.Printf("Tried to cancel a request but couldn't - recompile with go 1.1") -} - -// Reset a timer - Doesn't work properly < go 1.1 -// -// This is quite hard to do properly under go < 1.1 so we do a crude -// approximation and hope that everyone upgrades to go 1.1 quickly -func resetTimer(t *time.Timer, d time.Duration) { - t.Stop() - // Very likely this doesn't actually work if we are already - // selecting on t.C. 
However we've stopped the original timer - // so won't break transfers but may not time them out :-( - *t = *time.NewTimer(d) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/compatibility_1_1.go b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/compatibility_1_1.go deleted file mode 100644 index a4f9c3ab..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/compatibility_1_1.go +++ /dev/null @@ -1,24 +0,0 @@ -// Go 1.1 and later compatibility functions -// -// +build go1.1 - -package swift - -import ( - "net/http" - "time" -) - -// Cancel the request -func cancelRequest(transport http.RoundTripper, req *http.Request) { - if tr, ok := transport.(interface { - CancelRequest(*http.Request) - }); ok { - tr.CancelRequest(req) - } -} - -// Reset a timer -func resetTimer(t *time.Timer, d time.Duration) { - t.Reset(d) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/doc.go b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/doc.go deleted file mode 100644 index 44efde7b..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Package swift provides an easy to use interface to Swift / Openstack Object Storage / Rackspace Cloud Files - -Standard Usage - -Most of the work is done through the Container*() and Object*() methods. - -All methods are safe to use concurrently in multiple go routines. - -Object Versioning - -As defined by http://docs.openstack.org/api/openstack-object-storage/1.0/content/Object_Versioning-e1e3230.html#d6e983 one can create a container which allows for version control of files. The suggested method is to create a version container for holding all non-current files, and a current container for holding the latest version that the file points to. The container and objects inside it can be used in the standard manner, however, pushing a file multiple times will result in it being copied to the version container and the new file put in it's place. If the current file is deleted, the previous file in the version container will replace it. This means that if a file is updated 5 times, it must be deleted 5 times to be completely removed from the system. - -Rackspace Sub Module - -This module specifically allows the enabling/disabling of Rackspace Cloud File CDN management on a container. This is specific to the Rackspace API and not Swift/Openstack, therefore it has been placed in a submodule. One can easily create a RsConnection and use it like the standard Connection to access and manipulate containers and objects. - -*/ -package swift diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/meta.go b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/meta.go deleted file mode 100644 index e52d6860..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/meta.go +++ /dev/null @@ -1,174 +0,0 @@ -// Metadata manipulation in and out of Headers - -package swift - -import ( - "fmt" - "net/http" - "strconv" - "strings" - "time" -) - -// Metadata stores account, container or object metadata. -type Metadata map[string]string - -// Metadata gets the Metadata starting with the metaPrefix out of the Headers. 
-// -// The keys in the Metadata will be converted to lower case -func (h Headers) Metadata(metaPrefix string) Metadata { - m := Metadata{} - metaPrefix = http.CanonicalHeaderKey(metaPrefix) - for key, value := range h { - if strings.HasPrefix(key, metaPrefix) { - metaKey := strings.ToLower(key[len(metaPrefix):]) - m[metaKey] = value - } - } - return m -} - -// AccountMetadata converts Headers from account to a Metadata. -// -// The keys in the Metadata will be converted to lower case. -func (h Headers) AccountMetadata() Metadata { - return h.Metadata("X-Account-Meta-") -} - -// ContainerMetadata converts Headers from container to a Metadata. -// -// The keys in the Metadata will be converted to lower case. -func (h Headers) ContainerMetadata() Metadata { - return h.Metadata("X-Container-Meta-") -} - -// ObjectMetadata converts Headers from object to a Metadata. -// -// The keys in the Metadata will be converted to lower case. -func (h Headers) ObjectMetadata() Metadata { - return h.Metadata("X-Object-Meta-") -} - -// Headers convert the Metadata starting with the metaPrefix into a -// Headers. -// -// The keys in the Metadata will be converted from lower case to http -// Canonical (see http.CanonicalHeaderKey). -func (m Metadata) Headers(metaPrefix string) Headers { - h := Headers{} - for key, value := range m { - key = http.CanonicalHeaderKey(metaPrefix + key) - h[key] = value - } - return h -} - -// AccountHeaders converts the Metadata for the account. -func (m Metadata) AccountHeaders() Headers { - return m.Headers("X-Account-Meta-") -} - -// ContainerHeaders converts the Metadata for the container. -func (m Metadata) ContainerHeaders() Headers { - return m.Headers("X-Container-Meta-") -} - -// ObjectHeaders converts the Metadata for the object. -func (m Metadata) ObjectHeaders() Headers { - return m.Headers("X-Object-Meta-") -} - -// Turns a number of ns into a floating point string in seconds -// -// Trims trailing zeros and guaranteed to be perfectly accurate -func nsToFloatString(ns int64) string { - if ns < 0 { - return "-" + nsToFloatString(-ns) - } - result := fmt.Sprintf("%010d", ns) - split := len(result) - 9 - result, decimals := result[:split], result[split:] - decimals = strings.TrimRight(decimals, "0") - if decimals != "" { - result += "." - result += decimals - } - return result -} - -// Turns a floating point string in seconds into a ns integer -// -// Guaranteed to be perfectly accurate -func floatStringToNs(s string) (int64, error) { - const zeros = "000000000" - if point := strings.IndexRune(s, '.'); point >= 0 { - tail := s[point+1:] - if fill := 9 - len(tail); fill < 0 { - tail = tail[:9] - } else { - tail += zeros[:fill] - } - s = s[:point] + tail - } else if len(s) > 0 { // Make sure empty string produces an error - s += zeros - } - return strconv.ParseInt(s, 10, 64) -} - -// FloatStringToTime converts a floating point number string to a time.Time -// -// The string is floating point number of seconds since the epoch -// (Unix time). The number should be in fixed point format (not -// exponential), eg "1354040105.123456789" which represents the time -// "2012-11-27T18:15:05.123456789Z" -// -// Some care is taken to preserve all the accuracy in the time.Time -// (which wouldn't happen with a naive conversion through float64) so -// a round trip conversion won't change the data. -// -// If an error is returned then time will be returned as the zero time. 
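As the comments above note, the conversion is exact in both directions, so a time round-trips through the string form unchanged. A minimal sketch of that, and of the mtime helpers layered on top (assuming the package is imported from its upstream path, github.com/ncw/swift):

```go
package main

import (
	"fmt"
	"time"

	"github.com/ncw/swift"
)

func main() {
	// The fixed-point encoding preserves full nanosecond accuracy,
	// so the round trip is lossless (unlike going via float64).
	t := time.Date(2012, 11, 27, 18, 15, 5, 123456789, time.UTC)
	s := swift.TimeToFloatString(t)
	back, err := swift.FloatStringToTime(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(s, back.Equal(t)) // 1354040105.123456789 true

	// The same encoding backs the de facto "mtime" metadata key.
	m := swift.Metadata{}
	m.SetModTime(t)
	fmt.Println(m.ObjectHeaders()["X-Object-Meta-Mtime"])
}
```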
-func FloatStringToTime(s string) (t time.Time, err error) { - ns, err := floatStringToNs(s) - if err != nil { - return - } - t = time.Unix(0, ns) - return -} - -// TimeToFloatString converts a time.Time object to a floating point string -// -// The string is floating point number of seconds since the epoch -// (Unix time). The number is in fixed point format (not -// exponential), eg "1354040105.123456789" which represents the time -// "2012-11-27T18:15:05.123456789Z". Trailing zeros will be dropped -// from the output. -// -// Some care is taken to preserve all the accuracy in the time.Time -// (which wouldn't happen with a naive conversion through float64) so -// a round trip conversion won't change the data. -func TimeToFloatString(t time.Time) string { - return nsToFloatString(t.UnixNano()) -} - -// Read a modification time (mtime) from a Metadata object -// -// This is a defacto standard (used in the official python-swiftclient -// amongst others) for storing the modification time (as read using -// os.Stat) for an object. It is stored using the key 'mtime', which -// for example when written to an object will be 'X-Object-Meta-Mtime'. -// -// If an error is returned then time will be returned as the zero time. -func (m Metadata) GetModTime() (t time.Time, err error) { - return FloatStringToTime(m["mtime"]) -} - -// Write an modification time (mtime) to a Metadata object -// -// This is a defacto standard (used in the official python-swiftclient -// amongst others) for storing the modification time (as read using -// os.Stat) for an object. It is stored using the key 'mtime', which -// for example when written to an object will be 'X-Object-Meta-Mtime'. -func (m Metadata) SetModTime(t time.Time) { - m["mtime"] = TimeToFloatString(t) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/notes.txt b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/notes.txt deleted file mode 100644 index f738552c..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/notes.txt +++ /dev/null @@ -1,55 +0,0 @@ -Notes on Go Swift -================= - -Make a builder style interface like the Google Go APIs? Advantages -are that it is easy to add named methods to the service object to do -specific things. Slightly less efficient. Not sure about how to -return extra stuff though - in an object? - -Make a container struct so these could be methods on it? - -Make noResponse check for 204? - -Make storage public so it can be extended easily? - -Rename to go-swift to match user agent string? - -Reconnect on auth error - 401 when token expires isn't tested - -Make more api compatible with python cloudfiles? - -Retry operations on timeout / network errors? -- also 408 error -- GET requests only? - -Make Connection thread safe - whenever it is changed take a write lock whenever it is read from a read lock - -Add extra headers field to Connection (for via etc) - -Make errors use an error heirachy then can catch them with a type assertion - - Error(...) - ObjectCorrupted{ Error } - -Make a Debug flag in connection for logging stuff - -Object If-Match, If-None-Match, If-Modified-Since, If-Unmodified-Since etc - -Object range - -Object create, update with X-Delete-At or X-Delete-After - -Large object support -- check uploads are less than 5GB in normal mode? - -Access control CORS? - -Swift client retries and backs off for all types of errors - -Implement net error interface? 
- -type Error interface { - error - Timeout() bool // Is the error a timeout? - Temporary() bool // Is the error temporary? -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/swift.go b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/swift.go deleted file mode 100644 index 1337f10d..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/swift.go +++ /dev/null @@ -1,1909 +0,0 @@ -package swift - -import ( - "bufio" - "bytes" - "crypto/hmac" - "crypto/md5" - "crypto/sha1" - "encoding/hex" - "encoding/json" - "fmt" - "hash" - "io" - "mime" - "net/http" - "net/url" - "path" - "strconv" - "strings" - "sync" - "time" -) - -const ( - DefaultUserAgent = "goswift/1.0" // Default user agent - DefaultRetries = 3 // Default number of retries on token expiry - TimeFormat = "2006-01-02T15:04:05" // Python date format for json replies parsed as UTC - UploadTar = "tar" // Data format specifier for Connection.BulkUpload(). - UploadTarGzip = "tar.gz" // Data format specifier for Connection.BulkUpload(). - UploadTarBzip2 = "tar.bz2" // Data format specifier for Connection.BulkUpload(). - allContainersLimit = 10000 // Number of containers to fetch at once - allObjectsLimit = 10000 // Number objects to fetch at once - allObjectsChanLimit = 1000 // ...when fetching to a channel -) - -// Connection holds the details of the connection to the swift server. -// -// You need to provide UserName, ApiKey and AuthUrl when you create a -// connection then call Authenticate on it. -// -// The auth version in use will be detected from the AuthURL - you can -// override this with the AuthVersion parameter. -// -// If using v2 auth you can also set Region in the Connection -// structure. If you don't set Region you will get the default region -// which may not be what you want. -// -// For reference some common AuthUrls looks like this: -// -// Rackspace US https://auth.api.rackspacecloud.com/v1.0 -// Rackspace UK https://lon.auth.api.rackspacecloud.com/v1.0 -// Rackspace v2 https://identity.api.rackspacecloud.com/v2.0 -// Memset Memstore UK https://auth.storage.memset.com/v1.0 -// Memstore v2 https://auth.storage.memset.com/v2.0 -// -// When using Google Appengine you must provide the Connection with an -// appengine-specific Transport: -// -// import ( -// "appengine/urlfetch" -// "fmt" -// "github.com/ncw/swift" -// ) -// -// func handler(w http.ResponseWriter, r *http.Request) { -// ctx := appengine.NewContext(r) -// tr := urlfetch.Transport{Context: ctx} -// c := swift.Connection{ -// UserName: "user", -// ApiKey: "key", -// AuthUrl: "auth_url", -// Transport: tr, -// } -// _ := c.Authenticate() -// containers, _ := c.ContainerNames(nil) -// fmt.Fprintf(w, "containers: %q", containers) -// } -// -// If you don't supply a Transport, one is made which relies on -// http.ProxyFromEnvironment (http://golang.org/pkg/net/http/#ProxyFromEnvironment). -// This means that the connection will respect the HTTP proxy specified by the -// environment variables $HTTP_PROXY and $NO_PROXY. 
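The comment above already shows the Appengine variant; for the common case a minimal sketch looks like this (the credentials and auth URL are placeholders, and the upstream import path github.com/ncw/swift is assumed):

```go
package main

import (
	"fmt"

	"github.com/ncw/swift"
)

func main() {
	// Only UserName, ApiKey and AuthUrl are required; retries,
	// timeouts and the Transport are defaulted on first use.
	c := swift.Connection{
		UserName: "user",
		ApiKey:   "key",
		AuthUrl:  "https://auth.api.rackspacecloud.com/v1.0",
	}
	// Authenticating eagerly surfaces credential errors now;
	// otherwise the first storage call authenticates implicitly.
	if err := c.Authenticate(); err != nil {
		fmt.Println("auth failed:", err)
		return
	}
	fmt.Println("authenticated:", c.Authenticated())
}
```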
-type Connection struct { - // Parameters - fill these in before calling Authenticate - // They are all optional except UserName, ApiKey and AuthUrl - Domain string // User's domain name - DomainId string // User's domain Id - UserName string // UserName for api - ApiKey string // Key for api access - AuthUrl string // Auth URL - Retries int // Retries on error (default is 3) - UserAgent string // Http User agent (default goswift/1.0) - ConnectTimeout time.Duration // Connect channel timeout (default 10s) - Timeout time.Duration // Data channel timeout (default 60s) - Region string // Region to use eg "LON", "ORD" - default is use first region (v2,v3 auth only) - AuthVersion int // Set to 1, 2 or 3 or leave at 0 for autodetect - Internal bool // Set this to true to use the the internal / service network - Tenant string // Name of the tenant (v2,v3 auth only) - TenantId string // Id of the tenant (v2,v3 auth only) - EndpointType EndpointType // Endpoint type (v2,v3 auth only) (default is public URL unless Internal is set) - TenantDomain string // Name of the tenant's domain (v3 auth only), only needed if it differs from the user domain - TenantDomainId string // Id of the tenant's domain (v3 auth only), only needed if it differs the from user domain - TrustId string // Id of the trust (v3 auth only) - Transport http.RoundTripper `json:"-" xml:"-"` // Optional specialised http.Transport (eg. for Google Appengine) - // These are filled in after Authenticate is called as are the defaults for above - StorageUrl string - AuthToken string - client *http.Client - Auth Authenticator `json:"-" xml:"-"` // the current authenticator - authLock sync.Mutex // lock when R/W StorageUrl, AuthToken, Auth -} - -// Error - all errors generated by this package are of this type. Other error -// may be passed on from library functions though. -type Error struct { - StatusCode int // HTTP status code if relevant or 0 if not - Text string -} - -// Error satisfy the error interface. -func (e *Error) Error() string { - return e.Text -} - -// newError make a new error from a string. -func newError(StatusCode int, Text string) *Error { - return &Error{ - StatusCode: StatusCode, - Text: Text, - } -} - -// newErrorf makes a new error from sprintf parameters. -func newErrorf(StatusCode int, Text string, Parameters ...interface{}) *Error { - return newError(StatusCode, fmt.Sprintf(Text, Parameters...)) -} - -// errorMap defines http error codes to error mappings. 
-type errorMap map[int]error - -var ( - // Specific Errors you might want to check for equality - BadRequest = newError(400, "Bad Request") - AuthorizationFailed = newError(401, "Authorization Failed") - ContainerNotFound = newError(404, "Container Not Found") - ContainerNotEmpty = newError(409, "Container Not Empty") - ObjectNotFound = newError(404, "Object Not Found") - ObjectCorrupted = newError(422, "Object Corrupted") - TimeoutError = newError(408, "Timeout when reading or writing data") - Forbidden = newError(403, "Operation forbidden") - TooLargeObject = newError(413, "Too Large Object") - - // Mappings for authentication errors - authErrorMap = errorMap{ - 400: BadRequest, - 401: AuthorizationFailed, - 403: Forbidden, - } - - // Mappings for container errors - ContainerErrorMap = errorMap{ - 400: BadRequest, - 403: Forbidden, - 404: ContainerNotFound, - 409: ContainerNotEmpty, - } - - // Mappings for object errors - objectErrorMap = errorMap{ - 400: BadRequest, - 403: Forbidden, - 404: ObjectNotFound, - 413: TooLargeObject, - 422: ObjectCorrupted, - } -) - -// checkClose is used to check the return from Close in a defer -// statement. -func checkClose(c io.Closer, err *error) { - cerr := c.Close() - if *err == nil { - *err = cerr - } -} - -// parseHeaders checks a response for errors and translates into -// standard errors if necessary. -func (c *Connection) parseHeaders(resp *http.Response, errorMap errorMap) error { - if errorMap != nil { - if err, ok := errorMap[resp.StatusCode]; ok { - return err - } - } - if resp.StatusCode < 200 || resp.StatusCode > 299 { - return newErrorf(resp.StatusCode, "HTTP Error: %d: %s", resp.StatusCode, resp.Status) - } - return nil -} - -// readHeaders returns a Headers object from the http.Response. -// -// If it receives multiple values for a key (which should never -// happen) it will use the first one -func readHeaders(resp *http.Response) Headers { - headers := Headers{} - for key, values := range resp.Header { - headers[key] = values[0] - } - return headers -} - -// Headers stores HTTP headers (can only have one of each header like Swift). 
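Since the maps above always hand back the same *Error instances, callers can test errors for simple equality, as the comment suggests. An illustrative sketch (the helper and container name are hypothetical):

```go
package swiftexample

import (
	"fmt"

	"github.com/ncw/swift"
)

func deletePhotos(c *swift.Connection) {
	// ContainerDelete routes 4xx statuses through ContainerErrorMap,
	// so the sentinel values compare equal with ==.
	switch err := c.ContainerDelete("photos"); err {
	case nil:
		fmt.Println("deleted")
	case swift.ContainerNotFound:
		fmt.Println("nothing to delete")
	case swift.ContainerNotEmpty:
		fmt.Println("not empty; delete its objects first")
	default:
		fmt.Println("unexpected error:", err)
	}
}
```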
-type Headers map[string]string - -// Does an http request using the running timer passed in -func (c *Connection) doTimeoutRequest(timer *time.Timer, req *http.Request) (*http.Response, error) { - // Do the request in the background so we can check the timeout - type result struct { - resp *http.Response - err error - } - done := make(chan result, 1) - go func() { - resp, err := c.client.Do(req) - done <- result{resp, err} - }() - // Wait for the read or the timeout - select { - case r := <-done: - return r.resp, r.err - case <-timer.C: - // Kill the connection on timeout so we don't leak sockets or goroutines - cancelRequest(c.Transport, req) - return nil, TimeoutError - } - panic("unreachable") // For Go 1.0 -} - -// Set defaults for any unset values -// -// Call with authLock held -func (c *Connection) setDefaults() { - if c.UserAgent == "" { - c.UserAgent = DefaultUserAgent - } - if c.Retries == 0 { - c.Retries = DefaultRetries - } - if c.ConnectTimeout == 0 { - c.ConnectTimeout = 10 * time.Second - } - if c.Timeout == 0 { - c.Timeout = 60 * time.Second - } - if c.Transport == nil { - c.Transport = &http.Transport{ - // TLSClientConfig: &tls.Config{RootCAs: pool}, - // DisableCompression: true, - Proxy: http.ProxyFromEnvironment, - MaxIdleConnsPerHost: 2048, - } - } - if c.client == nil { - c.client = &http.Client{ - // CheckRedirect: redirectPolicyFunc, - Transport: c.Transport, - } - } -} - -// Authenticate connects to the Swift server. -// -// If you don't call it before calling one of the connection methods -// then it will be called for you on the first access. -func (c *Connection) Authenticate() (err error) { - c.authLock.Lock() - defer c.authLock.Unlock() - return c.authenticate() -} - -// Internal implementation of Authenticate -// -// Call with authLock held -func (c *Connection) authenticate() (err error) { - c.setDefaults() - - // Flush the keepalives connection - if we are - // re-authenticating then stuff has gone wrong - flushKeepaliveConnections(c.Transport) - - if c.Auth == nil { - c.Auth, err = newAuth(c) - if err != nil { - return - } - } - - retries := 1 -again: - var req *http.Request - req, err = c.Auth.Request(c) - if err != nil { - return - } - if req != nil { - timer := time.NewTimer(c.ConnectTimeout) - var resp *http.Response - resp, err = c.doTimeoutRequest(timer, req) - if err != nil { - return - } - defer func() { - checkClose(resp.Body, &err) - // Flush the auth connection - we don't want to keep - // it open if keepalives were enabled - flushKeepaliveConnections(c.Transport) - }() - if err = c.parseHeaders(resp, authErrorMap); err != nil { - // Try again for a limited number of times on - // AuthorizationFailed or BadRequest. 
This allows us - // to try some alternate forms of the request - if (err == AuthorizationFailed || err == BadRequest) && retries > 0 { - retries-- - goto again - } - return - } - err = c.Auth.Response(resp) - if err != nil { - return - } - } - if customAuth, isCustom := c.Auth.(CustomEndpointAuthenticator); isCustom && c.EndpointType != "" { - c.StorageUrl = customAuth.StorageUrlForEndpoint(c.EndpointType) - } else { - c.StorageUrl = c.Auth.StorageUrl(c.Internal) - } - c.AuthToken = c.Auth.Token() - if !c.authenticated() { - err = newError(0, "Response didn't have storage url and auth token") - return - } - return -} - -// Get an authToken and url -// -// The Url may be updated if it needed to authenticate using the OnReAuth function -func (c *Connection) getUrlAndAuthToken(targetUrlIn string, OnReAuth func() (string, error)) (targetUrlOut, authToken string, err error) { - c.authLock.Lock() - defer c.authLock.Unlock() - targetUrlOut = targetUrlIn - if !c.authenticated() { - err = c.authenticate() - if err != nil { - return - } - if OnReAuth != nil { - targetUrlOut, err = OnReAuth() - if err != nil { - return - } - } - } - authToken = c.AuthToken - return -} - -// flushKeepaliveConnections is called to flush pending requests after an error. -func flushKeepaliveConnections(transport http.RoundTripper) { - if tr, ok := transport.(interface { - CloseIdleConnections() - }); ok { - tr.CloseIdleConnections() - } -} - -// UnAuthenticate removes the authentication from the Connection. -func (c *Connection) UnAuthenticate() { - c.authLock.Lock() - c.StorageUrl = "" - c.AuthToken = "" - c.authLock.Unlock() -} - -// Authenticated returns a boolean to show if the current connection -// is authenticated. -// -// Doesn't actually check the credentials against the server. -func (c *Connection) Authenticated() bool { - c.authLock.Lock() - defer c.authLock.Unlock() - return c.authenticated() -} - -// Internal version of Authenticated() -// -// Call with authLock held -func (c *Connection) authenticated() bool { - return c.StorageUrl != "" && c.AuthToken != "" -} - -// SwiftInfo contains the JSON object returned by Swift when the /info -// route is queried. The object contains, among others, the Swift version, -// the enabled middlewares and their configuration -type SwiftInfo map[string]interface{} - -// Discover Swift configuration by doing a request against /info -func (c *Connection) QueryInfo() (infos SwiftInfo, err error) { - infoUrl, err := url.Parse(c.StorageUrl) - if err != nil { - return nil, err - } - infoUrl.Path = path.Join(infoUrl.Path, "..", "..", "info") - resp, err := http.Get(infoUrl.String()) - if err == nil { - err = readJson(resp, &infos) - return infos, err - } - return nil, err -} - -// RequestOpts contains parameters for Connection.storage. -type RequestOpts struct { - Container string - ObjectName string - Operation string - Parameters url.Values - Headers Headers - ErrorMap errorMap - NoResponse bool - Body io.Reader - Retries int - // if set this is called on re-authentication to refresh the targetUrl - OnReAuth func() (string, error) -} - -// Call runs a remote command on the targetUrl, returns a -// response, headers and possible error. -// -// operation is GET, HEAD etc -// container is the name of a container -// Any other parameters (if not None) are added to the targetUrl -// -// Returns a response or an error. 
If response is returned then -// resp.Body.Close() must be called on it, unless noResponse is set in -// which case the body will be closed in this function -// -// If "Content-Length" is set in p.Headers it will be used - this can -// be used to override the default chunked transfer encoding for -// uploads. -// -// This will Authenticate if necessary, and re-authenticate if it -// receives a 401 error which means the token has expired -// -// This method is exported so extensions can call it. -func (c *Connection) Call(targetUrl string, p RequestOpts) (resp *http.Response, headers Headers, err error) { - c.authLock.Lock() - c.setDefaults() - c.authLock.Unlock() - retries := p.Retries - if retries == 0 { - retries = c.Retries - } - var req *http.Request - for { - var authToken string - if targetUrl, authToken, err = c.getUrlAndAuthToken(targetUrl, p.OnReAuth); err != nil { - return //authentication failure - } - var URL *url.URL - URL, err = url.Parse(targetUrl) - if err != nil { - return - } - if p.Container != "" { - URL.Path += "/" + p.Container - if p.ObjectName != "" { - URL.Path += "/" + p.ObjectName - } - } - if p.Parameters != nil { - URL.RawQuery = p.Parameters.Encode() - } - timer := time.NewTimer(c.ConnectTimeout) - reader := p.Body - if reader != nil { - reader = newWatchdogReader(reader, c.Timeout, timer) - } - req, err = http.NewRequest(p.Operation, URL.String(), reader) - if err != nil { - return - } - if p.Headers != nil { - for k, v := range p.Headers { - // Set ContentLength in req if the user passed it in in the headers - if k == "Content-Length" { - contentLength, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return nil, nil, fmt.Errorf("Invalid %q header %q: %v", k, v, err) - } - req.ContentLength = contentLength - } else { - req.Header.Add(k, v) - } - } - } - req.Header.Add("User-Agent", c.UserAgent) - req.Header.Add("X-Auth-Token", authToken) - resp, err = c.doTimeoutRequest(timer, req) - if err != nil { - if (p.Operation == "HEAD" || p.Operation == "GET") && retries > 0 { - retries-- - continue - } - return nil, nil, err - } - // Check to see if token has expired - if resp.StatusCode == 401 && retries > 0 { - _ = resp.Body.Close() - c.UnAuthenticate() - retries-- - } else { - break - } - } - - if err = c.parseHeaders(resp, p.ErrorMap); err != nil { - _ = resp.Body.Close() - return nil, nil, err - } - headers = readHeaders(resp) - if p.NoResponse { - err = resp.Body.Close() - if err != nil { - return nil, nil, err - } - } else { - // Cancel the request on timeout - cancel := func() { - cancelRequest(c.Transport, req) - } - // Wrap resp.Body to make it obey an idle timeout - resp.Body = newTimeoutReader(resp.Body, c.Timeout, cancel) - } - return -} - -// storage runs a remote command on a the storage url, returns a -// response, headers and possible error. -// -// operation is GET, HEAD etc -// container is the name of a container -// Any other parameters (if not None) are added to the storage url -// -// Returns a response or an error. 
If response is returned then -// resp.Body.Close() must be called on it, unless noResponse is set in -// which case the body will be closed in this function -// -// This will Authenticate if necessary, and re-authenticate if it -// receives a 401 error which means the token has expired -func (c *Connection) storage(p RequestOpts) (resp *http.Response, headers Headers, err error) { - p.OnReAuth = func() (string, error) { - return c.StorageUrl, nil - } - c.authLock.Lock() - url := c.StorageUrl - c.authLock.Unlock() - return c.Call(url, p) -} - -// readLines reads the response into an array of strings. -// -// Closes the response when done -func readLines(resp *http.Response) (lines []string, err error) { - defer checkClose(resp.Body, &err) - reader := bufio.NewReader(resp.Body) - buffer := bytes.NewBuffer(make([]byte, 0, 128)) - var part []byte - var prefix bool - for { - if part, prefix, err = reader.ReadLine(); err != nil { - break - } - buffer.Write(part) - if !prefix { - lines = append(lines, buffer.String()) - buffer.Reset() - } - } - if err == io.EOF { - err = nil - } - return -} - -// readJson reads the response into the json type passed in -// -// Closes the response when done -func readJson(resp *http.Response, result interface{}) (err error) { - defer checkClose(resp.Body, &err) - decoder := json.NewDecoder(resp.Body) - return decoder.Decode(result) -} - -/* ------------------------------------------------------------ */ - -// ContainersOpts is options for Containers() and ContainerNames() -type ContainersOpts struct { - Limit int // For an integer value n, limits the number of results to at most n values. - Prefix string // Given a string value x, return container names matching the specified prefix. - Marker string // Given a string value x, return container names greater in value than the specified marker. - EndMarker string // Given a string value x, return container names less in value than the specified marker. - Headers Headers // Any additional HTTP headers - can be nil -} - -// parse the ContainerOpts -func (opts *ContainersOpts) parse() (url.Values, Headers) { - v := url.Values{} - var h Headers - if opts != nil { - if opts.Limit > 0 { - v.Set("limit", strconv.Itoa(opts.Limit)) - } - if opts.Prefix != "" { - v.Set("prefix", opts.Prefix) - } - if opts.Marker != "" { - v.Set("marker", opts.Marker) - } - if opts.EndMarker != "" { - v.Set("end_marker", opts.EndMarker) - } - h = opts.Headers - } - return v, h -} - -// ContainerNames returns a slice of names of containers in this account. -func (c *Connection) ContainerNames(opts *ContainersOpts) ([]string, error) { - v, h := opts.parse() - resp, _, err := c.storage(RequestOpts{ - Operation: "GET", - Parameters: v, - ErrorMap: ContainerErrorMap, - Headers: h, - }) - if err != nil { - return nil, err - } - lines, err := readLines(resp) - return lines, err -} - -// Container contains information about a container -type Container struct { - Name string // Name of the container - Count int64 // Number of objects in the container - Bytes int64 // Total number of bytes used in the container -} - -// Containers returns a slice of structures with full information as -// described in Container. 
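A short sketch of the listing call just documented; the helper name and the "backup-" prefix are illustrative:

```go
package swiftexample

import (
	"fmt"

	"github.com/ncw/swift"
)

func listBackups(c *swift.Connection) error {
	// Limit and Prefix map directly onto the query parameters
	// assembled by ContainersOpts.parse() above.
	containers, err := c.Containers(&swift.ContainersOpts{
		Limit:  100,
		Prefix: "backup-",
	})
	if err != nil {
		return err
	}
	for _, container := range containers {
		fmt.Printf("%s: %d objects, %d bytes\n",
			container.Name, container.Count, container.Bytes)
	}
	return nil
}
```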
-func (c *Connection) Containers(opts *ContainersOpts) ([]Container, error) { - v, h := opts.parse() - v.Set("format", "json") - resp, _, err := c.storage(RequestOpts{ - Operation: "GET", - Parameters: v, - ErrorMap: ContainerErrorMap, - Headers: h, - }) - if err != nil { - return nil, err - } - var containers []Container - err = readJson(resp, &containers) - return containers, err -} - -// containersAllOpts makes a copy of opts if set or makes a new one and -// overrides Limit and Marker -func containersAllOpts(opts *ContainersOpts) *ContainersOpts { - var newOpts ContainersOpts - if opts != nil { - newOpts = *opts - } - if newOpts.Limit == 0 { - newOpts.Limit = allContainersLimit - } - newOpts.Marker = "" - return &newOpts -} - -// ContainersAll is like Containers but it returns all the Containers -// -// It calls Containers multiple times using the Marker parameter -// -// It has a default Limit parameter but you may pass in your own -func (c *Connection) ContainersAll(opts *ContainersOpts) ([]Container, error) { - opts = containersAllOpts(opts) - containers := make([]Container, 0) - for { - newContainers, err := c.Containers(opts) - if err != nil { - return nil, err - } - containers = append(containers, newContainers...) - if len(newContainers) < opts.Limit { - break - } - opts.Marker = newContainers[len(newContainers)-1].Name - } - return containers, nil -} - -// ContainerNamesAll is like ContainerNamess but it returns all the Containers -// -// It calls ContainerNames multiple times using the Marker parameter -// -// It has a default Limit parameter but you may pass in your own -func (c *Connection) ContainerNamesAll(opts *ContainersOpts) ([]string, error) { - opts = containersAllOpts(opts) - containers := make([]string, 0) - for { - newContainers, err := c.ContainerNames(opts) - if err != nil { - return nil, err - } - containers = append(containers, newContainers...) - if len(newContainers) < opts.Limit { - break - } - opts.Marker = newContainers[len(newContainers)-1] - } - return containers, nil -} - -/* ------------------------------------------------------------ */ - -// ObjectOpts is options for Objects() and ObjectNames() -type ObjectsOpts struct { - Limit int // For an integer value n, limits the number of results to at most n values. - Marker string // Given a string value x, return object names greater in value than the specified marker. - EndMarker string // Given a string value x, return object names less in value than the specified marker - Prefix string // For a string value x, causes the results to be limited to object names beginning with the substring x. - Path string // For a string value x, return the object names nested in the pseudo path - Delimiter rune // For a character c, return all the object names nested in the container - Headers Headers // Any additional HTTP headers - can be nil -} - -// parse reads values out of ObjectsOpts -func (opts *ObjectsOpts) parse() (url.Values, Headers) { - v := url.Values{} - var h Headers - if opts != nil { - if opts.Limit > 0 { - v.Set("limit", strconv.Itoa(opts.Limit)) - } - if opts.Marker != "" { - v.Set("marker", opts.Marker) - } - if opts.EndMarker != "" { - v.Set("end_marker", opts.EndMarker) - } - if opts.Prefix != "" { - v.Set("prefix", opts.Prefix) - } - if opts.Path != "" { - v.Set("path", opts.Path) - } - if opts.Delimiter != 0 { - v.Set("delimiter", string(opts.Delimiter)) - } - h = opts.Headers - } - return v, h -} - -// ObjectNames returns a slice of names of objects in a given container. 
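The *All variants above hide the Marker bookkeeping; written out by hand, the paging loop they run looks roughly like this (allContainerNames is a hypothetical stand-in for ContainerNamesAll):

```go
package swiftexample

import "github.com/ncw/swift"

func allContainerNames(c *swift.Connection) ([]string, error) {
	opts := &swift.ContainersOpts{Limit: 1000}
	var names []string
	for {
		page, err := c.ContainerNames(opts)
		if err != nil {
			return nil, err
		}
		names = append(names, page...)
		// A short page means the server has no more results.
		if len(page) < opts.Limit {
			return names, nil
		}
		// Resume after the last name we saw.
		opts.Marker = page[len(page)-1]
	}
}
```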
-func (c *Connection) ObjectNames(container string, opts *ObjectsOpts) ([]string, error) { - v, h := opts.parse() - resp, _, err := c.storage(RequestOpts{ - Container: container, - Operation: "GET", - Parameters: v, - ErrorMap: ContainerErrorMap, - Headers: h, - }) - if err != nil { - return nil, err - } - return readLines(resp) -} - -// Object contains information about an object -type Object struct { - Name string `json:"name"` // object name - ContentType string `json:"content_type"` // eg application/directory - Bytes int64 `json:"bytes"` // size in bytes - ServerLastModified string `json:"last_modified"` // Last modified time, eg '2011-06-30T08:20:47.736680' as a string supplied by the server - LastModified time.Time // Last modified time converted to a time.Time - Hash string `json:"hash"` // MD5 hash, eg "d41d8cd98f00b204e9800998ecf8427e" - PseudoDirectory bool // Set when using delimiter to show that this directory object does not really exist - SubDir string `json:"subdir"` // returned only when using delimiter to mark "pseudo directories" -} - -// Objects returns a slice of Object with information about each -// object in the container. -// -// If Delimiter is set in the opts then PseudoDirectory may be set, -// with ContentType 'application/directory'. These are not real -// objects but represent directories of objects which haven't had an -// object created for them. -func (c *Connection) Objects(container string, opts *ObjectsOpts) ([]Object, error) { - v, h := opts.parse() - v.Set("format", "json") - resp, _, err := c.storage(RequestOpts{ - Container: container, - Operation: "GET", - Parameters: v, - ErrorMap: ContainerErrorMap, - Headers: h, - }) - if err != nil { - return nil, err - } - var objects []Object - err = readJson(resp, &objects) - // Convert Pseudo directories and dates - for i := range objects { - object := &objects[i] - if object.SubDir != "" { - object.Name = object.SubDir - object.PseudoDirectory = true - object.ContentType = "application/directory" - } - if object.ServerLastModified != "" { - // 2012-11-11T14:49:47.887250 - // - // Remove fractional seconds if present. This - // then keeps it consistent with Object - // which can only return timestamps accurate - // to 1 second - // - // The TimeFormat will parse fractional - // seconds if desired though - datetime := strings.SplitN(object.ServerLastModified, ".", 2)[0] - object.LastModified, err = time.Parse(TimeFormat, datetime) - if err != nil { - return nil, err - } - } - } - return objects, err -} - -// objectsAllOpts makes a copy of opts if set or makes a new one and -// overrides Limit and Marker -func objectsAllOpts(opts *ObjectsOpts, Limit int) *ObjectsOpts { - var newOpts ObjectsOpts - if opts != nil { - newOpts = *opts - } - if newOpts.Limit == 0 { - newOpts.Limit = Limit - } - newOpts.Marker = "" - return &newOpts -} - -// A closure defined by the caller to iterate through all objects -// -// Call Objects or ObjectNames from here with the *ObjectOpts passed in -// -// Do whatever is required with the results then return them -type ObjectsWalkFn func(*ObjectsOpts) (interface{}, error) - -// ObjectsWalk is uses to iterate through all the objects in chunks as -// returned by Objects or ObjectNames using the Marker and Limit -// parameters in the ObjectsOpts. -// -// Pass in a closure `walkFn` which calls Objects or ObjectNames with -// the *ObjectsOpts passed to it and does something with the results. 
-// -// Errors will be returned from this function -// -// It has a default Limit parameter but you may pass in your own -func (c *Connection) ObjectsWalk(container string, opts *ObjectsOpts, walkFn ObjectsWalkFn) error { - opts = objectsAllOpts(opts, allObjectsChanLimit) - for { - objects, err := walkFn(opts) - if err != nil { - return err - } - var n int - var last string - switch objects := objects.(type) { - case []string: - n = len(objects) - if n > 0 { - last = objects[len(objects)-1] - } - case []Object: - n = len(objects) - if n > 0 { - last = objects[len(objects)-1].Name - } - default: - panic("Unknown type returned to ObjectsWalk") - } - if n < opts.Limit { - break - } - opts.Marker = last - } - return nil -} - -// ObjectsAll is like Objects but it returns an unlimited number of Objects in a slice -// -// It calls Objects multiple times using the Marker parameter -func (c *Connection) ObjectsAll(container string, opts *ObjectsOpts) ([]Object, error) { - objects := make([]Object, 0) - err := c.ObjectsWalk(container, opts, func(opts *ObjectsOpts) (interface{}, error) { - newObjects, err := c.Objects(container, opts) - if err == nil { - objects = append(objects, newObjects...) - } - return newObjects, err - }) - return objects, err -} - -// ObjectNamesAll is like ObjectNames but it returns all the Objects -// -// It calls ObjectNames multiple times using the Marker parameter -// -// It has a default Limit parameter but you may pass in your own -func (c *Connection) ObjectNamesAll(container string, opts *ObjectsOpts) ([]string, error) { - objects := make([]string, 0) - err := c.ObjectsWalk(container, opts, func(opts *ObjectsOpts) (interface{}, error) { - newObjects, err := c.ObjectNames(container, opts) - if err == nil { - objects = append(objects, newObjects...) - } - return newObjects, err - }) - return objects, err -} - -// Account contains information about this account. -type Account struct { - BytesUsed int64 // total number of bytes used - Containers int64 // total number of containers - Objects int64 // total number of objects -} - -// getInt64FromHeader is a helper function to decode int64 from header. -func getInt64FromHeader(resp *http.Response, header string) (result int64, err error) { - value := resp.Header.Get(header) - result, err = strconv.ParseInt(value, 10, 64) - if err != nil { - err = newErrorf(0, "Bad Header '%s': '%s': %s", header, value, err) - } - return -} - -// Account returns info about the account in an Account struct. -func (c *Connection) Account() (info Account, headers Headers, err error) { - var resp *http.Response - resp, headers, err = c.storage(RequestOpts{ - Operation: "HEAD", - ErrorMap: ContainerErrorMap, - NoResponse: true, - }) - if err != nil { - return - } - // Parse the headers into a dict - // - // {'Accept-Ranges': 'bytes', - // 'Content-Length': '0', - // 'Date': 'Tue, 05 Jul 2011 16:37:06 GMT', - // 'X-Account-Bytes-Used': '316598182', - // 'X-Account-Container-Count': '4', - // 'X-Account-Object-Count': '1433'} - if info.BytesUsed, err = getInt64FromHeader(resp, "X-Account-Bytes-Used"); err != nil { - return - } - if info.Containers, err = getInt64FromHeader(resp, "X-Account-Container-Count"); err != nil { - return - } - if info.Objects, err = getInt64FromHeader(resp, "X-Account-Object-Count"); err != nil { - return - } - return -} - -// AccountUpdate adds, replaces or remove account metadata. -// -// Add or update keys by mentioning them in the Headers. -// -// Remove keys by setting them to an empty string. 
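A sketch of the add/update/remove semantics just described; the metadata keys are purely illustrative:

```go
package swiftexample

import "github.com/ncw/swift"

func tagAccount(c *swift.Connection) error {
	// AccountHeaders prefixes each key with X-Account-Meta-.
	// A non-empty value adds or replaces the key; an empty
	// value removes it, per the doc comment above.
	m := swift.Metadata{
		"environment": "staging", // added or updated
		"owner":       "",        // removed
	}
	return c.AccountUpdate(m.AccountHeaders())
}
```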
-func (c *Connection) AccountUpdate(h Headers) error { - _, _, err := c.storage(RequestOpts{ - Operation: "POST", - ErrorMap: ContainerErrorMap, - NoResponse: true, - Headers: h, - }) - return err -} - -// ContainerCreate creates a container. -// -// If you don't want to add Headers just pass in nil -// -// No error is returned if it already exists but the metadata if any will be updated. -func (c *Connection) ContainerCreate(container string, h Headers) error { - _, _, err := c.storage(RequestOpts{ - Container: container, - Operation: "PUT", - ErrorMap: ContainerErrorMap, - NoResponse: true, - Headers: h, - }) - return err -} - -// ContainerDelete deletes a container. -// -// May return ContainerDoesNotExist or ContainerNotEmpty -func (c *Connection) ContainerDelete(container string) error { - _, _, err := c.storage(RequestOpts{ - Container: container, - Operation: "DELETE", - ErrorMap: ContainerErrorMap, - NoResponse: true, - }) - return err -} - -// Container returns info about a single container including any -// metadata in the headers. -func (c *Connection) Container(container string) (info Container, headers Headers, err error) { - var resp *http.Response - resp, headers, err = c.storage(RequestOpts{ - Container: container, - Operation: "HEAD", - ErrorMap: ContainerErrorMap, - NoResponse: true, - }) - if err != nil { - return - } - // Parse the headers into the struct - info.Name = container - if info.Bytes, err = getInt64FromHeader(resp, "X-Container-Bytes-Used"); err != nil { - return - } - if info.Count, err = getInt64FromHeader(resp, "X-Container-Object-Count"); err != nil { - return - } - return -} - -// ContainerUpdate adds, replaces or removes container metadata. -// -// Add or update keys by mentioning them in the Metadata. -// -// Remove keys by setting them to an empty string. -// -// Container metadata can only be read with Container() not with Containers(). -func (c *Connection) ContainerUpdate(container string, h Headers) error { - _, _, err := c.storage(RequestOpts{ - Container: container, - Operation: "POST", - ErrorMap: ContainerErrorMap, - NoResponse: true, - Headers: h, - }) - return err -} - -// ------------------------------------------------------------ - -// ObjectCreateFile represents a swift object open for writing -type ObjectCreateFile struct { - checkHash bool // whether we are checking the hash - pipeReader *io.PipeReader // pipe for the caller to use - pipeWriter *io.PipeWriter - hash hash.Hash // hash being build up as we go along - done chan struct{} // signals when the upload has finished - resp *http.Response // valid when done has signalled - err error // ditto - headers Headers // ditto -} - -// Write bytes to the object - see io.Writer -func (file *ObjectCreateFile) Write(p []byte) (n int, err error) { - n, err = file.pipeWriter.Write(p) - if err == io.ErrClosedPipe { - if file.err != nil { - return 0, file.err - } - return 0, newError(500, "Write on closed file") - } - if err == nil && file.checkHash { - _, _ = file.hash.Write(p) - } - return -} - -// Close the object and checks the md5sum if it was required. -// -// Also returns any other errors from the server (eg container not -// found) so it is very important to check the errors on this method. 
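Because the PUT runs in a background goroutine fed through an io.Pipe, most failures (including the MD5 check) only surface from Close. A sketch of the pattern (container and object names are placeholders):

```go
package swiftexample

import (
	"io"

	"github.com/ncw/swift"
)

func streamObject(c *swift.Connection, r io.Reader) error {
	// checkHash=true: the MD5 is accumulated as we write and
	// compared against the server's Etag when Close waits for
	// the background PUT to finish.
	f, err := c.ObjectCreate("logs", "app.log", true, "", "text/plain", nil)
	if err != nil {
		return err
	}
	if _, err := io.Copy(f, r); err != nil {
		f.Close() // still close, to reap the background request
		return err
	}
	// MUST be checked: reports server errors and ObjectCorrupted.
	return f.Close()
}
```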
-func (file *ObjectCreateFile) Close() error { - // Close the body - err := file.pipeWriter.Close() - if err != nil { - return err - } - - // Wait for the HTTP operation to complete - <-file.done - - // Check errors - if file.err != nil { - return file.err - } - if file.checkHash { - receivedMd5 := strings.ToLower(file.headers["Etag"]) - calculatedMd5 := fmt.Sprintf("%x", file.hash.Sum(nil)) - if receivedMd5 != calculatedMd5 { - return ObjectCorrupted - } - } - return nil -} - -// Check it satisfies the interface -var _ io.WriteCloser = &ObjectCreateFile{} - -// objectPutHeaders create a set of headers for a PUT -// -// It guesses the contentType from the objectName if it isn't set -// -// checkHash may be changed -func objectPutHeaders(objectName string, checkHash *bool, Hash string, contentType string, h Headers) Headers { - if contentType == "" { - contentType = mime.TypeByExtension(path.Ext(objectName)) - if contentType == "" { - contentType = "application/octet-stream" - } - } - // Meta stuff - extraHeaders := map[string]string{ - "Content-Type": contentType, - } - for key, value := range h { - extraHeaders[key] = value - } - if Hash != "" { - extraHeaders["Etag"] = Hash - *checkHash = false // the server will do it - } - return extraHeaders -} - -// ObjectCreate creates or updates the object in the container. It -// returns an io.WriteCloser you should write the contents to. You -// MUST call Close() on it and you MUST check the error return from -// Close(). -// -// If checkHash is True then it will calculate the MD5 Hash of the -// file as it is being uploaded and check it against that returned -// from the server. If it is wrong then it will return -// ObjectCorrupted on Close() -// -// If you know the MD5 hash of the object ahead of time then set the -// Hash parameter and it will be sent to the server (as an Etag -// header) and the server will check the MD5 itself after the upload, -// and this will return ObjectCorrupted on Close() if it is incorrect. -// -// If you don't want any error protection (not recommended) then set -// checkHash to false and Hash to "". -// -// If contentType is set it will be used, otherwise one will be -// guessed from objectName using mime.TypeByExtension -func (c *Connection) ObjectCreate(container string, objectName string, checkHash bool, Hash string, contentType string, h Headers) (file *ObjectCreateFile, err error) { - extraHeaders := objectPutHeaders(objectName, &checkHash, Hash, contentType, h) - pipeReader, pipeWriter := io.Pipe() - file = &ObjectCreateFile{ - hash: md5.New(), - checkHash: checkHash, - pipeReader: pipeReader, - pipeWriter: pipeWriter, - done: make(chan struct{}), - } - // Run the PUT in the background piping it data - go func() { - file.resp, file.headers, file.err = c.storage(RequestOpts{ - Container: container, - ObjectName: objectName, - Operation: "PUT", - Headers: extraHeaders, - Body: pipeReader, - NoResponse: true, - ErrorMap: objectErrorMap, - }) - // Signal finished - pipeReader.Close() - close(file.done) - }() - return -} - -// ObjectPut creates or updates the path in the container from -// contents. contents should be an open io.Reader which will have all -// its contents read. -// -// This is a low level interface. -// -// If checkHash is True then it will calculate the MD5 Hash of the -// file as it is being uploaded and check it against that returned -// from the server. If it is wrong then it will return -// ObjectCorrupted. 
-// -// If you know the MD5 hash of the object ahead of time then set the -// Hash parameter and it will be sent to the server (as an Etag -// header) and the server will check the MD5 itself after the upload, -// and this will return ObjectCorrupted if it is incorrect. -// -// If you don't want any error protection (not recommended) then set -// checkHash to false and Hash to "". -// -// If contentType is set it will be used, otherwise one will be -// guessed from objectName using mime.TypeByExtension -func (c *Connection) ObjectPut(container string, objectName string, contents io.Reader, checkHash bool, Hash string, contentType string, h Headers) (headers Headers, err error) { - extraHeaders := objectPutHeaders(objectName, &checkHash, Hash, contentType, h) - hash := md5.New() - var body io.Reader = contents - if checkHash { - body = io.TeeReader(contents, hash) - } - _, headers, err = c.storage(RequestOpts{ - Container: container, - ObjectName: objectName, - Operation: "PUT", - Headers: extraHeaders, - Body: body, - NoResponse: true, - ErrorMap: objectErrorMap, - }) - if err != nil { - return - } - if checkHash { - receivedMd5 := strings.ToLower(headers["Etag"]) - calculatedMd5 := fmt.Sprintf("%x", hash.Sum(nil)) - if receivedMd5 != calculatedMd5 { - err = ObjectCorrupted - return - } - } - return -} - -// ObjectPutBytes creates an object from a []byte in a container. -// -// This is a simplified interface which checks the MD5. -func (c *Connection) ObjectPutBytes(container string, objectName string, contents []byte, contentType string) (err error) { - buf := bytes.NewBuffer(contents) - _, err = c.ObjectPut(container, objectName, buf, true, "", contentType, nil) - return -} - -// ObjectPutString creates an object from a string in a container. -// -// This is a simplified interface which checks the MD5 -func (c *Connection) ObjectPutString(container string, objectName string, contents string, contentType string) (err error) { - buf := strings.NewReader(contents) - _, err = c.ObjectPut(container, objectName, buf, true, "", contentType, nil) - return -} - -// ObjectOpenFile represents a swift object open for reading -type ObjectOpenFile struct { - connection *Connection // stored copy of Connection used in Open - container string // stored copy of container used in Open - objectName string // stored copy of objectName used in Open - headers Headers // stored copy of headers used in Open - resp *http.Response // http connection - body io.Reader // read data from this - checkHash bool // true if checking MD5 - hash hash.Hash // currently accumulating MD5 - bytes int64 // number of bytes read on this connection - eof bool // whether we have read end of file - pos int64 // current position when reading - lengthOk bool // whether length is valid - length int64 // length of the object if read - seeked bool // whether we have seeked this file or not -} - -// Read bytes from the object - see io.Reader -func (file *ObjectOpenFile) Read(p []byte) (n int, err error) { - n, err = file.body.Read(p) - file.bytes += int64(n) - file.pos += int64(n) - if err == io.EOF { - file.eof = true - } - return -} - -// Seek sets the offset for the next Read to offset, interpreted -// according to whence: 0 means relative to the origin of the file, 1 -// means relative to the current offset, and 2 means relative to the -// end. Seek returns the new offset and an Error, if any. -// -// Seek uses HTTP Range headers which, if the file pointer is moved, -// will involve reopening the HTTP connection. 
-// -// Note that you can't seek to the end of a file or beyond; HTTP Range -// requests don't support the file pointer being outside the data, -// unlike os.File -// -// Seek(0, 1) will return the current file pointer. -func (file *ObjectOpenFile) Seek(offset int64, whence int) (newPos int64, err error) { - switch whence { - case 0: // relative to start - newPos = offset - case 1: // relative to current - newPos = file.pos + offset - case 2: // relative to end - if !file.lengthOk { - return file.pos, newError(0, "Length of file unknown so can't seek from end") - } - newPos = file.length + offset - default: - panic("Unknown whence in ObjectOpenFile.Seek") - } - // If at correct position (quite likely), do nothing - if newPos == file.pos { - return - } - // Close the file... - file.seeked = true - err = file.Close() - if err != nil { - return - } - // ...and re-open with a Range header - if file.headers == nil { - file.headers = Headers{} - } - if newPos > 0 { - file.headers["Range"] = fmt.Sprintf("bytes=%d-", newPos) - } else { - delete(file.headers, "Range") - } - newFile, _, err := file.connection.ObjectOpen(file.container, file.objectName, false, file.headers) - if err != nil { - return - } - // Update the file - file.resp = newFile.resp - file.body = newFile.body - file.checkHash = false - file.pos = newPos - return -} - -// Length gets the objects content length either from a cached copy or -// from the server. -func (file *ObjectOpenFile) Length() (int64, error) { - if !file.lengthOk { - info, _, err := file.connection.Object(file.container, file.objectName) - file.length = info.Bytes - file.lengthOk = (err == nil) - return file.length, err - } - return file.length, nil -} - -// Close the object and checks the length and md5sum if it was -// required and all the object was read -func (file *ObjectOpenFile) Close() (err error) { - // Close the body at the end - defer checkClose(file.resp.Body, &err) - - // If not end of file or seeked then can't check anything - if !file.eof || file.seeked { - return - } - - // Check the MD5 sum if requested - if file.checkHash { - receivedMd5 := strings.ToLower(file.resp.Header.Get("Etag")) - calculatedMd5 := fmt.Sprintf("%x", file.hash.Sum(nil)) - if receivedMd5 != calculatedMd5 { - err = ObjectCorrupted - return - } - } - - // Check to see we read the correct number of bytes - if file.lengthOk && file.length != file.bytes { - err = ObjectCorrupted - return - } - return -} - -// Check it satisfies the interfaces -var _ io.ReadCloser = &ObjectOpenFile{} -var _ io.Seeker = &ObjectOpenFile{} - -// ObjectOpen returns an ObjectOpenFile for reading the contents of -// the object. This satisfies the io.ReadCloser and the io.Seeker -// interfaces. -// -// You must call Close() on contents when finished -// -// Returns the headers of the response. -// -// If checkHash is true then it will calculate the md5sum of the file -// as it is being received and check it against that returned from the -// server. If it is wrong then it will return ObjectCorrupted. It -// will also check the length returned. No checking will be done if -// you don't read all the contents. -// -// Note that objects with X-Object-Manifest or X-Static-Large-Object -// set won't ever have their md5sum's checked as the md5sum reported -// on the object is actually the md5sum of the md5sums of the -// parts. This isn't very helpful to detect a corrupted download as -// the size of the parts aren't known without doing more operations. 
-// If you want to ensure integrity of an object with a manifest then -// you will need to download everything in the manifest separately. -// -// headers["Content-Type"] will give the content type if desired. -func (c *Connection) ObjectOpen(container string, objectName string, checkHash bool, h Headers) (file *ObjectOpenFile, headers Headers, err error) { - var resp *http.Response - resp, headers, err = c.storage(RequestOpts{ - Container: container, - ObjectName: objectName, - Operation: "GET", - ErrorMap: objectErrorMap, - Headers: h, - }) - if err != nil { - return - } - // Can't check MD5 on an object with X-Object-Manifest or X-Static-Large-Object set - if checkHash && (headers["X-Object-Manifest"] != "" || headers["X-Static-Large-Object"] != "") { - // log.Printf("swift: turning off md5 checking on object with manifest %v", objectName) - checkHash = false - } - file = &ObjectOpenFile{ - connection: c, - container: container, - objectName: objectName, - headers: h, - resp: resp, - checkHash: checkHash, - body: resp.Body, - } - if checkHash { - file.hash = md5.New() - file.body = io.TeeReader(resp.Body, file.hash) - } - // Read Content-Length - if resp.Header.Get("Content-Length") != "" { - file.length, err = getInt64FromHeader(resp, "Content-Length") - file.lengthOk = (err == nil) - } - return -} - -// ObjectGet gets the object into the io.Writer contents. -// -// Returns the headers of the response. -// -// If checkHash is true then it will calculate the md5sum of the file -// as it is being received and check it against that returned from the -// server. If it is wrong then it will return ObjectCorrupted. -// -// headers["Content-Type"] will give the content type if desired. -func (c *Connection) ObjectGet(container string, objectName string, contents io.Writer, checkHash bool, h Headers) (headers Headers, err error) { - file, headers, err := c.ObjectOpen(container, objectName, checkHash, h) - if err != nil { - return - } - defer checkClose(file, &err) - _, err = io.Copy(contents, file) - return -} - -// ObjectGetBytes returns an object as a []byte. -// -// This is a simplified interface which checks the MD5 -func (c *Connection) ObjectGetBytes(container string, objectName string) (contents []byte, err error) { - var buf bytes.Buffer - _, err = c.ObjectGet(container, objectName, &buf, true, nil) - contents = buf.Bytes() - return -} - -// ObjectGetString returns an object as a string. -// -// This is a simplified interface which checks the MD5 -func (c *Connection) ObjectGetString(container string, objectName string) (contents string, err error) { - var buf bytes.Buffer - _, err = c.ObjectGet(container, objectName, &buf, true, nil) - contents = buf.String() - return -} - -// ObjectDelete deletes the object. 
-// -// May return ObjectNotFound if the object isn't found -func (c *Connection) ObjectDelete(container string, objectName string) error { - _, _, err := c.storage(RequestOpts{ - Container: container, - ObjectName: objectName, - Operation: "DELETE", - ErrorMap: objectErrorMap, - }) - return err -} - -// ObjectTempUrl returns a temporary URL for an object -func (c *Connection) ObjectTempUrl(container string, objectName string, secretKey string, method string, expires time.Time) string { - mac := hmac.New(sha1.New, []byte(secretKey)) - prefix, _ := url.Parse(c.StorageUrl) - body := fmt.Sprintf("%s\n%d\n%s/%s/%s", method, expires.Unix(), prefix.Path, container, objectName) - mac.Write([]byte(body)) - sig := hex.EncodeToString(mac.Sum(nil)) - return fmt.Sprintf("%s/%s/%s?temp_url_sig=%s&temp_url_expires=%d", c.StorageUrl, container, objectName, sig, expires.Unix()) -} - -// parseResponseStatus parses string like "200 OK" and returns Error. -// -// For status codes beween 200 and 299, this returns nil. -func parseResponseStatus(resp string, errorMap errorMap) error { - code := 0 - reason := resp - t := strings.SplitN(resp, " ", 2) - if len(t) == 2 { - ncode, err := strconv.Atoi(t[0]) - if err == nil { - code = ncode - reason = t[1] - } - } - if errorMap != nil { - if err, ok := errorMap[code]; ok { - return err - } - } - if 200 <= code && code <= 299 { - return nil - } - return newError(code, reason) -} - -// BulkDeleteResult stores results of BulkDelete(). -// -// Individual errors may (or may not) be returned by Errors. -// Errors is a map whose keys are a full path of where the object was -// to be deleted, and whose values are Error objects. A full path of -// object looks like "/API_VERSION/USER_ACCOUNT/CONTAINER/OBJECT_PATH". -type BulkDeleteResult struct { - NumberNotFound int64 // # of objects not found. - NumberDeleted int64 // # of deleted objects. - Errors map[string]error // Mapping between object name and an error. - Headers Headers // Response HTTP headers. -} - -// BulkDelete deletes multiple objectNames from container in one operation. -// -// Some servers may not accept bulk-delete requests since bulk-delete is -// an optional feature of swift - these will return the Forbidden error. 
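// (Editorial illustration, not part of the vendored source: a minimal usage
// sketch for BulkDelete; the connection c, container and object names are
// invented for the example.)
//
//	result, err := c.BulkDelete("photos", []string{"a.jpg", "b.jpg"})
//	if err == nil {
//		fmt.Printf("deleted %d, not found %d\n", result.NumberDeleted, result.NumberNotFound)
//	}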
-// -// See also: -// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-bulk-delete.html -// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Bulk_Delete-d1e2338.html -func (c *Connection) BulkDelete(container string, objectNames []string) (result BulkDeleteResult, err error) { - var buffer bytes.Buffer - for _, s := range objectNames { - buffer.WriteString(fmt.Sprintf("/%s/%s\n", container, - url.QueryEscape(s))) - } - resp, headers, err := c.storage(RequestOpts{ - Operation: "DELETE", - Parameters: url.Values{"bulk-delete": []string{"1"}}, - Headers: Headers{ - "Accept": "application/json", - "Content-Type": "text/plain", - }, - ErrorMap: ContainerErrorMap, - Body: &buffer, - }) - if err != nil { - return - } - var jsonResult struct { - NotFound int64 `json:"Number Not Found"` - Status string `json:"Response Status"` - Errors [][]string - Deleted int64 `json:"Number Deleted"` - } - err = readJson(resp, &jsonResult) - if err != nil { - return - } - - err = parseResponseStatus(jsonResult.Status, objectErrorMap) - result.NumberNotFound = jsonResult.NotFound - result.NumberDeleted = jsonResult.Deleted - result.Headers = headers - el := make(map[string]error, len(jsonResult.Errors)) - for _, t := range jsonResult.Errors { - if len(t) != 2 { - continue - } - el[t[0]] = parseResponseStatus(t[1], objectErrorMap) - } - result.Errors = el - return -} - -// BulkUploadResult stores results of BulkUpload(). -// -// Individual errors may (or may not) be returned by Errors. -// Errors is a map whose keys are a full path of where an object was -// to be created, and whose values are Error objects. A full path of -// object looks like "/API_VERSION/USER_ACCOUNT/CONTAINER/OBJECT_PATH". -type BulkUploadResult struct { - NumberCreated int64 // # of created objects. - Errors map[string]error // Mapping between object name and an error. - Headers Headers // Response HTTP headers. -} - -// BulkUpload uploads multiple files in one operation. -// -// uploadPath can be empty, a container name, or a pseudo-directory -// within a container. If uploadPath is empty, new containers may be -// automatically created. -// -// Files are read from dataStream. The format of the stream is specified -// by the format parameter. Available formats are: -// * UploadTar - Plain tar stream. -// * UploadTarGzip - Gzip compressed tar stream. -// * UploadTarBzip2 - Bzip2 compressed tar stream. -// -// Some servers may not accept bulk-upload requests since bulk-upload is -// an optional feature of swift - these will return the Forbidden error. -// -// See also: -// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-extract-archive.html -// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Extract_Archive-d1e2338.html -func (c *Connection) BulkUpload(uploadPath string, dataStream io.Reader, format string, h Headers) (result BulkUploadResult, err error) { - extraHeaders := Headers{"Accept": "application/json"} - for key, value := range h { - extraHeaders[key] = value - } - // The following code abuses Container parameter intentionally. - // The best fix might be to rename Container to UploadPath. 
- resp, headers, err := c.storage(RequestOpts{ - Container: uploadPath, - Operation: "PUT", - Parameters: url.Values{"extract-archive": []string{format}}, - Headers: extraHeaders, - ErrorMap: ContainerErrorMap, - Body: dataStream, - }) - if err != nil { - return - } - // Detect old servers which don't support this feature - if headers["Content-Type"] != "application/json" { - err = Forbidden - return - } - var jsonResult struct { - Created int64 `json:"Number Files Created"` - Status string `json:"Response Status"` - Errors [][]string - } - err = readJson(resp, &jsonResult) - if err != nil { - return - } - - err = parseResponseStatus(jsonResult.Status, objectErrorMap) - result.NumberCreated = jsonResult.Created - result.Headers = headers - el := make(map[string]error, len(jsonResult.Errors)) - for _, t := range jsonResult.Errors { - if len(t) != 2 { - continue - } - el[t[0]] = parseResponseStatus(t[1], objectErrorMap) - } - result.Errors = el - return -} - -// Object returns info about a single object including any metadata in the header. -// -// May return ObjectNotFound. -// -// Use headers.ObjectMetadata() to read the metadata in the Headers. -func (c *Connection) Object(container string, objectName string) (info Object, headers Headers, err error) { - var resp *http.Response - resp, headers, err = c.storage(RequestOpts{ - Container: container, - ObjectName: objectName, - Operation: "HEAD", - ErrorMap: objectErrorMap, - NoResponse: true, - }) - if err != nil { - return - } - // Parse the headers into the struct - // HTTP/1.1 200 OK - // Date: Thu, 07 Jun 2010 20:59:39 GMT - // Server: Apache - // Last-Modified: Fri, 12 Jun 2010 13:40:18 GMT - // ETag: 8a964ee2a5e88be344f36c22562a6486 - // Content-Length: 512000 - // Content-Type: text/plain; charset=UTF-8 - // X-Object-Meta-Meat: Bacon - // X-Object-Meta-Fruit: Bacon - // X-Object-Meta-Veggie: Bacon - // X-Object-Meta-Dairy: Bacon - info.Name = objectName - info.ContentType = resp.Header.Get("Content-Type") - if resp.Header.Get("Content-Length") != "" { - if info.Bytes, err = getInt64FromHeader(resp, "Content-Length"); err != nil { - return - } - } - //HACK - //Currently ceph doestn't return Last-Modified header for DLO manifest without any segments - //Currently it affects all versions of ceph http://tracker.ceph.com/issues/15812 - if resp.Header.Get("Last-Modified") != "" { - info.ServerLastModified = resp.Header.Get("Last-Modified") - if info.LastModified, err = time.Parse(http.TimeFormat, info.ServerLastModified); err != nil { - return - } - } - info.Hash = resp.Header.Get("Etag") - return -} - -// ObjectUpdate adds, replaces or removes object metadata. -// -// Add or Update keys by mentioning them in the Metadata. Use -// Metadata.ObjectHeaders and Headers.ObjectMetadata to convert your -// Metadata to and from normal HTTP headers. -// -// This removes all metadata previously added to the object and -// replaces it with that passed in so to delete keys, just don't -// mention them the headers you pass in. -// -// Object metadata can only be read with Object() not with Objects(). -// -// This can also be used to set headers not already assigned such as -// X-Delete-At or X-Delete-After for expiring objects. -// -// You cannot use this to change any of the object's other headers -// such as Content-Type, ETag, etc. -// -// Refer to copying an object when you need to update metadata or -// other headers such as Content-Type or CORS headers. -// -// May return ObjectNotFound. 
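// (Editorial illustration, not part of the vendored source: a minimal sketch
// of replacing an object's metadata via the Metadata helpers mentioned above;
// the connection c, container, object name and metadata key are invented for
// the example.)
//
//	m := swift.Metadata{"orientation": "portrait"}
//	err := c.ObjectUpdate("photos", "kitten.jpg", m.ObjectHeaders())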
-func (c *Connection) ObjectUpdate(container string, objectName string, h Headers) error { - _, _, err := c.storage(RequestOpts{ - Container: container, - ObjectName: objectName, - Operation: "POST", - ErrorMap: objectErrorMap, - NoResponse: true, - Headers: h, - }) - return err -} - -// ObjectCopy does a server side copy of an object to a new position -// -// All metadata is preserved. If metadata is set in the headers then -// it overrides the old metadata on the copied object. -// -// The destination container must exist before the copy. -// -// You can use this to copy an object to itself - this is the only way -// to update the content type of an object. -func (c *Connection) ObjectCopy(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string, h Headers) (headers Headers, err error) { - // Meta stuff - extraHeaders := map[string]string{ - "Destination": dstContainer + "/" + dstObjectName, - } - for key, value := range h { - extraHeaders[key] = value - } - _, headers, err = c.storage(RequestOpts{ - Container: srcContainer, - ObjectName: srcObjectName, - Operation: "COPY", - ErrorMap: objectErrorMap, - NoResponse: true, - Headers: extraHeaders, - }) - return -} - -// ObjectMove does a server side move of an object to a new position -// -// This is a convenience method which calls ObjectCopy then ObjectDelete -// -// All metadata is preserved. -// -// The destination container must exist before the copy. -func (c *Connection) ObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) (err error) { - _, err = c.ObjectCopy(srcContainer, srcObjectName, dstContainer, dstObjectName, nil) - if err != nil { - return - } - return c.ObjectDelete(srcContainer, srcObjectName) -} - -// ObjectUpdateContentType updates the content type of an object -// -// This is a convenience method which calls ObjectCopy -// -// All other metadata is preserved. -func (c *Connection) ObjectUpdateContentType(container string, objectName string, contentType string) (err error) { - h := Headers{"Content-Type": contentType} - _, err = c.ObjectCopy(container, objectName, container, objectName, h) - return -} - -// ------------------------------------------------------------ - -// VersionContainerCreate is a helper method for creating and enabling version controlled containers. -// -// It builds the current object container, the non-current object version container, and enables versioning. -// -// If the server doesn't support versioning then it will return -// Forbidden however it will have created both the containers at that point. -func (c *Connection) VersionContainerCreate(current, version string) error { - if err := c.ContainerCreate(version, nil); err != nil { - return err - } - if err := c.ContainerCreate(current, nil); err != nil { - return err - } - if err := c.VersionEnable(current, version); err != nil { - return err - } - return nil -} - -// VersionEnable enables versioning on the current container with version as the tracking container. 
-// -// May return Forbidden if this isn't supported by the server -func (c *Connection) VersionEnable(current, version string) error { - h := Headers{"X-Versions-Location": version} - if err := c.ContainerUpdate(current, h); err != nil { - return err - } - // Check to see if the header was set properly - _, headers, err := c.Container(current) - if err != nil { - return err - } - // If failed to set versions header, return Forbidden as the server doesn't support this - if headers["X-Versions-Location"] != version { - return Forbidden - } - return nil -} - -// VersionDisable disables versioning on the current container. -func (c *Connection) VersionDisable(current string) error { - h := Headers{"X-Versions-Location": ""} - if err := c.ContainerUpdate(current, h); err != nil { - return err - } - return nil -} - -// VersionObjectList returns a list of older versions of the object. -// -// Objects are returned in the format / -func (c *Connection) VersionObjectList(version, object string) ([]string, error) { - opts := &ObjectsOpts{ - // <3-character zero-padded hexadecimal character length>/ - Prefix: fmt.Sprintf("%03x", len(object)) + object + "/", - } - return c.ObjectNames(version, opts) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/swifttest/server.go b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/swifttest/server.go deleted file mode 100644 index a49abb9a..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/swifttest/server.go +++ /dev/null @@ -1,941 +0,0 @@ -// This implements a very basic Swift server -// Everything is stored in memory -// -// This comes from the https://github.com/mitchellh/goamz -// and was adapted for Swift -// -package swifttest - -import ( - "bytes" - "crypto/hmac" - "crypto/md5" - "crypto/rand" - "crypto/sha1" - "encoding/hex" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "mime" - "net" - "net/http" - "net/url" - "path" - "regexp" - "sort" - "strconv" - "strings" - "sync" - "testing" - "time" - - "github.com/ncw/swift" -) - -const ( - DEBUG = false - TEST_ACCOUNT = "swifttest" -) - -type SwiftServer struct { - t *testing.T - reqId int - mu sync.Mutex - Listener net.Listener - AuthURL string - URL string - Accounts map[string]*account - Sessions map[string]*session -} - -// The Folder type represents a container stored in an account -type Folder struct { - Count int `json:"count"` - Bytes int `json:"bytes"` - Name string `json:"name"` -} - -// The Key type represents an item stored in an container. -type Key struct { - Key string `json:"name"` - LastModified string `json:"last_modified"` - Size int64 `json:"bytes"` - // ETag gives the hex-encoded MD5 sum of the contents, - // surrounded with double-quotes. - ETag string `json:"hash"` - ContentType string `json:"content_type"` - // Owner Owner -} - -type Subdir struct { - Subdir string `json:"subdir"` -} - -type swiftError struct { - statusCode int - Code string - Message string -} - -type action struct { - srv *SwiftServer - w http.ResponseWriter - req *http.Request - reqId string - user *account -} - -type session struct { - username string -} - -type metadata struct { - meta http.Header // metadata to return with requests. -} - -type account struct { - swift.Account - metadata - password string - Containers map[string]*container -} - -type object struct { - metadata - name string - mtime time.Time - checksum []byte // also held as ETag in meta. 
- data []byte - content_type string -} - -type container struct { - metadata - name string - ctime time.Time - objects map[string]*object - bytes int -} - -// A resource encapsulates the subject of an HTTP request. -// The resource referred to may or may not exist -// when the request is made. -type resource interface { - put(a *action) interface{} - get(a *action) interface{} - post(a *action) interface{} - delete(a *action) interface{} - copy(a *action) interface{} -} - -type objectResource struct { - name string - version string - container *container // always non-nil. - object *object // may be nil. -} - -type containerResource struct { - name string - container *container // non-nil if the container already exists. -} - -var responseParams = map[string]bool{ - "content-type": true, - "content-language": true, - "expires": true, - "cache-control": true, - "content-disposition": true, - "content-encoding": true, -} - -func fatalf(code int, codeStr string, errf string, a ...interface{}) { - panic(&swiftError{ - statusCode: code, - Code: codeStr, - Message: fmt.Sprintf(errf, a...), - }) -} - -func (m metadata) setMetadata(a *action, resource string) { - for key, values := range a.req.Header { - key = http.CanonicalHeaderKey(key) - if metaHeaders[key] || strings.HasPrefix(key, "X-"+strings.Title(resource)+"-Meta-") { - if values[0] != "" || resource == "object" { - m.meta[key] = values - } else { - m.meta.Del(key) - } - } - } -} - -func (m metadata) getMetadata(a *action) { - h := a.w.Header() - for name, d := range m.meta { - h[name] = d - } -} - -func (c container) list(delimiter string, marker string, prefix string, parent string) (resp []interface{}) { - var tmp orderedObjects - - // first get all matching objects and arrange them in alphabetical order. - for _, obj := range c.objects { - if strings.HasPrefix(obj.name, prefix) { - tmp = append(tmp, obj) - } - } - sort.Sort(tmp) - - var prefixes []string - for _, obj := range tmp { - if !strings.HasPrefix(obj.name, prefix) { - continue - } - - isPrefix := false - name := obj.name - if parent != "" { - if path.Dir(obj.name) != path.Clean(parent) { - continue - } - } else if delimiter != "" { - if i := strings.Index(obj.name[len(prefix):], delimiter); i >= 0 { - name = obj.name[:len(prefix)+i+len(delimiter)] - if prefixes != nil && prefixes[len(prefixes)-1] == name { - continue - } - isPrefix = true - } - } - - if name <= marker { - continue - } - - if isPrefix { - prefixes = append(prefixes, name) - - resp = append(resp, Subdir{ - Subdir: name, - }) - } else { - resp = append(resp, obj) - } - } - - return -} - -// GET on a container lists the objects in the container. 
-func (r containerResource) get(a *action) interface{} { - if r.container == nil { - fatalf(404, "NoSuchContainer", "The specified container does not exist") - } - - delimiter := a.req.Form.Get("delimiter") - marker := a.req.Form.Get("marker") - prefix := a.req.Form.Get("prefix") - format := a.req.URL.Query().Get("format") - parent := a.req.Form.Get("path") - - a.w.Header().Set("X-Container-Bytes-Used", strconv.Itoa(r.container.bytes)) - a.w.Header().Set("X-Container-Object-Count", strconv.Itoa(len(r.container.objects))) - r.container.getMetadata(a) - - if a.req.Method == "HEAD" { - return nil - } - - objects := r.container.list(delimiter, marker, prefix, parent) - - if format == "json" { - a.w.Header().Set("Content-Type", "application/json") - var resp []interface{} - for _, item := range objects { - if obj, ok := item.(*object); ok { - resp = append(resp, obj.Key()) - } else { - resp = append(resp, item) - } - } - return resp - } else { - for _, item := range objects { - if obj, ok := item.(*object); ok { - a.w.Write([]byte(obj.name + "\n")) - } else if subdir, ok := item.(Subdir); ok { - a.w.Write([]byte(subdir.Subdir + "\n")) - } - } - return nil - } -} - -// orderedContainers holds a slice of containers that can be sorted -// by name. -type orderedContainers []*container - -func (s orderedContainers) Len() int { - return len(s) -} -func (s orderedContainers) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s orderedContainers) Less(i, j int) bool { - return s[i].name < s[j].name -} - -func (r containerResource) delete(a *action) interface{} { - b := r.container - if b == nil { - fatalf(404, "NoSuchContainer", "The specified container does not exist") - } - if len(b.objects) > 0 { - fatalf(409, "Conflict", "The container you tried to delete is not empty") - } - delete(a.user.Containers, b.name) - a.user.Account.Containers-- - return nil -} - -func (r containerResource) put(a *action) interface{} { - if a.req.URL.Query().Get("extract-archive") != "" { - fatalf(403, "Operation forbidden", "Bulk upload is not supported") - } - - if r.container == nil { - if !validContainerName(r.name) { - fatalf(400, "InvalidContainerName", "The specified container is not valid") - } - r.container = &container{ - name: r.name, - objects: make(map[string]*object), - metadata: metadata{ - meta: make(http.Header), - }, - } - r.container.setMetadata(a, "container") - a.user.Containers[r.name] = r.container - a.user.Account.Containers++ - } - - return nil -} - -func (r containerResource) post(a *action) interface{} { - if r.container == nil { - fatalf(400, "Method", "The resource could not be found.") - } else { - r.container.setMetadata(a, "container") - a.w.WriteHeader(201) - jsonMarshal(a.w, Folder{ - Count: len(r.container.objects), - Bytes: r.container.bytes, - Name: r.container.name, - }) - } - return nil -} - -func (containerResource) copy(a *action) interface{} { return notAllowed() } - -// validContainerName returns whether name is a valid bucket name. -// Here are the rules, from: -// http://docs.openstack.org/api/openstack-object-storage/1.0/content/ch_object-storage-dev-api-storage.html -// -// Container names cannot exceed 256 bytes and cannot contain the / character. -// -func validContainerName(name string) bool { - if len(name) == 0 || len(name) > 256 { - return false - } - for _, r := range name { - switch { - case r == '/': - return false - default: - } - } - return true -} - -// orderedObjects holds a slice of objects that can be sorted -// by name. 
-type orderedObjects []*object - -func (s orderedObjects) Len() int { - return len(s) -} -func (s orderedObjects) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s orderedObjects) Less(i, j int) bool { - return s[i].name < s[j].name -} - -func (obj *object) Key() Key { - return Key{ - Key: obj.name, - LastModified: obj.mtime.Format("2006-01-02T15:04:05"), - Size: int64(len(obj.data)), - ETag: fmt.Sprintf("%x", obj.checksum), - ContentType: obj.content_type, - } -} - -var metaHeaders = map[string]bool{ - "Content-Type": true, - "Content-Encoding": true, - "Content-Disposition": true, - "X-Object-Manifest": true, -} - -var rangeRegexp = regexp.MustCompile("(bytes=)?([0-9]*)-([0-9]*)") - -// GET on an object gets the contents of the object. -func (objr objectResource) get(a *action) interface{} { - var ( - etag []byte - reader io.Reader - start int - end int = -1 - ) - obj := objr.object - if obj == nil { - fatalf(404, "Not Found", "The resource could not be found.") - } - - h := a.w.Header() - // add metadata - obj.getMetadata(a) - - if r := a.req.Header.Get("Range"); r != "" { - m := rangeRegexp.FindStringSubmatch(r) - if m[2] != "" { - start, _ = strconv.Atoi(m[2]) - } - if m[3] != "" { - end, _ = strconv.Atoi(m[3]) - } - } - - max := func(a int, b int) int { - if a > b { - return a - } - return b - } - - if manifest, ok := obj.meta["X-Object-Manifest"]; ok { - var segments []io.Reader - components := strings.SplitN(manifest[0], "/", 2) - segContainer := a.user.Containers[components[0]] - prefix := components[1] - resp := segContainer.list("", "", prefix, "") - sum := md5.New() - cursor := 0 - size := 0 - for _, item := range resp { - if obj, ok := item.(*object); ok { - length := len(obj.data) - size += length - sum.Write([]byte(hex.EncodeToString(obj.checksum))) - if start >= cursor+length { - continue - } - segments = append(segments, bytes.NewReader(obj.data[max(0, start-cursor):])) - cursor += length - } - } - etag = sum.Sum(nil) - if end == -1 { - end = size - } - reader = io.LimitReader(io.MultiReader(segments...), int64(end-start)) - } else { - if end == -1 { - end = len(obj.data) - } - etag = obj.checksum - reader = bytes.NewReader(obj.data[start:end]) - } - - h.Set("Content-Length", fmt.Sprint(end-start)) - h.Set("ETag", hex.EncodeToString(etag)) - h.Set("Last-Modified", obj.mtime.Format(http.TimeFormat)) - - if a.req.Method == "HEAD" { - return nil - } - - // TODO avoid holding the lock when writing data. - _, err := io.Copy(a.w, reader) - if err != nil { - // we can't do much except just log the fact. - log.Printf("error writing data: %v", err) - } - return nil -} - -// PUT on an object creates the object. -func (objr objectResource) put(a *action) interface{} { - var expectHash []byte - if c := a.req.Header.Get("ETag"); c != "" { - var err error - expectHash, err = hex.DecodeString(c) - if err != nil || len(expectHash) != md5.Size { - fatalf(400, "InvalidDigest", "The ETag you specified was invalid") - } - } - sum := md5.New() - // TODO avoid holding lock while reading data. 
- data, err := ioutil.ReadAll(io.TeeReader(a.req.Body, sum)) - if err != nil { - fatalf(400, "TODO", "read error") - } - gotHash := sum.Sum(nil) - if expectHash != nil && bytes.Compare(gotHash, expectHash) != 0 { - fatalf(422, "Bad ETag", "The ETag you specified did not match what we received") - } - if a.req.ContentLength >= 0 && int64(len(data)) != a.req.ContentLength { - fatalf(400, "IncompleteBody", "You did not provide the number of bytes specified by the Content-Length HTTP header") - } - - // TODO is this correct, or should we erase all previous metadata? - obj := objr.object - if obj == nil { - obj = &object{ - name: objr.name, - metadata: metadata{ - meta: make(http.Header), - }, - } - a.user.Objects++ - } else { - objr.container.bytes -= len(obj.data) - a.user.BytesUsed -= int64(len(obj.data)) - } - - var content_type string - if content_type = a.req.Header.Get("Content-Type"); content_type == "" { - content_type = mime.TypeByExtension(obj.name) - if content_type == "" { - content_type = "application/octet-stream" - } - } - - // PUT request has been successful - save data and metadata - obj.setMetadata(a, "object") - obj.content_type = content_type - obj.data = data - obj.checksum = gotHash - obj.mtime = time.Now().UTC() - objr.container.objects[objr.name] = obj - objr.container.bytes += len(data) - a.user.BytesUsed += int64(len(data)) - - h := a.w.Header() - h.Set("ETag", hex.EncodeToString(obj.checksum)) - - return nil -} - -func (objr objectResource) delete(a *action) interface{} { - if objr.object == nil { - fatalf(404, "NoSuchKey", "The specified key does not exist.") - } - - objr.container.bytes -= len(objr.object.data) - a.user.BytesUsed -= int64(len(objr.object.data)) - delete(objr.container.objects, objr.name) - a.user.Objects-- - return nil -} - -func (objr objectResource) post(a *action) interface{} { - obj := objr.object - obj.setMetadata(a, "object") - return nil -} - -func (objr objectResource) copy(a *action) interface{} { - if objr.object == nil { - fatalf(404, "NoSuchKey", "The specified key does not exist.") - } - - obj := objr.object - destination := a.req.Header.Get("Destination") - if destination == "" { - fatalf(400, "Bad Request", "You must provide a Destination header") - } - - var ( - obj2 *object - objr2 objectResource - ) - - destURL, _ := url.Parse("/v1/AUTH_" + TEST_ACCOUNT + "/" + destination) - r := a.srv.resourceForURL(destURL) - switch t := r.(type) { - case objectResource: - objr2 = t - if objr2.object == nil { - obj2 = &object{ - name: objr2.name, - metadata: metadata{ - meta: make(http.Header), - }, - } - a.user.Objects++ - } else { - obj2 = objr2.object - objr2.container.bytes -= len(obj2.data) - a.user.BytesUsed -= int64(len(obj2.data)) - } - default: - fatalf(400, "Bad Request", "Destination must point to a valid object path") - } - - obj2.content_type = obj.content_type - obj2.data = obj.data - obj2.checksum = obj.checksum - obj2.mtime = time.Now() - objr2.container.objects[objr2.name] = obj2 - objr2.container.bytes += len(obj.data) - a.user.BytesUsed += int64(len(obj.data)) - - for key, values := range obj.metadata.meta { - obj2.metadata.meta[key] = values - } - obj2.setMetadata(a, "object") - - return nil -} - -func (s *SwiftServer) serveHTTP(w http.ResponseWriter, req *http.Request) { - // ignore error from ParseForm as it's usually spurious. 
- req.ParseForm() - - s.mu.Lock() - defer s.mu.Unlock() - - if DEBUG { - log.Printf("swifttest %q %q", req.Method, req.URL) - } - a := &action{ - srv: s, - w: w, - req: req, - reqId: fmt.Sprintf("%09X", s.reqId), - } - s.reqId++ - - var r resource - defer func() { - switch err := recover().(type) { - case *swiftError: - w.Header().Set("Content-Type", `text/plain; charset=utf-8`) - http.Error(w, err.Message, err.statusCode) - case nil: - default: - panic(err) - } - }() - - var resp interface{} - - if req.URL.String() == "/v1.0" { - username := req.Header.Get("x-auth-user") - key := req.Header.Get("x-auth-key") - if acct, ok := s.Accounts[username]; ok { - if acct.password == key { - r := make([]byte, 16) - _, _ = rand.Read(r) - id := fmt.Sprintf("%X", r) - w.Header().Set("X-Storage-Url", s.URL+"/AUTH_"+username) - w.Header().Set("X-Auth-Token", "AUTH_tk"+string(id)) - w.Header().Set("X-Storage-Token", "AUTH_tk"+string(id)) - s.Sessions[id] = &session{ - username: username, - } - return - } - } - panic(notAuthorized()) - } - - if req.URL.String() == "/info" { - jsonMarshal(w, &swift.SwiftInfo{ - "swift": map[string]interface{}{ - "version": "1.2", - }, - "tempurl": map[string]interface{}{ - "methods": []string{"GET", "HEAD", "PUT"}, - }, - }) - return - } - - r = s.resourceForURL(req.URL) - - key := req.Header.Get("x-auth-token") - signature := req.URL.Query().Get("temp_url_sig") - expires := req.URL.Query().Get("temp_url_expires") - if key == "" && signature != "" && expires != "" { - accountName, _, _, _ := s.parseURL(req.URL) - secretKey := "" - if account, ok := s.Accounts[accountName]; ok { - secretKey = account.meta.Get("X-Account-Meta-Temp-Url-Key") - } - - get_hmac := func(method string) string { - mac := hmac.New(sha1.New, []byte(secretKey)) - body := fmt.Sprintf("%s\n%s\n%s", method, expires, req.URL.Path) - mac.Write([]byte(body)) - return hex.EncodeToString(mac.Sum(nil)) - } - - if req.Method == "HEAD" { - if signature != get_hmac("GET") && signature != get_hmac("POST") && signature != get_hmac("PUT") { - panic(notAuthorized()) - } - } else if signature != get_hmac(req.Method) { - panic(notAuthorized()) - } - } else { - session, ok := s.Sessions[key[7:]] - if !ok { - panic(notAuthorized()) - } - - a.user = s.Accounts[session.username] - } - - switch req.Method { - case "PUT": - resp = r.put(a) - case "GET", "HEAD": - resp = r.get(a) - case "DELETE": - resp = r.delete(a) - case "POST": - resp = r.post(a) - case "COPY": - resp = r.copy(a) - default: - fatalf(400, "MethodNotAllowed", "unknown http request method %q", req.Method) - } - - content_type := req.Header.Get("Content-Type") - if resp != nil && req.Method != "HEAD" { - if strings.HasPrefix(content_type, "application/json") || - req.URL.Query().Get("format") == "json" { - jsonMarshal(w, resp) - } else { - switch r := resp.(type) { - case string: - w.Write([]byte(r)) - default: - w.Write(resp.([]byte)) - } - } - } -} - -func jsonMarshal(w io.Writer, x interface{}) { - if err := json.NewEncoder(w).Encode(x); err != nil { - panic(fmt.Errorf("error marshalling %#v: %v", x, err)) - } -} - -var pathRegexp = regexp.MustCompile("/v1/AUTH_([a-zA-Z0-9]+)(/([^/]+)(/(.*))?)?") - -func (srv *SwiftServer) parseURL(u *url.URL) (account string, container string, object string, err error) { - m := pathRegexp.FindStringSubmatch(u.Path) - if m == nil { - return "", "", "", fmt.Errorf("Couldn't parse the specified URI") - } - account = m[1] - container = m[3] - object = m[5] - return -} - -// resourceForURL returns a resource object for the 
given URL. -func (srv *SwiftServer) resourceForURL(u *url.URL) (r resource) { - accountName, containerName, objectName, err := srv.parseURL(u) - - if err != nil { - fatalf(404, "InvalidURI", err.Error()) - } - - account, ok := srv.Accounts[accountName] - if !ok { - fatalf(404, "NoSuchAccount", "The specified account does not exist") - } - - if containerName == "" { - return rootResource{} - } - b := containerResource{ - name: containerName, - container: account.Containers[containerName], - } - - if objectName == "" { - return b - } - - if b.container == nil { - fatalf(404, "NoSuchContainer", "The specified container does not exist") - } - - objr := objectResource{ - name: objectName, - version: u.Query().Get("versionId"), - container: b.container, - } - - if obj := objr.container.objects[objr.name]; obj != nil { - objr.object = obj - } - return objr -} - -// nullResource has error stubs for all resource methods. -type nullResource struct{} - -func notAllowed() interface{} { - fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource") - return nil -} - -func notAuthorized() interface{} { - fatalf(401, "Unauthorized", "This server could not verify that you are authorized to access the document you requested.") - return nil -} - -func (nullResource) put(a *action) interface{} { return notAllowed() } -func (nullResource) get(a *action) interface{} { return notAllowed() } -func (nullResource) post(a *action) interface{} { return notAllowed() } -func (nullResource) delete(a *action) interface{} { return notAllowed() } -func (nullResource) copy(a *action) interface{} { return notAllowed() } - -type rootResource struct{} - -func (rootResource) put(a *action) interface{} { return notAllowed() } -func (rootResource) get(a *action) interface{} { - marker := a.req.Form.Get("marker") - prefix := a.req.Form.Get("prefix") - format := a.req.URL.Query().Get("format") - - h := a.w.Header() - - h.Set("X-Account-Bytes-Used", strconv.Itoa(int(a.user.BytesUsed))) - h.Set("X-Account-Container-Count", strconv.Itoa(int(a.user.Account.Containers))) - h.Set("X-Account-Object-Count", strconv.Itoa(int(a.user.Objects))) - - // add metadata - a.user.metadata.getMetadata(a) - - if a.req.Method == "HEAD" { - return nil - } - - var tmp orderedContainers - // first get all matching objects and arrange them in alphabetical order. 
- for _, container := range a.user.Containers { - if strings.HasPrefix(container.name, prefix) { - tmp = append(tmp, container) - } - } - sort.Sort(tmp) - - resp := make([]Folder, 0) - for _, container := range tmp { - if container.name <= marker { - continue - } - if format == "json" { - resp = append(resp, Folder{ - Count: len(container.objects), - Bytes: container.bytes, - Name: container.name, - }) - } else { - a.w.Write([]byte(container.name + "\n")) - } - } - - if format == "json" { - return resp - } else { - return nil - } -} - -func (r rootResource) post(a *action) interface{} { - a.user.metadata.setMetadata(a, "account") - return nil -} - -func (rootResource) delete(a *action) interface{} { - if a.req.URL.Query().Get("bulk-delete") == "1" { - fatalf(403, "Operation forbidden", "Bulk delete is not supported") - } - - return notAllowed() -} - -func (rootResource) copy(a *action) interface{} { return notAllowed() } - -func NewSwiftServer(address string) (*SwiftServer, error) { - var ( - l net.Listener - err error - ) - if strings.Index(address, ":") == -1 { - for port := 1024; port < 65535; port++ { - addr := fmt.Sprintf("%s:%d", address, port) - if l, err = net.Listen("tcp", addr); err == nil { - address = addr - break - } - } - } else { - l, err = net.Listen("tcp", address) - } - if err != nil { - return nil, fmt.Errorf("cannot listen on %s: %v", address, err) - } - - server := &SwiftServer{ - Listener: l, - AuthURL: "http://" + l.Addr().String() + "/v1.0", - URL: "http://" + l.Addr().String() + "/v1", - Accounts: make(map[string]*account), - Sessions: make(map[string]*session), - } - - server.Accounts[TEST_ACCOUNT] = &account{ - password: TEST_ACCOUNT, - metadata: metadata{ - meta: make(http.Header), - }, - Containers: make(map[string]*container), - } - - go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - server.serveHTTP(w, req) - })) - - return server, nil -} - -func (srv *SwiftServer) Close() { - srv.Listener.Close() -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/timeout_reader.go b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/timeout_reader.go deleted file mode 100644 index 3839e9ea..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/timeout_reader.go +++ /dev/null @@ -1,57 +0,0 @@ -package swift - -import ( - "io" - "time" -) - -// An io.ReadCloser which obeys an idle timeout -type timeoutReader struct { - reader io.ReadCloser - timeout time.Duration - cancel func() -} - -// Returns a wrapper around the reader which obeys an idle -// timeout. The cancel function is called if the timeout happens -func newTimeoutReader(reader io.ReadCloser, timeout time.Duration, cancel func()) *timeoutReader { - return &timeoutReader{ - reader: reader, - timeout: timeout, - cancel: cancel, - } -} - -// Read reads up to len(p) bytes into p -// -// Waits at most for timeout for the read to complete otherwise returns a timeout -func (t *timeoutReader) Read(p []byte) (int, error) { - // FIXME limit the amount of data read in one chunk so as to not exceed the timeout? 
- // Do the read in the background - type result struct { - n int - err error - } - done := make(chan result, 1) - go func() { - n, err := t.reader.Read(p) - done <- result{n, err} - }() - // Wait for the read or the timeout - select { - case r := <-done: - return r.n, r.err - case <-time.After(t.timeout): - t.cancel() - return 0, TimeoutError - } - panic("unreachable") // for Go 1.0 -} - -// Close the channel -func (t *timeoutReader) Close() error { - return t.reader.Close() -} - -// Check it satisfies the interface -var _ io.ReadCloser = &timeoutReader{} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/watchdog_reader.go b/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/watchdog_reader.go deleted file mode 100644 index b12b1bbe..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/ncw/swift/watchdog_reader.go +++ /dev/null @@ -1,34 +0,0 @@ -package swift - -import ( - "io" - "time" -) - -// An io.Reader which resets a watchdog timer whenever data is read -type watchdogReader struct { - timeout time.Duration - reader io.Reader - timer *time.Timer -} - -// Returns a new reader which will kick the watchdog timer whenever data is read -func newWatchdogReader(reader io.Reader, timeout time.Duration, timer *time.Timer) *watchdogReader { - return &watchdogReader{ - timeout: timeout, - reader: reader, - timer: timer, - } -} - -// Read reads up to len(p) bytes into p -func (t *watchdogReader) Read(p []byte) (n int, err error) { - // FIXME limit the amount of data read in one chunk so as to not exceed the timeout? - resetTimer(t.timer, t.timeout) - n, err = t.reader.Read(p) - resetTimer(t.timer, t.timeout) - return -} - -// Check it satisfies the interface -var _ io.Reader = &watchdogReader{} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/.gitignore deleted file mode 100644 index 36d1a84d..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -cobra.test diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/.travis.yml deleted file mode 100644 index dc43afd6..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: - - 1.3 - - 1.4.2 - - tip -script: - - go test ./... - - go build diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/LICENSE.txt deleted file mode 100644 index 298f0e26..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/LICENSE.txt +++ /dev/null @@ -1,174 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/README.md deleted file mode 100644 index b1fb0889..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/README.md +++ /dev/null @@ -1,485 +0,0 @@ -# Cobra - -A Commander for modern go CLI interactions - -[![Build Status](https://travis-ci.org/spf13/cobra.svg)](https://travis-ci.org/spf13/cobra) - -## Overview - -Cobra is a commander providing a simple interface to create powerful modern CLI -interfaces similar to git & go tools. In addition to providing an interface, Cobra -simultaneously provides a controller to organize your application code. - -Inspired by go, go-Commander, gh and subcommand, Cobra improves on these by -providing **fully posix compliant flags** (including short & long versions), -**nesting commands**, and the ability to **define your own help and usage** for any or -all commands. - -Cobra has an exceptionally clean interface and simple design without needless -constructors or initialization methods. - -Applications built with Cobra commands are designed to be as user friendly as -possible. 
Flags can be placed before or after the command (as long as a -confusing space isn’t provided). Both short and long flags can be used. A -command need not even be fully typed. The shortest unambiguous string will -suffice. Help is automatically generated and available for the application or -for a specific command using either the help command or the --help flag. - -## Concepts - -Cobra is built on a structure of commands & flags. - -**Commands** represent actions and **Flags** are modifiers for those actions. - -In the following example 'server' is a command and 'port' is a flag. - - hugo server --port=1313 - -### Commands - -Command is the central point of the application. Each interaction that -the application supports will be contained in a Command. A command can -have children commands and optionally run an action. - -In the example above 'server' is the command - -A Command has the following structure: - - type Command struct { - Use string // The one-line usage message. - Short string // The short description shown in the 'help' output. - Long string // The long message shown in the 'help ' output. - Run func(cmd *Command, args []string) // Run runs the command. - } - -### Flags - -A Flag is a way to modify the behavior of an command. Cobra supports -fully posix compliant flags as well as the go flag package. -A Cobra command can define flags that persist through to children commands -and flags that are only available to that command. - -In the example above 'port' is the flag. - -Flag functionality is provided by the [pflag -library](https://github.com/ogier/pflag), a fork of the flag standard library -which maintains the same interface while adding posix compliance. - -## Usage - -Cobra works by creating a set of commands and then organizing them into a tree. -The tree defines the structure of the application. - -Once each command is defined with it's corresponding flags, then the -tree is assigned to the commander which is finally executed. - -### Installing -Using Cobra is easy. First use go get to install the latest version -of the library. - - $ go get github.com/spf13/cobra - -Next include cobra in your application. - - import "github.com/spf13/cobra" - -### Create the root command - -The root command represents your binary itself. - -Cobra doesn't require any special constructors. Simply create your commands. - - var HugoCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at http://hugo.spf13.com`, - Run: func(cmd *cobra.Command, args []string) { - // Do Stuff Here - }, - } - -### Create additional commands - -Additional commands can be defined. - - var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print the version number of Hugo", - Long: `All software has versions. This is Hugo's`, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") - }, - } - -### Attach command to its parent -In this example we are attaching it to the root, but commands can be attached at any level. - - HugoCmd.AddCommand(versionCmd) - -### Assign flags to a command - -Since the flags are defined and used in different locations, we need to -define a variable outside with the correct scope to assign the flag to -work with. - - var Verbose bool - var Source string - -There are two different approaches to assign a flag. 
- -#### Persistent Flags - -A flag can be 'persistent' meaning that this flag will be available to the -command it's assigned to as well as every command under that command. For -global flags assign a flag as a persistent flag on the root. - - HugoCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") - -#### Local Flags - -A flag can also be assigned locally which will only apply to that specific command. - - HugoCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") - -### Remove a command from its parent - -Removing a command is not a common action in simple programs but it allows 3rd parties to customize an existing command tree. - -In this example, we remove the existing `VersionCmd` command of an existing root command, and we replace it by our own version. - - mainlib.RootCmd.RemoveCommand(mainlib.VersionCmd) - mainlib.RootCmd.AddCommand(versionCmd) - -### Once all commands and flags are defined, Execute the commands - -Execute should be run on the root for clarity, though it can be called on any command. - - HugoCmd.Execute() - -## Example - -In the example below we have defined three commands. Two are at the top level -and one (cmdTimes) is a child of one of the top commands. In this case the root -is not executable meaning that a subcommand is required. This is accomplished -by not providing a 'Run' for the 'rootCmd'. - -We have only defined one flag for a single command. - -More documentation about flags is available at https://github.com/spf13/pflag - - import( - "github.com/spf13/cobra" - "fmt" - "strings" - ) - - func main() { - - var echoTimes int - - var cmdPrint = &cobra.Command{ - Use: "print [string to print]", - Short: "Print anything to the screen", - Long: `print is for printing anything back to the screen. - For many years people have printed back to the screen. - `, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdEcho = &cobra.Command{ - Use: "echo [string to echo]", - Short: "Echo anything to the screen", - Long: `echo is for echoing anything back. - Echo works a lot like print, except it has a child command. - `, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdTimes = &cobra.Command{ - Use: "times [# times] [string to echo]", - Short: "Echo anything to the screen more times", - Long: `echo things multiple times back to the user by providing - a count and a string.`, - Run: func(cmd *cobra.Command, args []string) { - for i:=0; i < echoTimes; i++ { - fmt.Println("Echo: " + strings.Join(args, " ")) - } - }, - } - - cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") - - var rootCmd = &cobra.Command{Use: "app"} - rootCmd.AddCommand(cmdPrint, cmdEcho) - cmdEcho.AddCommand(cmdTimes) - rootCmd.Execute() - } - -For a more complete example of a larger application, please checkout [Hugo](http://hugo.spf13.com) - -## The Help Command - -Cobra automatically adds a help command to your application when you have subcommands. -This will be called when a user runs 'app help'. Additionally help will also -support all other commands as input. Say for instance you have a command called -'create' without any additional configuration cobra will work when 'app help -create' is called. Every command will automatically have the '--help' flag added. - -### Example - -The following output is automatically generated by cobra. 
Nothing beyond the
-command and flag definitions is needed.
-
-    > hugo help
-
-    A Fast and Flexible Static Site Generator built with
-    love by spf13 and friends in Go.
-
-    Complete documentation is available at http://hugo.spf13.com
-
-    Usage:
-      hugo [flags]
-      hugo [command]
-
-    Available Commands:
-      server          :: Hugo runs its own webserver to render the files
-      version         :: Print the version number of Hugo
-      check           :: Check content in the source directory
-      benchmark       :: Benchmark hugo by building a site a number of times
-      help [command]  :: Help about any command
-
-    Available Flags:
-      -b, --base-url="": hostname (and path) to the root, e.g. http://spf13.com/
-      -D, --build-drafts=false: include content marked as draft
-          --config="": config file (default is path/config.yaml|json|toml)
-      -d, --destination="": filesystem path to write files to
-      -s, --source="": filesystem path to read files relative from
-          --stepAnalysis=false: display memory and timing of different steps of the program
-          --uglyurls=false: if true, use /filename.html instead of /filename/
-      -v, --verbose=false: verbose output
-      -w, --watch=false: watch filesystem for changes and recreate as needed
-
-    Use "hugo help [command]" for more information about that command.
-
-Help is just a command like any other. There is no special logic or behavior
-around it. In fact, you can provide your own if you want.
-
-### Defining your own help
-
-You can provide your own Help command or your own template for the default command to use.
-
-The default help command is
-
-    func (c *Command) initHelp() {
-        if c.helpCommand == nil {
-            c.helpCommand = &Command{
-                Use:   "help [command]",
-                Short: "Help about any command",
-                Long: `Help provides help for any command in the application.
-        Simply type ` + c.Name() + ` help [path to command] for full details.`,
-                Run: c.HelpFunc(),
-            }
-        }
-        c.AddCommand(c.helpCommand)
-    }
-
-You can provide your own command, function or template through the following methods.
-
-    command.SetHelpCommand(cmd *Command)
-
-    command.SetHelpFunc(f func(*Command, []string))
-
-    command.SetHelpTemplate(s string)
-
-The latter two will also apply to any child commands.
-
-## Usage
-
-When the user provides an invalid flag or invalid command, Cobra responds by
-showing the user the 'usage'.
-
-### Example
-You may recognize this from the help above. That's because the default help
-embeds the usage as part of its output.
-
-    Usage:
-      hugo [flags]
-      hugo [command]
-
-    Available Commands:
-      server          Hugo runs its own webserver to render the files
-      version         Print the version number of Hugo
-      check           Check content in the source directory
-      benchmark       Benchmark hugo by building a site a number of times
-      help [command]  Help about any command
-
-    Available Flags:
-      -b, --base-url="": hostname (and path) to the root, e.g. http://spf13.com/
-      -D, --build-drafts=false: include content marked as draft
-          --config="": config file (default is path/config.yaml|json|toml)
-      -d, --destination="": filesystem path to write files to
-      -s, --source="": filesystem path to read files relative from
-          --stepAnalysis=false: display memory and timing of different steps of the program
-          --uglyurls=false: if true, use /filename.html instead of /filename/
-      -v, --verbose=false: verbose output
-      -w, --watch=false: watch filesystem for changes and recreate as needed
-
-### Defining your own usage
-You can provide your own usage function or template for cobra to use.
-
-The default usage function is
-
-    return func(c *Command) error {
-        err := tmpl(c.Out(), c.UsageTemplate(), c)
-        return err
-    }
-
-Like help, the function and template are overridable through public methods.
-
-    command.SetUsageFunc(f func(*Command) error)
-
-    command.SetUsageTemplate(s string)
-
-## PreRun or PostRun Hooks
-
-It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order:
-
-- `PersistentPreRun`
-- `PreRun`
-- `Run`
-- `PostRun`
-- `PersistentPostRun`
-
-An example of two commands which use all of these features is below. When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`:
-
-```go
-package main
-
-import (
-	"fmt"
-
-	"github.com/spf13/cobra"
-)
-
-func main() {
-
-	var rootCmd = &cobra.Command{
-		Use:   "root [sub]",
-		Short: "My root command",
-		PersistentPreRun: func(cmd *cobra.Command, args []string) {
-			fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args)
-		},
-		PreRun: func(cmd *cobra.Command, args []string) {
-			fmt.Printf("Inside rootCmd PreRun with args: %v\n", args)
-		},
-		Run: func(cmd *cobra.Command, args []string) {
-			fmt.Printf("Inside rootCmd Run with args: %v\n", args)
-		},
-		PostRun: func(cmd *cobra.Command, args []string) {
-			fmt.Printf("Inside rootCmd PostRun with args: %v\n", args)
-		},
-		PersistentPostRun: func(cmd *cobra.Command, args []string) {
-			fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args)
-		},
-	}
-
-	var subCmd = &cobra.Command{
-		Use:   "sub [no options!]",
-		Short: "My sub command",
-		PreRun: func(cmd *cobra.Command, args []string) {
-			fmt.Printf("Inside subCmd PreRun with args: %v\n", args)
-		},
-		Run: func(cmd *cobra.Command, args []string) {
-			fmt.Printf("Inside subCmd Run with args: %v\n", args)
-		},
-		PostRun: func(cmd *cobra.Command, args []string) {
-			fmt.Printf("Inside subCmd PostRun with args: %v\n", args)
-		},
-		PersistentPostRun: func(cmd *cobra.Command, args []string) {
-			fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args)
-		},
-	}
-
-	rootCmd.AddCommand(subCmd)
-
-	rootCmd.SetArgs([]string{""})
-	_ = rootCmd.Execute()
-	fmt.Print("\n")
-	rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
-	_ = rootCmd.Execute()
-}
-```
-
-## Generating markdown formatted documentation for your command
-
-Cobra can generate a markdown formatted document based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Markdown Docs](md_docs.md).
-
-## Generating bash completions for your command
-
-Cobra can generate a bash completions file. If you add more information to your command, these completions can be amazingly powerful and flexible.
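-
-As a minimal sketch (the `app` command and the `app_completion.sh` file name are placeholders invented for this example, not part of cobra's API), generating the completion script for a command tree might look like:
-
-```go
-package main
-
-import "github.com/spf13/cobra"
-
-func main() {
-	rootCmd := &cobra.Command{
-		Use:   "app",
-		Short: "An example application",
-		Run:   func(cmd *cobra.Command, args []string) {},
-	}
-
-	// GenBashCompletionFile walks the whole command tree and writes
-	// a bash completion script to the given file.
-	if err := rootCmd.GenBashCompletionFile("app_completion.sh"); err != nil {
-		panic(err)
-	}
-}
-```
-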
Read more about [Bash Completions](bash_completions.md) - -## Debugging - -Cobra provides a ‘DebugFlags’ method on a command which when called will print -out everything Cobra knows about the flags for each command - -### Example - - command.DebugFlags() - -## Release Notes -* **0.9.0** June 17, 2014 - * flags can appears anywhere in the args (provided they are unambiguous) - * --help prints usage screen for app or command - * Prefix matching for commands - * Cleaner looking help and usage output - * Extensive test suite -* **0.8.0** Nov 5, 2013 - * Reworked interface to remove commander completely - * Command now primary structure - * No initialization needed - * Usage & Help templates & functions definable at any level - * Updated Readme -* **0.7.0** Sept 24, 2013 - * Needs more eyes - * Test suite - * Support for automatic error messages - * Support for help command - * Support for printing to any io.Writer instead of os.Stderr - * Support for persistent flags which cascade down tree - * Ready for integration into Hugo -* **0.1.0** Sept 3, 2013 - * Implement first draft - -## ToDo -* Launch proper documentation site - -## Contributing - -1. Fork it -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Commit your changes (`git commit -am 'Add some feature'`) -4. Push to the branch (`git push origin my-new-feature`) -5. Create new Pull Request - -## Contributors - -Names in no particular order: - -* [spf13](https://github.com/spf13) - -## License - -Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) - - -[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/spf13/cobra/trend.png)](https://bitdeli.com/free "Bitdeli Badge") - diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/bash_completions.go deleted file mode 100644 index 735d2f54..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/bash_completions.go +++ /dev/null @@ -1,357 +0,0 @@ -package cobra - -import ( - "bytes" - "fmt" - "os" - "sort" - "strings" - - "github.com/spf13/pflag" -) - -const ( - BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extentions" - BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag" -) - -func preamble(out *bytes.Buffer) { - fmt.Fprintf(out, `#!/bin/bash - - -__debug() -{ - if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then - echo "$*" >> "${BASH_COMP_DEBUG_FILE}" - fi -} - -__index_of_word() -{ - local w word=$1 - shift - index=0 - for w in "$@"; do - [[ $w = "$word" ]] && return - index=$((index+1)) - done - index=-1 -} - -__contains_word() -{ - local w word=$1; shift - for w in "$@"; do - [[ $w = "$word" ]] && return - done - return 1 -} - -__handle_reply() -{ - __debug "${FUNCNAME}" - case $cur in - -*) - compopt -o nospace - local allflags - if [ ${#must_have_one_flag[@]} -ne 0 ]; then - allflags=("${must_have_one_flag[@]}") - else - allflags=("${flags[*]} ${two_word_flags[*]}") - fi - COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") ) - [[ $COMPREPLY == *= ]] || compopt +o nospace - return 0; - ;; - esac - - # check if we are handling a flag with special work handling - local index - __index_of_word "${prev}" "${flags_with_completion[@]}" - if [[ ${index} -ge 0 ]]; then - ${flags_completion[${index}]} - return - fi - - # we are parsing a flag and don't have a special handler, no completion - if [[ ${cur} != "${words[cword]}" 
]]; then - return - fi - - local completions - if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then - completions=("${must_have_one_flag[@]}") - elif [[ ${#must_have_one_noun[@]} -ne 0 ]]; then - completions=("${must_have_one_noun[@]}") - else - completions=("${commands[@]}") - fi - COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") ) - - if [[ ${#COMPREPLY[@]} -eq 0 ]]; then - declare -F __custom_func >/dev/null && __custom_func - fi -} - -# The arguments should be in the form "ext1|ext2|extn" -__handle_filename_extension_flag() -{ - local ext="$1" - _filedir "@(${ext})" -} - -__handle_flag() -{ - __debug "${FUNCNAME}: c is $c words[c] is ${words[c]}" - - # if a command required a flag, and we found it, unset must_have_one_flag() - local flagname=${words[c]} - # if the word contained an = - if [[ ${words[c]} == *"="* ]]; then - flagname=${flagname%%=*} # strip everything after the = - flagname="${flagname}=" # but put the = back - fi - __debug "${FUNCNAME}: looking for ${flagname}" - if __contains_word "${flagname}" "${must_have_one_flag[@]}"; then - must_have_one_flag=() - fi - - # skip the argument to a two word flag - if __contains_word "${words[c]}" "${two_word_flags[@]}"; then - c=$((c+1)) - # if we are looking for a flags value, don't show commands - if [[ $c -eq $cword ]]; then - commands=() - fi - fi - - # skip the flag itself - c=$((c+1)) - -} - -__handle_noun() -{ - __debug "${FUNCNAME}: c is $c words[c] is ${words[c]}" - - if __contains_word "${words[c]}" "${must_have_one_noun[@]}"; then - must_have_one_noun=() - fi - - nouns+=("${words[c]}") - c=$((c+1)) -} - -__handle_command() -{ - __debug "${FUNCNAME}: c is $c words[c] is ${words[c]}" - - local next_command - if [[ -n ${last_command} ]]; then - next_command="_${last_command}_${words[c]}" - else - next_command="_${words[c]}" - fi - c=$((c+1)) - __debug "${FUNCNAME}: looking for ${next_command}" - declare -F $next_command >/dev/null && $next_command -} - -__handle_word() -{ - if [[ $c -ge $cword ]]; then - __handle_reply - return - fi - __debug "${FUNCNAME}: c is $c words[c] is ${words[c]}" - if [[ "${words[c]}" == -* ]]; then - __handle_flag - elif __contains_word "${words[c]}" "${commands[@]}"; then - __handle_command - else - __handle_noun - fi - __handle_word -} - -`) -} - -func postscript(out *bytes.Buffer, name string) { - fmt.Fprintf(out, "__start_%s()\n", name) - fmt.Fprintf(out, `{ - local cur prev words cword - _init_completion -s || return - - local c=0 - local flags=() - local two_word_flags=() - local flags_with_completion=() - local flags_completion=() - local commands=("%s") - local must_have_one_flag=() - local must_have_one_noun=() - local last_command - local nouns=() - - __handle_word -} - -`, name) - fmt.Fprintf(out, "complete -F __start_%s %s\n", name, name) - fmt.Fprintf(out, "# ex: ts=4 sw=4 et filetype=sh\n") -} - -func writeCommands(cmd *Command, out *bytes.Buffer) { - fmt.Fprintf(out, " commands=()\n") - for _, c := range cmd.Commands() { - if len(c.Deprecated) > 0 { - continue - } - fmt.Fprintf(out, " commands+=(%q)\n", c.Name()) - } - fmt.Fprintf(out, "\n") -} - -func writeFlagHandler(name string, annotations map[string][]string, out *bytes.Buffer) { - for key, value := range annotations { - switch key { - case BashCompFilenameExt: - fmt.Fprintf(out, " flags_with_completion+=(%q)\n", name) - - ext := strings.Join(value, "|") - ext = "__handle_filename_extension_flag " + ext - fmt.Fprintf(out, " flags_completion+=(%q)\n", ext) - } - } -} - -func writeShortFlag(flag *pflag.Flag, out *bytes.Buffer) { - 
b := (flag.Value.Type() == "bool") - name := flag.Shorthand - format := " " - if !b { - format += "two_word_" - } - format += "flags+=(\"-%s\")\n" - fmt.Fprintf(out, format, name) - writeFlagHandler("-"+name, flag.Annotations, out) -} - -func writeFlag(flag *pflag.Flag, out *bytes.Buffer) { - b := (flag.Value.Type() == "bool") - name := flag.Name - format := " flags+=(\"--%s" - if !b { - format += "=" - } - format += "\")\n" - fmt.Fprintf(out, format, name) - writeFlagHandler("--"+name, flag.Annotations, out) -} - -func writeFlags(cmd *Command, out *bytes.Buffer) { - fmt.Fprintf(out, ` flags=() - two_word_flags=() - flags_with_completion=() - flags_completion=() - -`) - cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - writeFlag(flag, out) - if len(flag.Shorthand) > 0 { - writeShortFlag(flag, out) - } - }) - - fmt.Fprintf(out, "\n") -} - -func writeRequiredFlag(cmd *Command, out *bytes.Buffer) { - fmt.Fprintf(out, " must_have_one_flag=()\n") - flags := cmd.NonInheritedFlags() - flags.VisitAll(func(flag *pflag.Flag) { - for key, _ := range flag.Annotations { - switch key { - case BashCompOneRequiredFlag: - format := " must_have_one_flag+=(\"--%s" - b := (flag.Value.Type() == "bool") - if !b { - format += "=" - } - format += "\")\n" - fmt.Fprintf(out, format, flag.Name) - - if len(flag.Shorthand) > 0 { - fmt.Fprintf(out, " must_have_one_flag+=(\"-%s\")\n", flag.Shorthand) - } - } - } - }) -} - -func writeRequiredNoun(cmd *Command, out *bytes.Buffer) { - fmt.Fprintf(out, " must_have_one_noun=()\n") - sort.Sort(sort.StringSlice(cmd.ValidArgs)) - for _, value := range cmd.ValidArgs { - fmt.Fprintf(out, " must_have_one_noun+=(%q)\n", value) - } -} - -func gen(cmd *Command, out *bytes.Buffer) { - for _, c := range cmd.Commands() { - if len(c.Deprecated) > 0 { - continue - } - gen(c, out) - } - commandName := cmd.CommandPath() - commandName = strings.Replace(commandName, " ", "_", -1) - fmt.Fprintf(out, "_%s()\n{\n", commandName) - fmt.Fprintf(out, " last_command=%q\n", commandName) - writeCommands(cmd, out) - writeFlags(cmd, out) - writeRequiredFlag(cmd, out) - writeRequiredNoun(cmd, out) - fmt.Fprintf(out, "}\n\n") -} - -func (cmd *Command) GenBashCompletion(out *bytes.Buffer) { - preamble(out) - if len(cmd.BashCompletionFunction) > 0 { - fmt.Fprintf(out, "%s\n", cmd.BashCompletionFunction) - } - gen(cmd, out) - postscript(out, cmd.Name()) -} - -func (cmd *Command) GenBashCompletionFile(filename string) error { - out := new(bytes.Buffer) - - cmd.GenBashCompletion(out) - - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - _, err = outFile.Write(out.Bytes()) - if err != nil { - return err - } - return nil -} - -func (cmd *Command) MarkFlagRequired(name string) { - flag := cmd.Flags().Lookup(name) - if flag == nil { - return - } - if flag.Annotations == nil { - flag.Annotations = make(map[string][]string) - } - annotation := make([]string, 1) - annotation[0] = "true" - flag.Annotations[BashCompOneRequiredFlag] = annotation -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/bash_completions.md deleted file mode 100644 index 204704ef..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/bash_completions.md +++ /dev/null @@ -1,149 +0,0 @@ -# Generating Bash Completions For Your Own cobra.Command - -Generating bash completions from a cobra command is incredibly easy. 
An actual program which does so for the kubernetes kubectl binary is as follows:
-
-```go
-package main
-
-import (
-	"io/ioutil"
-	"os"
-
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd"
-)
-
-func main() {
-	kubectl := cmd.NewFactory(nil).NewKubectlCommand(os.Stdin, ioutil.Discard, ioutil.Discard)
-	kubectl.GenBashCompletionFile("out.sh")
-}
-```
-
-That will get you completions of subcommands and flags. If you make additional annotations to your code, you can get even more intelligent and flexible behavior.
-
-## Creating your own custom functions
-
-Some more actual code that works in kubernetes:
-
-```go
-const (
-	bash_completion_func = `__kubectl_parse_get()
-{
-    local kubectl_output out
-    if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then
-        out=($(echo "${kubectl_output}" | awk '{print $1}'))
-        COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) )
-    fi
-}
-
-__kubectl_get_resource()
-{
-    if [[ ${#nouns[@]} -eq 0 ]]; then
-        return 1
-    fi
-    __kubectl_parse_get ${nouns[${#nouns[@]} -1]}
-    if [[ $? -eq 0 ]]; then
-        return 0
-    fi
-}
-
-__custom_func() {
-    case ${last_command} in
-        kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop)
-            __kubectl_get_resource
-            return
-            ;;
-        *)
-            ;;
-    esac
-}
-`)
-```
-
-And then I set that in my command definition:
-
-```go
-cmds := &cobra.Command{
-	Use:   "kubectl",
-	Short: "kubectl controls the Kubernetes cluster manager",
-	Long: `kubectl controls the Kubernetes cluster manager.
-
-Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`,
-	Run: runHelp,
-	BashCompletionFunction: bash_completion_func,
-}
-```
-
-The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__custom_func()` to be called when the built-in processor is unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__custom_func()` will run because the cobra.Command only understood "kubectl" and "get." `__custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods!
-
-## Have the completions code complete your 'nouns'
-
-In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like:
-
-```go
-var validArgs = []string{"pods", "nodes", "services", "replicationControllers"}
-
-cmd := &cobra.Command{
-	Use:     "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
-	Short:   "Display one or many resources",
-	Long:    get_long,
-	Example: get_example,
-	Run: func(cmd *cobra.Command, args []string) {
-		err := RunGet(f, out, cmd, args)
-		util.CheckErr(err)
-	},
-	ValidArgs: validArgs,
-}
-```
-
-Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like:
-
-```bash
-# kubectl get [tab][tab]
-nodes   pods   replicationControllers   services
-```
-
-## Mark flags as required
-
-Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab].
Marking a flag as 'Required' is incredibly easy. - -```go -cmd.MarkFlagRequired("pod") -cmd.MarkFlagRequired("container") -``` - -and you'll get something like - -```bash -# kubectl exec [tab][tab][tab] --c --container= -p --pod= -``` - -# Specify valid filename extensions for flags that take a filename - -In this example we use --filename= and expect to get a json or yaml file as the argument. To make this easier we annotate the --filename flag with valid filename extensions. - -```go - annotations := []string{"json", "yaml", "yml"} - annotation := make(map[string][]string) - annotation[cobra.BashCompFilenameExt] = annotations - - flag := &pflag.Flag{ - Name: "filename", - Shorthand: "f", - Usage: usage, - Value: value, - DefValue: value.String(), - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -Now when you run a command with this filename flag you'll get something like - -```bash -# kubectl create -f -test/ example/ rpmbuild/ -hello.yml test.json -``` - -So while there are many other files in the CWD it only shows me subdirs and those with valid extensions. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/cobra.go deleted file mode 100644 index 78b92b0a..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/cobra.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright © 2013 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Commands similar to git, go tools and other modern CLI tools -// inspired by go, go-Commander, gh and subcommand - -package cobra - -import ( - "fmt" - "io" - "reflect" - "strconv" - "strings" - "text/template" -) - -var initializers []func() - -// automatic prefix matching can be a dangerous thing to automatically enable in CLI tools. -// Set this to true to enable it -var EnablePrefixMatching bool = false - -// enables an information splash screen on Windows if the CLI is started from explorer.exe. -var EnableWindowsMouseTrap bool = true - -var MousetrapHelpText string = `This is a command line tool - -You need to open cmd.exe and run it from there. -` - -//OnInitialize takes a series of func() arguments and appends them to a slice of func(). -func OnInitialize(y ...func()) { - for _, x := range y { - initializers = append(initializers, x) - } -} - -//Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans, -//Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as -//ints and then compared. 
-func Gt(a interface{}, b interface{}) bool { - var left, right int64 - av := reflect.ValueOf(a) - - switch av.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - left = int64(av.Len()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - left = av.Int() - case reflect.String: - left, _ = strconv.ParseInt(av.String(), 10, 64) - } - - bv := reflect.ValueOf(b) - - switch bv.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - right = int64(bv.Len()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - right = bv.Int() - case reflect.String: - right, _ = strconv.ParseInt(bv.String(), 10, 64) - } - - return left > right -} - -//Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic. -func Eq(a interface{}, b interface{}) bool { - av := reflect.ValueOf(a) - bv := reflect.ValueOf(b) - - switch av.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - panic("Eq called on unsupported type") - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return av.Int() == bv.Int() - case reflect.String: - return av.String() == bv.String() - } - return false -} - -//rpad adds padding to the right of a string -func rpad(s string, padding int) string { - template := fmt.Sprintf("%%-%ds", padding) - return fmt.Sprintf(template, s) -} - -// tmpl executes the given template text on data, writing the result to w. -func tmpl(w io.Writer, text string, data interface{}) error { - t := template.New("top") - t.Funcs(template.FuncMap{ - "trim": strings.TrimSpace, - "rpad": rpad, - "gt": Gt, - "eq": Eq, - }) - template.Must(t.Parse(text)) - return t.Execute(w, data) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/command.go deleted file mode 100644 index 3831c337..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/command.go +++ /dev/null @@ -1,1031 +0,0 @@ -// Copyright © 2013 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. -//In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" - "runtime" - "strings" - "time" - - "github.com/inconshreveable/mousetrap" - flag "github.com/spf13/pflag" -) - -// Command is just that, a command for your application. -// eg. 'go run' ... 'run' is the command. Cobra requires -// you to define the usage and description as part of your command -// definition to ensure usability. -type Command struct { - // Name is the command name, usually the executable's name. - name string - // The one-line usage message. - Use string - // An array of aliases that can be used instead of the first word in Use. 
- Aliases []string - // The short description shown in the 'help' output. - Short string - // The long message shown in the 'help ' output. - Long string - // Examples of how to use the command - Example string - // List of all valid non-flag arguments, used for bash completions *TODO* actually validate these - ValidArgs []string - // Custom functions used by the bash autocompletion generator - BashCompletionFunction string - // Is this command deprecated and should print this string when used? - Deprecated string - // Full set of flags - flags *flag.FlagSet - // Set of flags childrens of this command will inherit - pflags *flag.FlagSet - // Flags that are declared specifically by this command (not inherited). - lflags *flag.FlagSet - // The *Run functions are executed in the following order: - // * PersistentPreRun() - // * PreRun() - // * Run() - // * PostRun() - // * PersistentPostRun() - // All functions get the same args, the arguments after the command name - // PersistentPreRun: children of this command will inherit and execute - PersistentPreRun func(cmd *Command, args []string) - // PreRun: children of this command will not inherit. - PreRun func(cmd *Command, args []string) - // Run: Typically the actual work function. Most commands will only implement this - Run func(cmd *Command, args []string) - // PostRun: run after the Run command. - PostRun func(cmd *Command, args []string) - // PersistentPostRun: children of this command will inherit and execute after PostRun - PersistentPostRun func(cmd *Command, args []string) - // Commands is the list of commands supported by this program. - commands []*Command - // Parent Command for this command - parent *Command - // max lengths of commands' string lengths for use in padding - commandsMaxUseLen int - commandsMaxCommandPathLen int - commandsMaxNameLen int - - flagErrorBuf *bytes.Buffer - cmdErrorBuf *bytes.Buffer - - args []string // actual args parsed from flags - output *io.Writer // nil means stderr; use Out() method instead - usageFunc func(*Command) error // Usage can be defined by application - usageTemplate string // Can be defined by Application - helpTemplate string // Can be defined by Application - helpFunc func(*Command, []string) // Help can be defined by application - helpCommand *Command // The help command - helpFlagVal bool - // The global normalization function that we can use on every pFlag set and children commands - globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName -} - -// os.Args[1:] by default, if desired, can be overridden -// particularly useful when testing. -func (c *Command) SetArgs(a []string) { - c.args = a -} - -func (c *Command) getOut(def io.Writer) io.Writer { - if c.output != nil { - return *c.output - } - - if c.HasParent() { - return c.parent.Out() - } else { - return def - } -} - -func (c *Command) Out() io.Writer { - return c.getOut(os.Stderr) -} - -func (c *Command) getOutOrStdout() io.Writer { - return c.getOut(os.Stdout) -} - -// SetOutput sets the destination for usage and error messages. -// If output is nil, os.Stderr is used. 
-func (c *Command) SetOutput(output io.Writer) { - c.output = &output -} - -// Usage can be defined by application -func (c *Command) SetUsageFunc(f func(*Command) error) { - c.usageFunc = f -} - -// Can be defined by Application -func (c *Command) SetUsageTemplate(s string) { - c.usageTemplate = s -} - -// Can be defined by Application -func (c *Command) SetHelpFunc(f func(*Command, []string)) { - c.helpFunc = f -} - -func (c *Command) SetHelpCommand(cmd *Command) { - c.helpCommand = cmd -} - -// Can be defined by Application -func (c *Command) SetHelpTemplate(s string) { - c.helpTemplate = s -} - -// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. -// The user should not have a cyclic dependency on commands. -func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { - c.Flags().SetNormalizeFunc(n) - c.PersistentFlags().SetNormalizeFunc(n) - c.LocalFlags().SetNormalizeFunc(n) - c.globNormFunc = n - - for _, command := range c.commands { - command.SetGlobalNormalizationFunc(n) - } -} - -func (c *Command) UsageFunc() (f func(*Command) error) { - if c.usageFunc != nil { - return c.usageFunc - } - - if c.HasParent() { - return c.parent.UsageFunc() - } else { - return func(c *Command) error { - err := tmpl(c.Out(), c.UsageTemplate(), c) - return err - } - } -} -func (c *Command) HelpFunc() func(*Command, []string) { - if c.helpFunc != nil { - return c.helpFunc - } - - if c.HasParent() { - return c.parent.HelpFunc() - } else { - return func(c *Command, args []string) { - if len(args) == 0 { - // Help called without any topic, calling on root - c.Root().Help() - return - } - - cmd, _, e := c.Root().Find(args) - if cmd == nil || e != nil { - c.Printf("Unknown help topic %#q.", args) - - c.Root().Usage() - } else { - err := cmd.Help() - if err != nil { - c.Println(err) - } - } - } - } -} - -var minUsagePadding int = 25 - -func (c *Command) UsagePadding() int { - if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen { - return minUsagePadding - } else { - return c.parent.commandsMaxUseLen - } -} - -var minCommandPathPadding int = 11 - -// -func (c *Command) CommandPathPadding() int { - if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen { - return minCommandPathPadding - } else { - return c.parent.commandsMaxCommandPathLen - } -} - -var minNamePadding int = 11 - -func (c *Command) NamePadding() int { - if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen { - return minNamePadding - } else { - return c.parent.commandsMaxNameLen - } -} - -func (c *Command) UsageTemplate() string { - if c.usageTemplate != "" { - return c.usageTemplate - } - - if c.HasParent() { - return c.parent.UsageTemplate() - } else { - return `{{ $cmd := . 
}} -Usage: {{if .Runnable}} - {{.UseLine}}{{if .HasFlags}} [flags]{{end}}{{end}}{{if .HasSubCommands}} - {{ .CommandPath}} [command]{{end}}{{if gt .Aliases 0}} - -Aliases: - {{.NameAndAliases}} -{{end}}{{if .HasExample}} - -Examples: -{{ .Example }} -{{end}}{{ if .HasRunnableSubCommands}} - -Available Commands: {{range .Commands}}{{if and (.Runnable) (not .Deprecated)}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}} -{{end}} -{{ if .HasLocalFlags}}Flags: -{{.LocalFlags.FlagUsages}}{{end}} -{{ if .HasInheritedFlags}}Global Flags: -{{.InheritedFlags.FlagUsages}}{{end}}{{if or (.HasHelpSubCommands) (.HasRunnableSiblings)}} -Additional help topics: -{{if .HasHelpSubCommands}}{{range .Commands}}{{if and (not .Runnable) (not .Deprecated)}} {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasRunnableSiblings }}{{range .Parent.Commands}}{{if and (not .Runnable) (not .Deprecated)}}{{if not (eq .Name $cmd.Name) }} - {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{end}} -{{end}}{{ if .HasSubCommands }} -Use "{{.Root.Name}} help [command]" for more information about a command. -{{end}}` - } -} - -func (c *Command) HelpTemplate() string { - if c.helpTemplate != "" { - return c.helpTemplate - } - - if c.HasParent() { - return c.parent.HelpTemplate() - } else { - return `{{with or .Long .Short }}{{. | trim}}{{end}} -{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}} -` - } -} - -// Really only used when casting a command to a commander -func (c *Command) resetChildrensParents() { - for _, x := range c.commands { - x.parent = c - } -} - -// Test if the named flag is a boolean flag. -func isBooleanFlag(name string, f *flag.FlagSet) bool { - flag := f.Lookup(name) - if flag == nil { - return false - } - return flag.Value.Type() == "bool" -} - -// Test if the named flag is a boolean flag. -func isBooleanShortFlag(name string, f *flag.FlagSet) bool { - result := false - f.VisitAll(func(f *flag.Flag) { - if f.Shorthand == name && f.Value.Type() == "bool" { - result = true - } - }) - return result -} - -func stripFlags(args []string, c *Command) []string { - if len(args) < 1 { - return args - } - c.mergePersistentFlags() - - commands := []string{} - - inQuote := false - inFlag := false - for _, y := range args { - if !inQuote { - switch { - case strings.HasPrefix(y, "\""): - inQuote = true - case strings.Contains(y, "=\""): - inQuote = true - case strings.HasPrefix(y, "--") && !strings.Contains(y, "="): - // TODO: this isn't quite right, we should really check ahead for 'true' or 'false' - inFlag = !isBooleanFlag(y[2:], c.Flags()) - case strings.HasPrefix(y, "-") && !strings.Contains(y, "=") && len(y) == 2 && !isBooleanShortFlag(y[1:], c.Flags()): - inFlag = true - case inFlag: - inFlag = false - case y == "": - // strip empty commands, as the go tests expect this to be ok.... - case !strings.HasPrefix(y, "-"): - commands = append(commands, y) - inFlag = false - } - } - - if strings.HasSuffix(y, "\"") && !strings.HasSuffix(y, "\\\"") { - inQuote = false - } - } - - return commands -} - -// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like -// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]). -func argsMinusFirstX(args []string, x string) []string { - for i, y := range args { - if x == y { - ret := []string{} - ret = append(ret, args[:i]...) - ret = append(ret, args[i+1:]...) 
- return ret - } - } - return args -} - -// find the target command given the args and command tree -// Meant to be run on the highest node. Only searches down. -func (c *Command) Find(args []string) (*Command, []string, error) { - if c == nil { - return nil, nil, fmt.Errorf("Called find() on a nil Command") - } - - // If there are no arguments, return the root command. If the root has no - // subcommands, args reflects arguments that should actually be passed to - // the root command, so also return the root command. - if len(args) == 0 || !c.Root().HasSubCommands() { - return c.Root(), args, nil - } - - var innerfind func(*Command, []string) (*Command, []string) - - innerfind = func(c *Command, innerArgs []string) (*Command, []string) { - if len(innerArgs) > 0 && c.HasSubCommands() { - argsWOflags := stripFlags(innerArgs, c) - if len(argsWOflags) > 0 { - matches := make([]*Command, 0) - for _, cmd := range c.commands { - if cmd.Name() == argsWOflags[0] || cmd.HasAlias(argsWOflags[0]) { // exact name or alias match - return innerfind(cmd, argsMinusFirstX(innerArgs, argsWOflags[0])) - } else if EnablePrefixMatching { - if strings.HasPrefix(cmd.Name(), argsWOflags[0]) { // prefix match - matches = append(matches, cmd) - } - for _, x := range cmd.Aliases { - if strings.HasPrefix(x, argsWOflags[0]) { - matches = append(matches, cmd) - } - } - } - } - - // only accept a single prefix match - multiple matches would be ambiguous - if len(matches) == 1 { - return innerfind(matches[0], argsMinusFirstX(innerArgs, argsWOflags[0])) - } - } - } - - return c, innerArgs - } - - commandFound, a := innerfind(c, args) - - // If we matched on the root, but we asked for a subcommand, return an error - if commandFound.Name() == c.Name() && len(stripFlags(args, c)) > 0 && commandFound.Name() != args[0] { - return nil, a, fmt.Errorf("unknown command %q", a[0]) - } - - return commandFound, a, nil -} - -func (c *Command) Root() *Command { - var findRoot func(*Command) *Command - - findRoot = func(x *Command) *Command { - if x.HasParent() { - return findRoot(x.parent) - } else { - return x - } - } - - return findRoot(c) -} - -func (c *Command) execute(a []string) (err error) { - if c == nil { - return fmt.Errorf("Called Execute() on a nil Command") - } - - if len(c.Deprecated) > 0 { - c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated) - } - - err = c.ParseFlags(a) - if err == flag.ErrHelp { - c.Help() - return nil - } - if err != nil { - // We're writing subcommand usage to root command's error buffer to have it displayed to the user - r := c.Root() - if r.cmdErrorBuf == nil { - r.cmdErrorBuf = new(bytes.Buffer) - } - // for writing the usage to the buffer we need to switch the output temporarily - // since Out() returns root output, you also need to revert that on root - out := r.Out() - r.SetOutput(r.cmdErrorBuf) - c.Usage() - r.SetOutput(out) - return err - } - // If help is called, regardless of other flags, we print that. - // Print help also if c.Run is nil. 
- if c.helpFlagVal || !c.Runnable() { - c.Help() - return nil - } - - c.preRun() - argWoFlags := c.Flags().Args() - - for p := c; p != nil; p = p.Parent() { - if p.PersistentPreRun != nil { - p.PersistentPreRun(c, argWoFlags) - break - } - } - if c.PreRun != nil { - c.PreRun(c, argWoFlags) - } - - c.Run(c, argWoFlags) - - if c.PostRun != nil { - c.PostRun(c, argWoFlags) - } - for p := c; p != nil; p = p.Parent() { - if p.PersistentPostRun != nil { - p.PersistentPostRun(c, argWoFlags) - break - } - } - - return nil -} - -func (c *Command) preRun() { - for _, x := range initializers { - x() - } -} - -func (c *Command) errorMsgFromParse() string { - s := c.flagErrorBuf.String() - - x := strings.Split(s, "\n") - - if len(x) > 0 { - return x[0] - } else { - return "" - } -} - -// Call execute to use the args (os.Args[1:] by default) -// and run through the command tree finding appropriate matches -// for commands and then corresponding flags. -func (c *Command) Execute() (err error) { - - // Regardless of what command execute is called on, run on Root only - if c.HasParent() { - return c.Root().Execute() - } - - if EnableWindowsMouseTrap && runtime.GOOS == "windows" { - if mousetrap.StartedByExplorer() { - c.Print(MousetrapHelpText) - time.Sleep(5 * time.Second) - os.Exit(1) - } - } - - // initialize help as the last point possible to allow for user - // overriding - c.initHelp() - - var args []string - - if len(c.args) == 0 { - args = os.Args[1:] - } else { - args = c.args - } - - cmd, flags, err := c.Find(args) - if err == nil { - err = cmd.execute(flags) - } - - if err != nil { - if err == flag.ErrHelp { - c.Help() - - } else { - c.Println("Error:", err.Error()) - c.Printf("Run '%v help' for usage.\n", c.Root().Name()) - } - } - - return -} - -func (c *Command) initHelp() { - if c.helpCommand == nil { - if !c.HasSubCommands() { - return - } - - c.helpCommand = &Command{ - Use: "help [command]", - Short: "Help about any command", - Long: `Help provides help for any command in the application. - Simply type ` + c.Name() + ` help [path to command] for full details.`, - Run: c.HelpFunc(), - PersistentPreRun: func(cmd *Command, args []string) {}, - PersistentPostRun: func(cmd *Command, args []string) {}, - } - } - c.AddCommand(c.helpCommand) -} - -// Used for testing -func (c *Command) ResetCommands() { - c.commands = nil - c.helpCommand = nil - c.cmdErrorBuf = new(bytes.Buffer) - c.cmdErrorBuf.Reset() -} - -//Commands returns a slice of child commands. -func (c *Command) Commands() []*Command { - return c.commands -} - -// AddCommand adds one or more commands to this parent command. -func (c *Command) AddCommand(cmds ...*Command) { - for i, x := range cmds { - if cmds[i] == c { - panic("Command can't be a child of itself") - } - cmds[i].parent = c - // update max lengths - usageLen := len(x.Use) - if usageLen > c.commandsMaxUseLen { - c.commandsMaxUseLen = usageLen - } - commandPathLen := len(x.CommandPath()) - if commandPathLen > c.commandsMaxCommandPathLen { - c.commandsMaxCommandPathLen = commandPathLen - } - nameLen := len(x.Name()) - if nameLen > c.commandsMaxNameLen { - c.commandsMaxNameLen = nameLen - } - // If glabal normalization function exists, update all children - if c.globNormFunc != nil { - x.SetGlobalNormalizationFunc(c.globNormFunc) - } - c.commands = append(c.commands, x) - } -} - -// AddCommand removes one or more commands from a parent command. 
-func (c *Command) RemoveCommand(cmds ...*Command) { - commands := []*Command{} -main: - for _, command := range c.commands { - for _, cmd := range cmds { - if command == cmd { - command.parent = nil - continue main - } - } - commands = append(commands, command) - } - c.commands = commands - // recompute all lengths - c.commandsMaxUseLen = 0 - c.commandsMaxCommandPathLen = 0 - c.commandsMaxNameLen = 0 - for _, command := range c.commands { - usageLen := len(command.Use) - if usageLen > c.commandsMaxUseLen { - c.commandsMaxUseLen = usageLen - } - commandPathLen := len(command.CommandPath()) - if commandPathLen > c.commandsMaxCommandPathLen { - c.commandsMaxCommandPathLen = commandPathLen - } - nameLen := len(command.Name()) - if nameLen > c.commandsMaxNameLen { - c.commandsMaxNameLen = nameLen - } - } -} - -// Convenience method to Print to the defined output -func (c *Command) Print(i ...interface{}) { - fmt.Fprint(c.Out(), i...) -} - -// Convenience method to Println to the defined output -func (c *Command) Println(i ...interface{}) { - str := fmt.Sprintln(i...) - c.Print(str) -} - -// Convenience method to Printf to the defined output -func (c *Command) Printf(format string, i ...interface{}) { - str := fmt.Sprintf(format, i...) - c.Print(str) -} - -// Output the usage for the command -// Used when a user provides invalid input -// Can be defined by user by overriding UsageFunc -func (c *Command) Usage() error { - c.mergePersistentFlags() - err := c.UsageFunc()(c) - return err -} - -// Output the help for the command -// Used when a user calls help [command] -// by the default HelpFunc in the commander -func (c *Command) Help() error { - c.mergePersistentFlags() - err := tmpl(c.getOutOrStdout(), c.HelpTemplate(), c) - return err -} - -func (c *Command) UsageString() string { - tmpOutput := c.output - bb := new(bytes.Buffer) - c.SetOutput(bb) - c.Usage() - c.output = tmpOutput - return bb.String() -} - -// CommandPath returns the full path to this command. 
-func (c *Command) CommandPath() string { - str := c.Name() - x := c - for x.HasParent() { - str = x.parent.Name() + " " + str - x = x.parent - } - return str -} - -//The full usage for a given command (including parents) -func (c *Command) UseLine() string { - str := "" - if c.HasParent() { - str = c.parent.CommandPath() + " " - } - return str + c.Use -} - -// For use in determining which flags have been assigned to which commands -// and which persist -func (c *Command) DebugFlags() { - c.Println("DebugFlags called on", c.Name()) - var debugflags func(*Command) - - debugflags = func(x *Command) { - if x.HasFlags() || x.HasPersistentFlags() { - c.Println(x.Name()) - } - if x.HasFlags() { - x.flags.VisitAll(func(f *flag.Flag) { - if x.HasPersistentFlags() { - if x.persistentFlag(f.Name) == nil { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") - } else { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]") - } - } else { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") - } - }) - } - if x.HasPersistentFlags() { - x.pflags.VisitAll(func(f *flag.Flag) { - if x.HasFlags() { - if x.flags.Lookup(f.Name) == nil { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") - } - } else { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") - } - }) - } - c.Println(x.flagErrorBuf) - if x.HasSubCommands() { - for _, y := range x.commands { - debugflags(y) - } - } - } - - debugflags(c) -} - -// Name returns the command's name: the first word in the use line. -func (c *Command) Name() string { - if c.name != "" { - return c.name - } - name := c.Use - i := strings.Index(name, " ") - if i >= 0 { - name = name[:i] - } - return name -} - -// Determine if a given string is an alias of the command. 
-func (c *Command) HasAlias(s string) bool { - for _, a := range c.Aliases { - if a == s { - return true - } - } - return false -} - -func (c *Command) NameAndAliases() string { - return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ") -} - -func (c *Command) HasExample() bool { - return len(c.Example) > 0 -} - -// Determine if the command is itself runnable -func (c *Command) Runnable() bool { - return c.Run != nil -} - -// Determine if the command has children commands -func (c *Command) HasSubCommands() bool { - return len(c.commands) > 0 -} - -func (c *Command) HasRunnableSiblings() bool { - if !c.HasParent() { - return false - } - for _, sub := range c.parent.commands { - if sub.Runnable() { - return true - } - } - return false -} - -func (c *Command) HasHelpSubCommands() bool { - for _, sub := range c.commands { - if !sub.Runnable() { - return true - } - } - return false -} - -// Determine if the command has runnable children commands -func (c *Command) HasRunnableSubCommands() bool { - for _, sub := range c.commands { - if sub.Runnable() { - return true - } - } - return false -} - -// Determine if the command is a child command -func (c *Command) HasParent() bool { - return c.parent != nil -} - -// GlobalNormalizationFunc returns the global normalization function or nil if doesn't exists -func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName { - return c.globNormFunc -} - -// Get the complete FlagSet that applies to this command (local and persistent declared here and by all parents) -func (c *Command) Flags() *flag.FlagSet { - if c.flags == nil { - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.flags.SetOutput(c.flagErrorBuf) - c.PersistentFlags().BoolVarP(&c.helpFlagVal, "help", "h", false, "help for "+c.Name()) - } - return c.flags -} - -// Get the local FlagSet specifically set in the current command -func (c *Command) LocalFlags() *flag.FlagSet { - c.mergePersistentFlags() - - local := flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.lflags.VisitAll(func(f *flag.Flag) { - local.AddFlag(f) - }) - return local -} - -// All Flags which were inherited from parents commands -func (c *Command) InheritedFlags() *flag.FlagSet { - c.mergePersistentFlags() - - inherited := flag.NewFlagSet(c.Name(), flag.ContinueOnError) - local := c.LocalFlags() - - var rmerge func(x *Command) - - rmerge = func(x *Command) { - if x.HasPersistentFlags() { - x.PersistentFlags().VisitAll(func(f *flag.Flag) { - if inherited.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil { - inherited.AddFlag(f) - } - }) - } - if x.HasParent() { - rmerge(x.parent) - } - } - - if c.HasParent() { - rmerge(c.parent) - } - - return inherited -} - -// All Flags which were not inherited from parent commands -func (c *Command) NonInheritedFlags() *flag.FlagSet { - return c.LocalFlags() -} - -// Get the Persistent FlagSet specifically set in the current command -func (c *Command) PersistentFlags() *flag.FlagSet { - if c.pflags == nil { - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.pflags.SetOutput(c.flagErrorBuf) - } - return c.pflags -} - -// For use in testing -func (c *Command) ResetFlags() { - c.flagErrorBuf = new(bytes.Buffer) - c.flagErrorBuf.Reset() - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.flags.SetOutput(c.flagErrorBuf) - c.pflags = flag.NewFlagSet(c.Name(), 
flag.ContinueOnError) - c.pflags.SetOutput(c.flagErrorBuf) -} - -// Does the command contain any flags (local plus persistent from the entire structure) -func (c *Command) HasFlags() bool { - return c.Flags().HasFlags() -} - -// Does the command contain persistent flags -func (c *Command) HasPersistentFlags() bool { - return c.PersistentFlags().HasFlags() -} - -// Does the command has flags specifically declared locally -func (c *Command) HasLocalFlags() bool { - return c.LocalFlags().HasFlags() -} - -func (c *Command) HasInheritedFlags() bool { - return c.InheritedFlags().HasFlags() -} - -// Climbs up the command tree looking for matching flag -func (c *Command) Flag(name string) (flag *flag.Flag) { - flag = c.Flags().Lookup(name) - - if flag == nil { - flag = c.persistentFlag(name) - } - - return -} - -// recursively find matching persistent flag -func (c *Command) persistentFlag(name string) (flag *flag.Flag) { - if c.HasPersistentFlags() { - flag = c.PersistentFlags().Lookup(name) - } - - if flag == nil && c.HasParent() { - flag = c.parent.persistentFlag(name) - } - return -} - -// Parses persistent flag tree & local flags -func (c *Command) ParseFlags(args []string) (err error) { - c.mergePersistentFlags() - err = c.Flags().Parse(args) - return -} - -func (c *Command) Parent() *Command { - return c.parent -} - -func (c *Command) mergePersistentFlags() { - var rmerge func(x *Command) - - // Save the set of local flags - if c.lflags == nil { - c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.lflags.SetOutput(c.flagErrorBuf) - addtolocal := func(f *flag.Flag) { - c.lflags.AddFlag(f) - } - c.Flags().VisitAll(addtolocal) - c.PersistentFlags().VisitAll(addtolocal) - } - rmerge = func(x *Command) { - if !x.HasParent() { - flag.CommandLine.VisitAll(func(f *flag.Flag) { - if x.PersistentFlags().Lookup(f.Name) == nil { - x.PersistentFlags().AddFlag(f) - } - }) - } - if x.HasPersistentFlags() { - x.PersistentFlags().VisitAll(func(f *flag.Flag) { - if c.Flags().Lookup(f.Name) == nil { - c.Flags().AddFlag(f) - } - }) - } - if x.HasParent() { - rmerge(x.parent) - } - } - - rmerge(c) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/md_docs.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/md_docs.go deleted file mode 100644 index 6092c85a..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/md_docs.go +++ /dev/null @@ -1,138 +0,0 @@ -//Copyright 2015 Red Hat Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package cobra - -import ( - "bytes" - "fmt" - "os" - "sort" - "strings" - "time" -) - -func printOptions(out *bytes.Buffer, cmd *Command, name string) { - flags := cmd.NonInheritedFlags() - flags.SetOutput(out) - if flags.HasFlags() { - fmt.Fprintf(out, "### Options\n\n```\n") - flags.PrintDefaults() - fmt.Fprintf(out, "```\n\n") - } - - parentFlags := cmd.InheritedFlags() - parentFlags.SetOutput(out) - if parentFlags.HasFlags() { - fmt.Fprintf(out, "### Options inherited from parent commands\n\n```\n") - parentFlags.PrintDefaults() - fmt.Fprintf(out, "```\n\n") - } -} - -type byName []*Command - -func (s byName) Len() int { return len(s) } -func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() } - -func GenMarkdown(cmd *Command, out *bytes.Buffer) { - GenMarkdownCustom(cmd, out, func(s string) string { return s }) -} - -func GenMarkdownCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) { - name := cmd.CommandPath() - - short := cmd.Short - long := cmd.Long - if len(long) == 0 { - long = short - } - - fmt.Fprintf(out, "## %s\n\n", name) - fmt.Fprintf(out, "%s\n\n", short) - fmt.Fprintf(out, "### Synopsis\n\n") - fmt.Fprintf(out, "\n%s\n\n", long) - - if cmd.Runnable() { - fmt.Fprintf(out, "```\n%s\n```\n\n", cmd.UseLine()) - } - - if len(cmd.Example) > 0 { - fmt.Fprintf(out, "### Examples\n\n") - fmt.Fprintf(out, "```\n%s\n```\n\n", cmd.Example) - } - - printOptions(out, cmd, name) - - if len(cmd.Commands()) > 0 || cmd.HasParent() { - fmt.Fprintf(out, "### SEE ALSO\n") - if cmd.HasParent() { - parent := cmd.Parent() - pname := parent.CommandPath() - link := pname + ".md" - link = strings.Replace(link, " ", "_", -1) - fmt.Fprintf(out, "* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short) - } - - children := cmd.Commands() - sort.Sort(byName(children)) - - for _, child := range children { - if len(child.Deprecated) > 0 { - continue - } - cname := name + " " + child.Name() - link := cname + ".md" - link = strings.Replace(link, " ", "_", -1) - fmt.Fprintf(out, "* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short) - } - fmt.Fprintf(out, "\n") - } - - fmt.Fprintf(out, "###### Auto generated by spf13/cobra at %s\n", time.Now().UTC()) -} - -func GenMarkdownTree(cmd *Command, dir string) { - identity := func(s string) string { return s } - emptyStr := func(s string) string { return "" } - GenMarkdownTreeCustom(cmd, dir, emptyStr, identity) -} - -func GenMarkdownTreeCustom(cmd *Command, dir string, filePrepender func(string) string, linkHandler func(string) string) { - for _, c := range cmd.Commands() { - GenMarkdownTreeCustom(c, dir, filePrepender, linkHandler) - } - out := new(bytes.Buffer) - - GenMarkdownCustom(cmd, out, linkHandler) - - filename := cmd.CommandPath() - filename = dir + strings.Replace(filename, " ", "_", -1) + ".md" - outFile, err := os.Create(filename) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - defer outFile.Close() - _, err = outFile.WriteString(filePrepender(filename)) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - _, err = outFile.Write(out.Bytes()) - if err != nil { - fmt.Println(err) - os.Exit(1) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/md_docs.md b/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/md_docs.md deleted file mode 100644 index 3a0d55ab..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/cobra/md_docs.md +++ /dev/null @@ -1,81 +0,0 @@ 
-# Generating Markdown Docs For Your Own cobra.Command
-
-## Generate markdown docs for the entire command tree
-
-This program can actually generate docs for the kubectl command in the kubernetes project:
-
-```go
-package main
-
-import (
-	"io/ioutil"
-	"os"
-
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd"
-	"github.com/spf13/cobra"
-)
-
-func main() {
-	kubectl := cmd.NewFactory(nil).NewKubectlCommand(os.Stdin, ioutil.Discard, ioutil.Discard)
-	cobra.GenMarkdownTree(kubectl, "./")
-}
-```
-
-This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./").
-
-## Generate markdown docs for a single command
-
-You may wish to have more control over the output, or to generate docs for only a single command instead of the entire command tree. If this is the case, you may prefer to use `GenMarkdown` instead of `GenMarkdownTree`:
-
-```go
-	out := new(bytes.Buffer)
-	cobra.GenMarkdown(cmd, out)
-```
-
-This will write the markdown doc for ONLY "cmd" into the out buffer.
-
-## Customize the output
-
-Both `GenMarkdown` and `GenMarkdownTree` have alternate versions with callbacks to give you some control over the output:
-
-```go
-func GenMarkdownTreeCustom(cmd *Command, dir string, filePrepender func(string) string, linkHandler func(string) string) {
-	//...
-}
-```
-
-```go
-func GenMarkdownCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) {
-	//...
-}
-```
-
-The `filePrepender` is given the full filepath of the rendered Markdown file, and its return value is prepended to that file. A common use case is to add front matter so the generated documentation can be used with [Hugo](http://gohugo.io/):
-
-```go
-const fmTemplate = `---
-date: %s
-title: "%s"
-slug: %s
-url: %s
----
-`
-
-filePrepender := func(filename string) string {
-	now := time.Now().Format(time.RFC3339)
-	name := filepath.Base(filename)
-	base := strings.TrimSuffix(name, path.Ext(name))
-	url := "/commands/" + strings.ToLower(base) + "/"
-	return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url)
-}
-```
-
-The `linkHandler` can be used to customize the rendered internal links to the commands, given a filename:
-
-```go
-linkHandler := func(name string) string {
-	base := strings.TrimSuffix(name, path.Ext(name))
-	return "/commands/" + strings.ToLower(base) + "/"
-}
-```
-
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/.travis.yml
deleted file mode 100644
index c4d88e37..00000000
--- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/.travis.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-sudo: false
-
-language: go
-
-go:
-  - 1.3
-  - 1.4
-  - tip
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/LICENSE
deleted file mode 100644
index 63ed1cfe..00000000
--- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright (c) 2012 Alex Ogier. All rights reserved.
-Copyright (c) 2012 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/README.md deleted file mode 100644 index f7d63500..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/README.md +++ /dev/null @@ -1,191 +0,0 @@ -[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag) - -## Description - -pflag is a drop-in replacement for Go's flag package, implementing -POSIX/GNU-style --flags. - -pflag is compatible with the [GNU extensions to the POSIX recommendations -for command-line options][1]. For a more precise description, see the -"Command-line flag syntax" section below. - -[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html - -pflag is available under the same style of BSD license as the Go language, -which can be found in the LICENSE file. - -## Installation - -pflag is available using the standard `go get` command. - -Install by running: - - go get github.com/ogier/pflag - -Run tests by running: - - go test github.com/ogier/pflag - -## Usage - -pflag is a drop-in replacement of Go's native flag package. If you import -pflag under the name "flag" then all code should continue to function -with no changes. - -``` go -import flag "github.com/ogier/pflag" -``` - -There is one exception to this: if you directly instantiate the Flag struct -there is one more field "Shorthand" that you will need to set. -Most code never instantiates this struct directly, and instead uses -functions such as String(), BoolVar(), and Var(), and is therefore -unaffected. - -Define flags using flag.String(), Bool(), Int(), etc. - -This declares an integer flag, -flagname, stored in the pointer ip, with type *int. - -``` go -var ip *int = flag.Int("flagname", 1234, "help message for flagname") -``` - -If you like, you can bind the flag to a variable using the Var() functions. - -``` go -var flagvar int -func init() { - flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") -} -``` - -Or you can create custom flags that satisfy the Value interface (with -pointer receivers) and couple them to flag parsing by - -``` go -flag.Var(&flagVal, "name", "help message for flagname") -``` - -For such flags, the default value is just the initial value of the variable. 
-
-After all flags are defined, call
-
-``` go
-flag.Parse()
-```
-
-to parse the command line into the defined flags.
-
-Flags may then be used directly. If you're using the flags themselves,
-they are all pointers; if you bind to variables, they're values.
-
-``` go
-fmt.Println("ip has value ", *ip)
-fmt.Println("flagvar has value ", flagvar)
-```
-
-After parsing, the arguments after the flags are available as the
-slice flag.Args() or individually as flag.Arg(i).
-The arguments are indexed from 0 through flag.NArg()-1.
-
-The pflag package also defines some new functions that are not in flag,
-that give one-letter shorthands for flags. You can use these by appending
-'P' to the name of any function that defines a flag.
-
-``` go
-var ip = flag.IntP("flagname", "f", 1234, "help message")
-var flagvar bool
-func init() {
-	flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
-}
-flag.VarP(&flagVal, "varname", "v", "help message")
-```
-
-Shorthand letters can be used with single dashes on the command line.
-Boolean shorthand flags can be combined with other shorthand flags.
-
-The default set of command-line flags is controlled by
-top-level functions. The FlagSet type allows one to define
-independent sets of flags, such as to implement subcommands
-in a command-line interface. The methods of FlagSet are
-analogous to the top-level functions for the command-line
-flag set.
-
-## Command line flag syntax
-
-```
---flag    // boolean flags only
---flag=x
-```
-
-Unlike the flag package, a single dash before an option means something
-different than a double dash. Single dashes signify a series of shorthand
-letters for flags. All but the last shorthand letter must be boolean flags.
-
-```
-// boolean flags
--f
--abc
-
-// non-boolean flags
--n 1234
--Ifile
-
-// mixed
--abcs "hello"
--abcn1234
-```
-
-Flag parsing stops after the terminator "--". Unlike the flag package,
-flags can be interspersed with arguments anywhere on the command line
-before this terminator.
-
-Integer flags accept 1234, 0664, 0x1234 and may be negative.
-Boolean flags (in their long form) accept 1, 0, t, f, true, false,
-TRUE, FALSE, True, False.
-Duration flags accept any input valid for time.ParseDuration.
-
-## Mutating or "Normalizing" Flag names
-
-It is possible to set a custom flag name 'normalization function'. It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow.
-
-**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag
-
-```go
-func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
-	from := []string{"-", "_"}
-	to := "."
-	for _, sep := range from {
-		name = strings.Replace(name, sep, to, -1)
-	}
-	return pflag.NormalizedName(name)
-}
-
-myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
-```
-
-**Example #2**: You want to alias two flags.
aka --old-flag-name == --new-flag-name - -```go -func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - switch name { - case "old-flag-name": - name = "new-flag-name" - break - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(aliasNormalizeFunc) -``` - -## More info - -You can see the full reference documentation of the pflag package -[at godoc.org][3], or through go's standard documentation system by -running `godoc -http=:6060` and browsing to -[http://localhost:6060/pkg/github.com/ogier/pflag][2] after -installation. - -[2]: http://localhost:6060/pkg/github.com/ogier/pflag -[3]: http://godoc.org/github.com/ogier/pflag diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/bool.go deleted file mode 100644 index 70e2e0a6..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/bool.go +++ /dev/null @@ -1,83 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// optional interface to indicate boolean flags that can be -// supplied without "=value" text -type boolFlag interface { - Value - IsBoolFlag() bool -} - -// -- bool Value -type boolValue bool - -func newBoolValue(val bool, p *bool) *boolValue { - *p = val - return (*boolValue)(p) -} - -func (b *boolValue) Set(s string) error { - v, err := strconv.ParseBool(s) - *b = boolValue(v) - return err -} - -func (b *boolValue) Type() string { - return "bool" -} - -func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } - -func (b *boolValue) IsBoolFlag() bool { return true } - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { - f.VarP(newBoolValue(value, p), name, "", usage) -} - -// Like BoolVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) { - f.VarP(newBoolValue(value, p), name, shorthand, usage) -} - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func BoolVar(p *bool, name string, value bool, usage string) { - CommandLine.VarP(newBoolValue(value, p), name, "", usage) -} - -// Like BoolVar, but accepts a shorthand letter that can be used after a single dash. -func BoolVarP(p *bool, name, shorthand string, value bool, usage string) { - CommandLine.VarP(newBoolValue(value, p), name, shorthand, usage) -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func (f *FlagSet) Bool(name string, value bool, usage string) *bool { - p := new(bool) - f.BoolVarP(p, name, "", value, usage) - return p -} - -// Like Bool, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool { - p := new(bool) - f.BoolVarP(p, name, shorthand, value, usage) - return p -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. 
-func Bool(name string, value bool, usage string) *bool { - return CommandLine.BoolP(name, "", value, usage) -} - -// Like Bool, but accepts a shorthand letter that can be used after a single dash. -func BoolP(name, shorthand string, value bool, usage string) *bool { - return CommandLine.BoolP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/duration.go deleted file mode 100644 index 66ed7ac9..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/duration.go +++ /dev/null @@ -1,71 +0,0 @@ -package pflag - -import "time" - -// -- time.Duration Value -type durationValue time.Duration - -func newDurationValue(val time.Duration, p *time.Duration) *durationValue { - *p = val - return (*durationValue)(p) -} - -func (d *durationValue) Set(s string) error { - v, err := time.ParseDuration(s) - *d = durationValue(v) - return err -} - -func (d *durationValue) Type() string { - return "duration" -} - -func (d *durationValue) String() string { return (*time.Duration)(d).String() } - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { - f.VarP(newDurationValue(value, p), name, "", usage) -} - -// Like DurationVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { - f.VarP(newDurationValue(value, p), name, shorthand, usage) -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { - CommandLine.VarP(newDurationValue(value, p), name, "", usage) -} - -// Like DurationVar, but accepts a shorthand letter that can be used after a single dash. -func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { - CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage) -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - f.DurationVarP(p, name, "", value, usage) - return p -} - -// Like Duration, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - f.DurationVarP(p, name, shorthand, value, usage) - return p -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func Duration(name string, value time.Duration, usage string) *time.Duration { - return CommandLine.DurationP(name, "", value, usage) -} - -// Like Duration, but accepts a shorthand letter that can be used after a single dash. 
-func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
-	return CommandLine.DurationP(name, shorthand, value, usage)
-}
diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/flag.go
deleted file mode 100644
index 0070b93e..00000000
--- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/flag.go
+++ /dev/null
@@ -1,695 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-	pflag is a drop-in replacement for Go's flag package, implementing
-	POSIX/GNU-style --flags.
-
-	pflag is compatible with the GNU extensions to the POSIX recommendations
-	for command-line options. See
-	http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
-
-	Usage:
-
-	pflag is a drop-in replacement of Go's native flag package. If you import
-	pflag under the name "flag" then all code should continue to function
-	with no changes.
-
-		import flag "github.com/ogier/pflag"
-
-	There is one exception to this: if you directly instantiate the Flag struct
-	there is one more field "Shorthand" that you will need to set.
-	Most code never instantiates this struct directly, and instead uses
-	functions such as String(), BoolVar(), and Var(), and is therefore
-	unaffected.
-
-	Define flags using flag.String(), Bool(), Int(), etc.
-
-	This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
-		var ip = flag.Int("flagname", 1234, "help message for flagname")
-	If you like, you can bind the flag to a variable using the Var() functions.
-		var flagvar int
-		func init() {
-			flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
-		}
-	Or you can create custom flags that satisfy the Value interface (with
-	pointer receivers) and couple them to flag parsing by
-		flag.Var(&flagVal, "name", "help message for flagname")
-	For such flags, the default value is just the initial value of the variable.
-
-	After all flags are defined, call
-		flag.Parse()
-	to parse the command line into the defined flags.
-
-	Flags may then be used directly. If you're using the flags themselves,
-	they are all pointers; if you bind to variables, they're values.
-		fmt.Println("ip has value ", *ip)
-		fmt.Println("flagvar has value ", flagvar)
-
-	After parsing, the arguments after the flags are available as the
-	slice flag.Args() or individually as flag.Arg(i).
-	The arguments are indexed from 0 through flag.NArg()-1.
-
-	The pflag package also defines some new functions that are not in flag,
-	that give one-letter shorthands for flags. You can use these by appending
-	'P' to the name of any function that defines a flag.
-		var ip = flag.IntP("flagname", "f", 1234, "help message")
-		var flagvar bool
-		func init() {
-			flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
-		}
-		flag.VarP(&flagVal, "varname", "v", "help message")
-	Shorthand letters can be used with single dashes on the command line.
-	Boolean shorthand flags can be combined with other shorthand flags.
-
-	Command line flag syntax:
-		--flag    // boolean flags only
-		--flag=x
-
-	Unlike the flag package, a single dash before an option means something
-	different than a double dash. Single dashes signify a series of shorthand
-	letters for flags. All but the last shorthand letter must be boolean flags.
-		// boolean flags
-		-f
-		-abc
-		// non-boolean flags
-		-n 1234
-		-Ifile
-		// mixed
-		-abcs "hello"
-		-abcn1234
-
-	Flag parsing stops after the terminator "--". Unlike the flag package,
-	flags can be interspersed with arguments anywhere on the command line
-	before this terminator.
-
-	Integer flags accept 1234, 0664, 0x1234 and may be negative.
-	Boolean flags (in their long form) accept 1, 0, t, f, true, false,
-	TRUE, FALSE, True, False.
-	Duration flags accept any input valid for time.ParseDuration.
-
-	The default set of command-line flags is controlled by
-	top-level functions. The FlagSet type allows one to define
-	independent sets of flags, such as to implement subcommands
-	in a command-line interface. The methods of FlagSet are
-	analogous to the top-level functions for the command-line
-	flag set.
-*/
-package pflag
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"io"
-	"os"
-	"sort"
-	"strings"
-)
-
-// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
-var ErrHelp = errors.New("pflag: help requested")
-
-// ErrorHandling defines how to handle flag parsing errors.
-type ErrorHandling int
-
-const (
-	ContinueOnError ErrorHandling = iota
-	ExitOnError
-	PanicOnError
-)
-
-// NormalizedName is a flag name that has been normalized according to rules
-// for the FlagSet (e.g. making '-' and '_' equivalent).
-type NormalizedName string
-
-// A FlagSet represents a set of defined flags.
-type FlagSet struct {
-	// Usage is the function called when an error occurs while parsing flags.
-	// The field is a function (not a method) that may be changed to point to
-	// a custom error handler.
-	Usage func()
-
-	name              string
-	parsed            bool
-	actual            map[NormalizedName]*Flag
-	formal            map[NormalizedName]*Flag
-	shorthands        map[byte]*Flag
-	args              []string // arguments after flags
-	exitOnError       bool     // does the program exit if there's an error?
-	errorHandling     ErrorHandling
-	output            io.Writer // nil means stderr; use out() accessor
-	interspersed      bool      // allow interspersed option/non-option args
-	normalizeNameFunc func(f *FlagSet, name string) NormalizedName
-}
-
-// A Flag represents the state of a flag.
-type Flag struct {
-	Name        string              // name as it appears on command line
-	Shorthand   string              // one-letter abbreviated flag
-	Usage       string              // help message
-	Value       Value               // value as set
-	DefValue    string              // default value (as text); for usage message
-	Changed     bool                // true if the user set the value (rather than leaving it at the default)
-	Deprecated  string              // if this flag is deprecated, this string is the message suggesting what to use instead
-	Annotations map[string][]string // used by cobra.Command bash autocompletion code
-}
-
-// Value is the interface to the dynamic value stored in a flag.
-// (The default value is represented as a string.)
-type Value interface {
-	String() string
-	Set(string) error
-	Type() string
-}
-
-// sortFlags returns the flags as a slice in lexicographical sorted order.
-func sortFlags(flags map[NormalizedName]*Flag) []*Flag { - list := make(sort.StringSlice, len(flags)) - i := 0 - for k := range flags { - list[i] = string(k) - i++ - } - list.Sort() - result := make([]*Flag, len(list)) - for i, name := range list { - result[i] = flags[NormalizedName(name)] - } - return result -} - -func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) { - f.normalizeNameFunc = n - for k, v := range f.formal { - delete(f.formal, k) - nname := f.normalizeFlagName(string(k)) - f.formal[nname] = v - v.Name = string(nname) - } -} - -func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName { - if f.normalizeNameFunc != nil { - return f.normalizeNameFunc - } - return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) } -} - -func (f *FlagSet) normalizeFlagName(name string) NormalizedName { - n := f.GetNormalizeFunc() - return n(f, name) -} - -func (f *FlagSet) out() io.Writer { - if f.output == nil { - return os.Stderr - } - return f.output -} - -// SetOutput sets the destination for usage and error messages. -// If output is nil, os.Stderr is used. -func (f *FlagSet) SetOutput(output io.Writer) { - f.output = output -} - -// VisitAll visits the flags in lexicographical order, calling fn for each. -// It visits all flags, even those not set. -func (f *FlagSet) VisitAll(fn func(*Flag)) { - for _, flag := range sortFlags(f.formal) { - fn(flag) - } -} - -func (f *FlagSet) HasFlags() bool { - return len(f.formal) > 0 -} - -// VisitAll visits the command-line flags in lexicographical order, calling -// fn for each. It visits all flags, even those not set. -func VisitAll(fn func(*Flag)) { - CommandLine.VisitAll(fn) -} - -// Visit visits the flags in lexicographical order, calling fn for each. -// It visits only those flags that have been set. -func (f *FlagSet) Visit(fn func(*Flag)) { - for _, flag := range sortFlags(f.actual) { - fn(flag) - } -} - -// Visit visits the command-line flags in lexicographical order, calling fn -// for each. It visits only those flags that have been set. -func Visit(fn func(*Flag)) { - CommandLine.Visit(fn) -} - -// Lookup returns the Flag structure of the named flag, returning nil if none exists. -func (f *FlagSet) Lookup(name string) *Flag { - return f.lookup(f.normalizeFlagName(name)) -} - -// lookup returns the Flag structure of the named flag, returning nil if none exists. -func (f *FlagSet) lookup(name NormalizedName) *Flag { - return f.formal[name] -} - -// Mark a flag deprecated in your program -func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { - flag := f.Lookup(name) - if flag == nil { - return fmt.Errorf("flag %q does not exist", name) - } - flag.Deprecated = usageMessage - return nil -} - -// Lookup returns the Flag structure of the named command-line flag, -// returning nil if none exists. -func Lookup(name string) *Flag { - return CommandLine.Lookup(name) -} - -// Set sets the value of the named flag. 
-func (f *FlagSet) Set(name, value string) error { - normalName := f.normalizeFlagName(name) - flag, ok := f.formal[normalName] - if !ok { - return fmt.Errorf("no such flag -%v", name) - } - err := flag.Value.Set(value) - if err != nil { - return err - } - if f.actual == nil { - f.actual = make(map[NormalizedName]*Flag) - } - f.actual[normalName] = flag - flag.Changed = true - if len(flag.Deprecated) > 0 { - fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) - } - return nil -} - -// Set sets the value of the named command-line flag. -func Set(name, value string) error { - return CommandLine.Set(name, value) -} - -// PrintDefaults prints, to standard error unless configured -// otherwise, the default values of all defined flags in the set. -func (f *FlagSet) PrintDefaults() { - f.VisitAll(func(flag *Flag) { - if len(flag.Deprecated) > 0 { - return - } - format := "--%s=%s: %s\n" - if _, ok := flag.Value.(*stringValue); ok { - // put quotes on the value - format = "--%s=%q: %s\n" - } - if len(flag.Shorthand) > 0 { - format = " -%s, " + format - } else { - format = " %s " + format - } - fmt.Fprintf(f.out(), format, flag.Shorthand, flag.Name, flag.DefValue, flag.Usage) - }) -} - -func (f *FlagSet) FlagUsages() string { - x := new(bytes.Buffer) - - f.VisitAll(func(flag *Flag) { - if len(flag.Deprecated) > 0 { - return - } - format := "--%s=%s: %s\n" - if _, ok := flag.Value.(*stringValue); ok { - // put quotes on the value - format = "--%s=%q: %s\n" - } - if len(flag.Shorthand) > 0 { - format = " -%s, " + format - } else { - format = " %s " + format - } - fmt.Fprintf(x, format, flag.Shorthand, flag.Name, flag.DefValue, flag.Usage) - }) - - return x.String() -} - -// PrintDefaults prints to standard error the default values of all defined command-line flags. -func PrintDefaults() { - CommandLine.PrintDefaults() -} - -// defaultUsage is the default function to print a usage message. -func defaultUsage(f *FlagSet) { - fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) - f.PrintDefaults() -} - -// NOTE: Usage is not just defaultUsage(CommandLine) -// because it serves (via godoc flag Usage) as the example -// for how to write your own usage function. - -// Usage prints to standard error a usage message documenting all defined command-line flags. -// The function is a variable that may be changed to point to a custom function. -var Usage = func() { - fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) - PrintDefaults() -} - -// NFlag returns the number of flags that have been set. -func (f *FlagSet) NFlag() int { return len(f.actual) } - -// NFlag returns the number of command-line flags that have been set. -func NFlag() int { return len(CommandLine.actual) } - -// Arg returns the i'th argument. Arg(0) is the first remaining argument -// after flags have been processed. -func (f *FlagSet) Arg(i int) string { - if i < 0 || i >= len(f.args) { - return "" - } - return f.args[i] -} - -// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument -// after flags have been processed. -func Arg(i int) string { - return CommandLine.Arg(i) -} - -// NArg is the number of arguments remaining after flags have been processed. -func (f *FlagSet) NArg() int { return len(f.args) } - -// NArg is the number of arguments remaining after flags have been processed. -func NArg() int { return len(CommandLine.args) } - -// Args returns the non-flag arguments. 
-func (f *FlagSet) Args() []string { return f.args } - -// Args returns the non-flag command-line arguments. -func Args() []string { return CommandLine.args } - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func (f *FlagSet) Var(value Value, name string, usage string) { - f.VarP(value, name, "", usage) -} - -// Like Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) VarP(value Value, name, shorthand, usage string) { - // Remember the default value as a string; it won't change. - flag := &Flag{ - Name: name, - Shorthand: shorthand, - Usage: usage, - Value: value, - DefValue: value.String(), - } - f.AddFlag(flag) -} - -func (f *FlagSet) AddFlag(flag *Flag) { - // Call normalizeFlagName function only once - var normalizedFlagName NormalizedName = f.normalizeFlagName(flag.Name) - - _, alreadythere := f.formal[normalizedFlagName] - if alreadythere { - msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name) - fmt.Fprintln(f.out(), msg) - panic(msg) // Happens only if flags are declared with identical names - } - if f.formal == nil { - f.formal = make(map[NormalizedName]*Flag) - } - - flag.Name = string(normalizedFlagName) - f.formal[normalizedFlagName] = flag - - if len(flag.Shorthand) == 0 { - return - } - if len(flag.Shorthand) > 1 { - fmt.Fprintf(f.out(), "%s shorthand more than ASCII character: %s\n", f.name, flag.Shorthand) - panic("shorthand is more than one character") - } - if f.shorthands == nil { - f.shorthands = make(map[byte]*Flag) - } - c := flag.Shorthand[0] - old, alreadythere := f.shorthands[c] - if alreadythere { - fmt.Fprintf(f.out(), "%s shorthand reused: %q for %s already used for %s\n", f.name, c, flag.Name, old.Name) - panic("shorthand redefinition") - } - f.shorthands[c] = flag -} - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func Var(value Value, name string, usage string) { - CommandLine.VarP(value, name, "", usage) -} - -// Like Var, but accepts a shorthand letter that can be used after a single dash. -func VarP(value Value, name, shorthand, usage string) { - CommandLine.VarP(value, name, shorthand, usage) -} - -// failf prints to standard error a formatted error and usage message and -// returns the error. -func (f *FlagSet) failf(format string, a ...interface{}) error { - err := fmt.Errorf(format, a...) - fmt.Fprintln(f.out(), err) - f.usage() - return err -} - -// usage calls the Usage method for the flag set, or the usage function if -// the flag set is CommandLine. 
-func (f *FlagSet) usage() { - if f == CommandLine { - Usage() - } else if f.Usage == nil { - defaultUsage(f) - } else { - f.Usage() - } -} - -func (f *FlagSet) setFlag(flag *Flag, value string, origArg string) error { - if err := flag.Value.Set(value); err != nil { - return f.failf("invalid argument %q for %s: %v", value, origArg, err) - } - // mark as visited for Visit() - if f.actual == nil { - f.actual = make(map[NormalizedName]*Flag) - } - f.actual[f.normalizeFlagName(flag.Name)] = flag - flag.Changed = true - if len(flag.Deprecated) > 0 { - fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) - } - return nil -} - -func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error) { - a = args - name := s[2:] - if len(name) == 0 || name[0] == '-' || name[0] == '=' { - err = f.failf("bad flag syntax: %s", s) - return - } - split := strings.SplitN(name, "=", 2) - name = split[0] - flag, alreadythere := f.formal[f.normalizeFlagName(name)] - if !alreadythere { - if name == "help" { // special case for nice help message. - f.usage() - return a, ErrHelp - } - err = f.failf("unknown flag: --%s", name) - return - } - var value string - if len(split) == 2 { - // '--flag=arg' - value = split[1] - } else if bv, ok := flag.Value.(boolFlag); ok && bv.IsBoolFlag() { - // '--flag' (where flag is a bool) - value = "true" - } else { - // '--flag' (where flag was not a bool) - err = f.failf("flag needs an argument: %s", s) - return - } - err = f.setFlag(flag, value, s) - return -} - -func (f *FlagSet) parseSingleShortArg(shorthands string, args []string) (outShorts string, outArgs []string, err error) { - outArgs = args - outShorts = shorthands[1:] - c := shorthands[0] - - flag, alreadythere := f.shorthands[c] - if !alreadythere { - if c == 'h' { // special case for nice help message. - f.usage() - err = ErrHelp - return - } - //TODO continue on error - err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) - return - } - var value string - if len(shorthands) > 2 && shorthands[1] == '=' { - value = shorthands[2:] - outShorts = "" - } else if bv, ok := flag.Value.(boolFlag); ok && bv.IsBoolFlag() { - value = "true" - } else if len(shorthands) > 1 { - value = shorthands[1:] - outShorts = "" - } else if len(args) > 0 { - value = args[0] - outArgs = args[1:] - } else { - err = f.failf("flag needs an argument: %q in -%s", c, shorthands) - return - } - err = f.setFlag(flag, value, shorthands) - return -} - -func (f *FlagSet) parseShortArg(s string, args []string) (a []string, err error) { - a = args - shorthands := s[1:] - - for len(shorthands) > 0 { - shorthands, a, err = f.parseSingleShortArg(shorthands, args) - if err != nil { - return - } - } - - return -} - -func (f *FlagSet) parseArgs(args []string) (err error) { - for len(args) > 0 { - s := args[0] - args = args[1:] - if len(s) == 0 || s[0] != '-' || len(s) == 1 { - if !f.interspersed { - f.args = append(f.args, s) - f.args = append(f.args, args...) - return nil - } - f.args = append(f.args, s) - continue - } - - if s[1] == '-' { - if len(s) == 2 { // "--" terminates the flags - f.args = append(f.args, args...) - break - } - args, err = f.parseLongArg(s, args) - } else { - args, err = f.parseShortArg(s, args) - } - if err != nil { - return - } - } - return -} - -// Parse parses flag definitions from the argument list, which should not -// include the command name. Must be called after all flags in the FlagSet -// are defined and before flags are accessed by the program. 
-// The return value will be ErrHelp if -help was set but not defined. -func (f *FlagSet) Parse(arguments []string) error { - f.parsed = true - f.args = make([]string, 0, len(arguments)) - err := f.parseArgs(arguments) - if err != nil { - switch f.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - os.Exit(2) - case PanicOnError: - panic(err) - } - } - return nil -} - -// Parsed reports whether f.Parse has been called. -func (f *FlagSet) Parsed() bool { - return f.parsed -} - -// Parse parses the command-line flags from os.Args[1:]. Must be called -// after all flags are defined and before flags are accessed by the program. -func Parse() { - // Ignore errors; CommandLine is set for ExitOnError. - CommandLine.Parse(os.Args[1:]) -} - -// Whether to support interspersed option/non-option arguments. -func SetInterspersed(interspersed bool) { - CommandLine.SetInterspersed(interspersed) -} - -// Parsed returns true if the command-line flags have been parsed. -func Parsed() bool { - return CommandLine.Parsed() -} - -// The default set of command-line flags, parsed from os.Args. -var CommandLine = NewFlagSet(os.Args[0], ExitOnError) - -// NewFlagSet returns a new, empty flag set with the specified name and -// error handling property. -func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { - f := &FlagSet{ - name: name, - errorHandling: errorHandling, - interspersed: true, - } - return f -} - -// Whether to support interspersed option/non-option arguments. -func (f *FlagSet) SetInterspersed(interspersed bool) { - f.interspersed = interspersed -} - -// Init sets the name and error handling property for a flag set. -// By default, the zero FlagSet uses an empty name and the -// ContinueOnError error handling policy. -func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { - f.name = name - f.errorHandling = errorHandling -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/float32.go deleted file mode 100644 index b7ad67d9..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/float32.go +++ /dev/null @@ -1,74 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// -- float32 Value -type float32Value float32 - -func newFloat32Value(val float32, p *float32) *float32Value { - *p = val - return (*float32Value)(p) -} - -func (f *float32Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 32) - *f = float32Value(v) - return err -} - -func (f *float32Value) Type() string { - return "float32" -} - -func (f *float32Value) String() string { return fmt.Sprintf("%v", *f) } - -// Float32Var defines a float32 flag with specified name, default value, and usage string. -// The argument p points to a float32 variable in which to store the value of the flag. -func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) { - f.VarP(newFloat32Value(value, p), name, "", usage) -} - -// Like Float32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) { - f.VarP(newFloat32Value(value, p), name, shorthand, usage) -} - -// Float32Var defines a float32 flag with specified name, default value, and usage string. -// The argument p points to a float32 variable in which to store the value of the flag. 
-func Float32Var(p *float32, name string, value float32, usage string) { - CommandLine.VarP(newFloat32Value(value, p), name, "", usage) -} - -// Like Float32Var, but accepts a shorthand letter that can be used after a single dash. -func Float32VarP(p *float32, name, shorthand string, value float32, usage string) { - CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage) -} - -// Float32 defines a float32 flag with specified name, default value, and usage string. -// The return value is the address of a float32 variable that stores the value of the flag. -func (f *FlagSet) Float32(name string, value float32, usage string) *float32 { - p := new(float32) - f.Float32VarP(p, name, "", value, usage) - return p -} - -// Like Float32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 { - p := new(float32) - f.Float32VarP(p, name, shorthand, value, usage) - return p -} - -// Float32 defines a float32 flag with specified name, default value, and usage string. -// The return value is the address of a float32 variable that stores the value of the flag. -func Float32(name string, value float32, usage string) *float32 { - return CommandLine.Float32P(name, "", value, usage) -} - -// Like Float32, but accepts a shorthand letter that can be used after a single dash. -func Float32P(name, shorthand string, value float32, usage string) *float32 { - return CommandLine.Float32P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/float64.go deleted file mode 100644 index 03155123..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/float64.go +++ /dev/null @@ -1,74 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// -- float64 Value -type float64Value float64 - -func newFloat64Value(val float64, p *float64) *float64Value { - *p = val - return (*float64Value)(p) -} - -func (f *float64Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 64) - *f = float64Value(v) - return err -} - -func (f *float64Value) Type() string { - return "float64" -} - -func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. -func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { - f.VarP(newFloat64Value(value, p), name, "", usage) -} - -// Like Float64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) { - f.VarP(newFloat64Value(value, p), name, shorthand, usage) -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. -func Float64Var(p *float64, name string, value float64, usage string) { - CommandLine.VarP(newFloat64Value(value, p), name, "", usage) -} - -// Like Float64Var, but accepts a shorthand letter that can be used after a single dash. 
-func Float64VarP(p *float64, name, shorthand string, value float64, usage string) { - CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage) -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { - p := new(float64) - f.Float64VarP(p, name, "", value, usage) - return p -} - -// Like Float64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 { - p := new(float64) - f.Float64VarP(p, name, shorthand, value, usage) - return p -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func Float64(name string, value float64, usage string) *float64 { - return CommandLine.Float64P(name, "", value, usage) -} - -// Like Float64, but accepts a shorthand letter that can be used after a single dash. -func Float64P(name, shorthand string, value float64, usage string) *float64 { - return CommandLine.Float64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int.go deleted file mode 100644 index dca9da6e..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int.go +++ /dev/null @@ -1,74 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// -- int Value -type intValue int - -func newIntValue(val int, p *int) *intValue { - *p = val - return (*intValue)(p) -} - -func (i *intValue) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = intValue(v) - return err -} - -func (i *intValue) Type() string { - return "int" -} - -func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { - f.VarP(newIntValue(value, p), name, "", usage) -} - -// Like IntVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) { - f.VarP(newIntValue(value, p), name, shorthand, usage) -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func IntVar(p *int, name string, value int, usage string) { - CommandLine.VarP(newIntValue(value, p), name, "", usage) -} - -// Like IntVar, but accepts a shorthand letter that can be used after a single dash. -func IntVarP(p *int, name, shorthand string, value int, usage string) { - CommandLine.VarP(newIntValue(value, p), name, shorthand, usage) -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func (f *FlagSet) Int(name string, value int, usage string) *int { - p := new(int) - f.IntVarP(p, name, "", value, usage) - return p -} - -// Like Int, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int { - p := new(int) - f.IntVarP(p, name, shorthand, value, usage) - return p -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func Int(name string, value int, usage string) *int { - return CommandLine.IntP(name, "", value, usage) -} - -// Like Int, but accepts a shorthand letter that can be used after a single dash. -func IntP(name, shorthand string, value int, usage string) *int { - return CommandLine.IntP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int32.go deleted file mode 100644 index 18eaacd6..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int32.go +++ /dev/null @@ -1,74 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// -- int32 Value -type int32Value int32 - -func newInt32Value(val int32, p *int32) *int32Value { - *p = val - return (*int32Value)(p) -} - -func (i *int32Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 32) - *i = int32Value(v) - return err -} - -func (i *int32Value) Type() string { - return "int32" -} - -func (i *int32Value) String() string { return fmt.Sprintf("%v", *i) } - -// Int32Var defines an int32 flag with specified name, default value, and usage string. -// The argument p points to an int32 variable in which to store the value of the flag. -func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) { - f.VarP(newInt32Value(value, p), name, "", usage) -} - -// Like Int32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) { - f.VarP(newInt32Value(value, p), name, shorthand, usage) -} - -// Int32Var defines an int32 flag with specified name, default value, and usage string. -// The argument p points to an int32 variable in which to store the value of the flag. -func Int32Var(p *int32, name string, value int32, usage string) { - CommandLine.VarP(newInt32Value(value, p), name, "", usage) -} - -// Like Int32Var, but accepts a shorthand letter that can be used after a single dash. -func Int32VarP(p *int32, name, shorthand string, value int32, usage string) { - CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage) -} - -// Int32 defines an int32 flag with specified name, default value, and usage string. -// The return value is the address of an int32 variable that stores the value of the flag. -func (f *FlagSet) Int32(name string, value int32, usage string) *int32 { - p := new(int32) - f.Int32VarP(p, name, "", value, usage) - return p -} - -// Like Int32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 { - p := new(int32) - f.Int32VarP(p, name, shorthand, value, usage) - return p -} - -// Int32 defines an int32 flag with specified name, default value, and usage string. -// The return value is the address of an int32 variable that stores the value of the flag. -func Int32(name string, value int32, usage string) *int32 { - return CommandLine.Int32P(name, "", value, usage) -} - -// Like Int32, but accepts a shorthand letter that can be used after a single dash. 
-func Int32P(name, shorthand string, value int32, usage string) *int32 { - return CommandLine.Int32P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int64.go deleted file mode 100644 index 0114aaaa..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int64.go +++ /dev/null @@ -1,74 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// -- int64 Value -type int64Value int64 - -func newInt64Value(val int64, p *int64) *int64Value { - *p = val - return (*int64Value)(p) -} - -func (i *int64Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = int64Value(v) - return err -} - -func (i *int64Value) Type() string { - return "int64" -} - -func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { - f.VarP(newInt64Value(value, p), name, "", usage) -} - -// Like Int64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) { - f.VarP(newInt64Value(value, p), name, shorthand, usage) -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func Int64Var(p *int64, name string, value int64, usage string) { - CommandLine.VarP(newInt64Value(value, p), name, "", usage) -} - -// Like Int64Var, but accepts a shorthand letter that can be used after a single dash. -func Int64VarP(p *int64, name, shorthand string, value int64, usage string) { - CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage) -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { - p := new(int64) - f.Int64VarP(p, name, "", value, usage) - return p -} - -// Like Int64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 { - p := new(int64) - f.Int64VarP(p, name, shorthand, value, usage) - return p -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func Int64(name string, value int64, usage string) *int64 { - return CommandLine.Int64P(name, "", value, usage) -} - -// Like Int64, but accepts a shorthand letter that can be used after a single dash. 
-func Int64P(name, shorthand string, value int64, usage string) *int64 { - return CommandLine.Int64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int8.go deleted file mode 100644 index aab1022f..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/int8.go +++ /dev/null @@ -1,74 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// -- int8 Value -type int8Value int8 - -func newInt8Value(val int8, p *int8) *int8Value { - *p = val - return (*int8Value)(p) -} - -func (i *int8Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 8) - *i = int8Value(v) - return err -} - -func (i *int8Value) Type() string { - return "int8" -} - -func (i *int8Value) String() string { return fmt.Sprintf("%v", *i) } - -// Int8Var defines an int8 flag with specified name, default value, and usage string. -// The argument p points to an int8 variable in which to store the value of the flag. -func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) { - f.VarP(newInt8Value(value, p), name, "", usage) -} - -// Like Int8Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) { - f.VarP(newInt8Value(value, p), name, shorthand, usage) -} - -// Int8Var defines an int8 flag with specified name, default value, and usage string. -// The argument p points to an int8 variable in which to store the value of the flag. -func Int8Var(p *int8, name string, value int8, usage string) { - CommandLine.VarP(newInt8Value(value, p), name, "", usage) -} - -// Like Int8Var, but accepts a shorthand letter that can be used after a single dash. -func Int8VarP(p *int8, name, shorthand string, value int8, usage string) { - CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage) -} - -// Int8 defines an int8 flag with specified name, default value, and usage string. -// The return value is the address of an int8 variable that stores the value of the flag. -func (f *FlagSet) Int8(name string, value int8, usage string) *int8 { - p := new(int8) - f.Int8VarP(p, name, "", value, usage) - return p -} - -// Like Int8, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 { - p := new(int8) - f.Int8VarP(p, name, shorthand, value, usage) - return p -} - -// Int8 defines an int8 flag with specified name, default value, and usage string. -// The return value is the address of an int8 variable that stores the value of the flag. -func Int8(name string, value int8, usage string) *int8 { - return CommandLine.Int8P(name, "", value, usage) -} - -// Like Int8, but accepts a shorthand letter that can be used after a single dash. 
-func Int8P(name, shorthand string, value int8, usage string) *int8 { - return CommandLine.Int8P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/ip.go deleted file mode 100644 index efa75fbc..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/ip.go +++ /dev/null @@ -1,76 +0,0 @@ -package pflag - -import ( - "fmt" - "net" -) - -// -- net.IP value -type ipValue net.IP - -func newIPValue(val net.IP, p *net.IP) *ipValue { - *p = val - return (*ipValue)(p) -} - -func (i *ipValue) String() string { return net.IP(*i).String() } -func (i *ipValue) Set(s string) error { - ip := net.ParseIP(s) - if ip == nil { - return fmt.Errorf("failed to parse IP: %q", s) - } - *i = ipValue(ip) - return nil -} - -func (i *ipValue) Type() string { - return "ip" -} - -// IPVar defines an net.IP flag with specified name, default value, and usage string. -// The argument p points to an net.IP variable in which to store the value of the flag. -func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) { - f.VarP(newIPValue(value, p), name, "", usage) -} - -// Like IPVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { - f.VarP(newIPValue(value, p), name, shorthand, usage) -} - -// IPVar defines an net.IP flag with specified name, default value, and usage string. -// The argument p points to an net.IP variable in which to store the value of the flag. -func IPVar(p *net.IP, name string, value net.IP, usage string) { - CommandLine.VarP(newIPValue(value, p), name, "", usage) -} - -// Like IPVar, but accepts a shorthand letter that can be used after a single dash. -func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { - CommandLine.VarP(newIPValue(value, p), name, shorthand, usage) -} - -// IP defines an net.IP flag with specified name, default value, and usage string. -// The return value is the address of an net.IP variable that stores the value of the flag. -func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP { - p := new(net.IP) - f.IPVarP(p, name, "", value, usage) - return p -} - -// Like IP, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP { - p := new(net.IP) - f.IPVarP(p, name, shorthand, value, usage) - return p -} - -// IP defines an net.IP flag with specified name, default value, and usage string. -// The return value is the address of an net.IP variable that stores the value of the flag. -func IP(name string, value net.IP, usage string) *net.IP { - return CommandLine.IPP(name, "", value, usage) -} - -// Like IP, but accepts a shorthand letter that can be used after a single dash. 
-func IPP(name, shorthand string, value net.IP, usage string) *net.IP { - return CommandLine.IPP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/ipmask.go deleted file mode 100644 index 09b9533e..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/ipmask.go +++ /dev/null @@ -1,86 +0,0 @@ -package pflag - -import ( - "fmt" - "net" -) - -// -- net.IPMask value -type ipMaskValue net.IPMask - -func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue { - *p = val - return (*ipMaskValue)(p) -} - -func (i *ipMaskValue) String() string { return net.IPMask(*i).String() } -func (i *ipMaskValue) Set(s string) error { - ip := ParseIPv4Mask(s) - if ip == nil { - return fmt.Errorf("failed to parse IP mask: %q", s) - } - *i = ipMaskValue(ip) - return nil -} - -func (i *ipMaskValue) Type() string { - return "ipMask" -} - -// Parse IPv4 netmask written in IP form (e.g. 255.255.255.0). -// This function should really belong to the net package. -func ParseIPv4Mask(s string) net.IPMask { - mask := net.ParseIP(s) - if mask == nil { - return nil - } - return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15]) -} - -// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. -// The argument p points to an net.IPMask variable in which to store the value of the flag. -func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { - f.VarP(newIPMaskValue(value, p), name, "", usage) -} - -// Like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { - f.VarP(newIPMaskValue(value, p), name, shorthand, usage) -} - -// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. -// The argument p points to an net.IPMask variable in which to store the value of the flag. -func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { - CommandLine.VarP(newIPMaskValue(value, p), name, "", usage) -} - -// Like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. -func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { - CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage) -} - -// IPMask defines an net.IPMask flag with specified name, default value, and usage string. -// The return value is the address of an net.IPMask variable that stores the value of the flag. -func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask { - p := new(net.IPMask) - f.IPMaskVarP(p, name, "", value, usage) - return p -} - -// Like IPMask, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { - p := new(net.IPMask) - f.IPMaskVarP(p, name, shorthand, value, usage) - return p -} - -// IPMask defines an net.IPMask flag with specified name, default value, and usage string. -// The return value is the address of an net.IPMask variable that stores the value of the flag. -func IPMask(name string, value net.IPMask, usage string) *net.IPMask { - return CommandLine.IPMaskP(name, "", value, usage) -} - -// Like IP, but accepts a shorthand letter that can be used after a single dash. 
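ParseIPv4Mask in the deleted ipmask.go relies on a quirk of the standard library: net.ParseIP returns IPv4 addresses in the 16-byte IPv4-in-IPv6 form, so the four mask octets sit at indices 12 through 15. A small stdlib-only sketch of the same trick, with the prefix length recovered via IPMask.Size:

```go
package main

import (
	"fmt"
	"net"
)

// parseIPv4Mask mirrors the deleted helper: parse a dotted-quad mask
// and take the last four bytes of the 16-byte representation.
func parseIPv4Mask(s string) net.IPMask {
	ip := net.ParseIP(s)
	if ip == nil {
		return nil
	}
	return net.IPv4Mask(ip[12], ip[13], ip[14], ip[15])
}

func main() {
	mask := parseIPv4Mask("255.255.255.0")
	ones, bits := mask.Size() // 24, 32 for a /24
	fmt.Printf("mask=%v ones=%d bits=%d\n", mask, ones, bits)
}
```

Like the original, this accepts any parseable IP — including an IPv6 address — and blindly takes the last four bytes, so a stricter variant would also require ip.To4() != nil before indexing.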
-func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { - return CommandLine.IPMaskP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/string.go deleted file mode 100644 index 362fbf8a..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/string.go +++ /dev/null @@ -1,69 +0,0 @@ -package pflag - -import "fmt" - -// -- string Value -type stringValue string - -func newStringValue(val string, p *string) *stringValue { - *p = val - return (*stringValue)(p) -} - -func (s *stringValue) Set(val string) error { - *s = stringValue(val) - return nil -} -func (s *stringValue) Type() string { - return "string" -} - -func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. -func (f *FlagSet) StringVar(p *string, name string, value string, usage string) { - f.VarP(newStringValue(value, p), name, "", usage) -} - -// Like StringVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) { - f.VarP(newStringValue(value, p), name, shorthand, usage) -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. -func StringVar(p *string, name string, value string, usage string) { - CommandLine.VarP(newStringValue(value, p), name, "", usage) -} - -// Like StringVar, but accepts a shorthand letter that can be used after a single dash. -func StringVarP(p *string, name, shorthand string, value string, usage string) { - CommandLine.VarP(newStringValue(value, p), name, shorthand, usage) -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func (f *FlagSet) String(name string, value string, usage string) *string { - p := new(string) - f.StringVarP(p, name, "", value, usage) - return p -} - -// Like String, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string { - p := new(string) - f.StringVarP(p, name, shorthand, value, usage) - return p -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func String(name string, value string, usage string) *string { - return CommandLine.StringP(name, "", value, usage) -} - -// Like String, but accepts a shorthand letter that can be used after a single dash. 
-func StringP(name, shorthand string, value string, usage string) *string { - return CommandLine.StringP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint.go deleted file mode 100644 index c063fe7c..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint.go +++ /dev/null @@ -1,74 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// -- uint Value -type uintValue uint - -func newUintValue(val uint, p *uint) *uintValue { - *p = val - return (*uintValue)(p) -} - -func (i *uintValue) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uintValue(v) - return err -} - -func (i *uintValue) Type() string { - return "uint" -} - -func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) { - f.VarP(newUintValue(value, p), name, "", usage) -} - -// Like UintVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) { - f.VarP(newUintValue(value, p), name, shorthand, usage) -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func UintVar(p *uint, name string, value uint, usage string) { - CommandLine.VarP(newUintValue(value, p), name, "", usage) -} - -// Like UintVar, but accepts a shorthand letter that can be used after a single dash. -func UintVarP(p *uint, name, shorthand string, value uint, usage string) { - CommandLine.VarP(newUintValue(value, p), name, shorthand, usage) -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func (f *FlagSet) Uint(name string, value uint, usage string) *uint { - p := new(uint) - f.UintVarP(p, name, "", value, usage) - return p -} - -// Like Uint, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint { - p := new(uint) - f.UintVarP(p, name, shorthand, value, usage) - return p -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint(name string, value uint, usage string) *uint { - return CommandLine.UintP(name, "", value, usage) -} - -// Like Uint, but accepts a shorthand letter that can be used after a single dash. 
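One subtlety in the deleted uint.go is worth flagging: Set parses with a bit size of 64 even though the destination is a platform-sized uint, so on a 32-bit build a value such as 2^40 parses cleanly and is then silently truncated by the uintValue(v) conversion (uint16 and uint32 avoid this by passing their exact widths). A hypothetical stricter variant would key the bit size to strconv.IntSize:

```go
package main

import (
	"fmt"
	"strconv"
)

type uintValue uint

// Set rejects values that do not fit a platform-sized uint, instead of
// parsing at 64 bits and silently truncating on 32-bit builds.
func (i *uintValue) Set(s string) error {
	v, err := strconv.ParseUint(s, 0, strconv.IntSize) // 32 or 64
	if err != nil {
		return err
	}
	*i = uintValue(v)
	return nil
}

func main() {
	var u uintValue
	// 2^40: accepted on 64-bit builds, a range error on 32-bit builds.
	fmt.Println(u.Set("1099511627776"))
}
```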
-func UintP(name, shorthand string, value uint, usage string) *uint { - return CommandLine.UintP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint16.go deleted file mode 100644 index ab1c1f9e..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint16.go +++ /dev/null @@ -1,72 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// -- uint16 value -type uint16Value uint16 - -func newUint16Value(val uint16, p *uint16) *uint16Value { - *p = val - return (*uint16Value)(p) -} -func (i *uint16Value) String() string { return fmt.Sprintf("%d", *i) } -func (i *uint16Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 16) - *i = uint16Value(v) - return err -} - -func (i *uint16Value) Type() string { - return "uint16" -} - -// Uint16Var defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) { - f.VarP(newUint16Value(value, p), name, "", usage) -} - -// Like Uint16Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { - f.VarP(newUint16Value(value, p), name, shorthand, usage) -} - -// Uint16Var defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func Uint16Var(p *uint16, name string, value uint16, usage string) { - CommandLine.VarP(newUint16Value(value, p), name, "", usage) -} - -// Like Uint16Var, but accepts a shorthand letter that can be used after a single dash. -func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { - CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage) -} - -// Uint16 defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 { - p := new(uint16) - f.Uint16VarP(p, name, "", value, usage) - return p -} - -// Like Uint16, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 { - p := new(uint16) - f.Uint16VarP(p, name, shorthand, value, usage) - return p -} - -// Uint16 defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint16(name string, value uint16, usage string) *uint16 { - return CommandLine.Uint16P(name, "", value, usage) -} - -// Like Uint16, but accepts a shorthand letter that can be used after a single dash. 
-func Uint16P(name, shorthand string, value uint16, usage string) *uint16 { - return CommandLine.Uint16P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint32.go deleted file mode 100644 index db635ae8..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint32.go +++ /dev/null @@ -1,72 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// -- uint16 value -type uint32Value uint32 - -func newUint32Value(val uint32, p *uint32) *uint32Value { - *p = val - return (*uint32Value)(p) -} -func (i *uint32Value) String() string { return fmt.Sprintf("%d", *i) } -func (i *uint32Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 32) - *i = uint32Value(v) - return err -} - -func (i *uint32Value) Type() string { - return "uint32" -} - -// Uint32Var defines a uint32 flag with specified name, default value, and usage string. -// The argument p points to a uint32 variable in which to store the value of the flag. -func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) { - f.VarP(newUint32Value(value, p), name, "", usage) -} - -// Like Uint32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { - f.VarP(newUint32Value(value, p), name, shorthand, usage) -} - -// Uint32Var defines a uint32 flag with specified name, default value, and usage string. -// The argument p points to a uint32 variable in which to store the value of the flag. -func Uint32Var(p *uint32, name string, value uint32, usage string) { - CommandLine.VarP(newUint32Value(value, p), name, "", usage) -} - -// Like Uint32Var, but accepts a shorthand letter that can be used after a single dash. -func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { - CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage) -} - -// Uint32 defines a uint32 flag with specified name, default value, and usage string. -// The return value is the address of a uint32 variable that stores the value of the flag. -func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 { - p := new(uint32) - f.Uint32VarP(p, name, "", value, usage) - return p -} - -// Like Uint32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 { - p := new(uint32) - f.Uint32VarP(p, name, shorthand, value, usage) - return p -} - -// Uint32 defines a uint32 flag with specified name, default value, and usage string. -// The return value is the address of a uint32 variable that stores the value of the flag. -func Uint32(name string, value uint32, usage string) *uint32 { - return CommandLine.Uint32P(name, "", value, usage) -} - -// Like Uint32, but accepts a shorthand letter that can be used after a single dash. 
-func Uint32P(name, shorthand string, value uint32, usage string) *uint32 { - return CommandLine.Uint32P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint64.go deleted file mode 100644 index 99c7e805..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint64.go +++ /dev/null @@ -1,74 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// -- uint64 Value -type uint64Value uint64 - -func newUint64Value(val uint64, p *uint64) *uint64Value { - *p = val - return (*uint64Value)(p) -} - -func (i *uint64Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uint64Value(v) - return err -} - -func (i *uint64Value) Type() string { - return "uint64" -} - -func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) { - f.VarP(newUint64Value(value, p), name, "", usage) -} - -// Like Uint64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { - f.VarP(newUint64Value(value, p), name, shorthand, usage) -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func Uint64Var(p *uint64, name string, value uint64, usage string) { - CommandLine.VarP(newUint64Value(value, p), name, "", usage) -} - -// Like Uint64Var, but accepts a shorthand letter that can be used after a single dash. -func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { - CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage) -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { - p := new(uint64) - f.Uint64VarP(p, name, "", value, usage) - return p -} - -// Like Uint64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 { - p := new(uint64) - f.Uint64VarP(p, name, shorthand, value, usage) - return p -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func Uint64(name string, value uint64, usage string) *uint64 { - return CommandLine.Uint64P(name, "", value, usage) -} - -// Like Uint64, but accepts a shorthand letter that can be used after a single dash. 
-func Uint64P(name, shorthand string, value uint64, usage string) *uint64 { - return CommandLine.Uint64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint8.go deleted file mode 100644 index 6fef508d..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/spf13/pflag/uint8.go +++ /dev/null @@ -1,74 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" -) - -// -- uint8 Value -type uint8Value uint8 - -func newUint8Value(val uint8, p *uint8) *uint8Value { - *p = val - return (*uint8Value)(p) -} - -func (i *uint8Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 8) - *i = uint8Value(v) - return err -} - -func (i *uint8Value) Type() string { - return "uint8" -} - -func (i *uint8Value) String() string { return fmt.Sprintf("%v", *i) } - -// Uint8Var defines a uint8 flag with specified name, default value, and usage string. -// The argument p points to a uint8 variable in which to store the value of the flag. -func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) { - f.VarP(newUint8Value(value, p), name, "", usage) -} - -// Like Uint8Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { - f.VarP(newUint8Value(value, p), name, shorthand, usage) -} - -// Uint8Var defines a uint8 flag with specified name, default value, and usage string. -// The argument p points to a uint8 variable in which to store the value of the flag. -func Uint8Var(p *uint8, name string, value uint8, usage string) { - CommandLine.VarP(newUint8Value(value, p), name, "", usage) -} - -// Like Uint8Var, but accepts a shorthand letter that can be used after a single dash. -func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { - CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage) -} - -// Uint8 defines a uint8 flag with specified name, default value, and usage string. -// The return value is the address of a uint8 variable that stores the value of the flag. -func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 { - p := new(uint8) - f.Uint8VarP(p, name, "", value, usage) - return p -} - -// Like Uint8, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 { - p := new(uint8) - f.Uint8VarP(p, name, shorthand, value, usage) - return p -} - -// Uint8 defines a uint8 flag with specified name, default value, and usage string. -// The return value is the address of a uint8 variable that stores the value of the flag. -func Uint8(name string, value uint8, usage string) *uint8 { - return CommandLine.Uint8P(name, "", value, usage) -} - -// Like Uint8, but accepts a shorthand letter that can be used after a single dash. 
-func Uint8P(name, shorthand string, value uint8, usage string) *uint8 { - return CommandLine.Uint8P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/.gitignore b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/.gitignore deleted file mode 100644 index daf913b1..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/LICENSE deleted file mode 100644 index 2815cc36..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/README.md b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/README.md deleted file mode 100644 index d2d3fb89..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# go-crypto -A Subset of the Go `crypto` Package with a Resumable Hash Interface - -### Documentation - -GoDocs: http://godoc.org/github.com/stevvooe/resumable diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/resumable.go b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/resumable.go deleted file mode 100644 index af4488f1..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/resumable.go +++ /dev/null @@ -1,43 +0,0 @@ -// Package resumable registers resumable versions of hash functions. Resumable -// varieties of hash functions are available via the standard crypto package. -// Support can be checked by type assertion against the resumable.Hash -// interface. -// -// While one can use these sub-packages directly, it makes more sense to -// register them using side-effect imports: -// -// import _ "github.com/stevvooe/resumable/sha256" -// -// This will make the resumable hashes available to the application through -// the standard crypto package. For example, if a new sha256 is required, one -// should use the following: -// -// h := crypto.SHA256.New() -// -// Such a features allows one to control the inclusion of resumable hash -// support in a single file. Applications that require the resumable hash -// implementation can type switch to detect support, while other parts of the -// application can be completely oblivious to the presence of the alternative -// hash functions. -// -// Also note that the implementations available in this package are completely -// untouched from their Go counterparts in the standard library. Only an extra -// file is added to each package to implement the extra resumable hash -// functions. -package resumable - -import "hash" - -// Hash is the common interface implemented by all resumable hash functions. -type Hash interface { - hash.Hash - - // Len returns the number of bytes written to the Hash so far. - Len() int64 - - // State returns a snapshot of the state of the Hash. - State() ([]byte, error) - - // Restore resets the Hash to the given state. - Restore(state []byte) error -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/resume.go b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/resume.go deleted file mode 100644 index 426d78ad..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/resume.go +++ /dev/null @@ -1,53 +0,0 @@ -package sha256 - -import ( - "bytes" - "encoding/gob" - - // import to ensure that our init function runs after the standard package - _ "crypto/sha256" -) - -// Len returns the number of bytes which have been written to the digest. -func (d *digest) Len() int64 { - return int64(d.len) -} - -// State returns a snapshot of the state of the digest. -func (d *digest) State() ([]byte, error) { - var buf bytes.Buffer - encoder := gob.NewEncoder(&buf) - - // We encode this way so that we do not have - // to export these fields of the digest struct. 
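The resumable package doc above describes the intended usage: register the replacement hashes through a side-effect import (each subpackage's init calls crypto.RegisterHash), then detect support with a type assertion so the rest of the program can keep treating the value as a plain hash.Hash. A sketch of that flow, assuming the package still resolves at its original github.com/stevvooe/resumable import path:

```go
package main

import (
	"crypto"
	"fmt"

	"github.com/stevvooe/resumable"
	_ "github.com/stevvooe/resumable/sha256" // side effect: re-registers crypto.SHA256
)

func main() {
	h := crypto.SHA256.New()

	// Only code that needs snapshots type-asserts; everything else can
	// keep using h as an ordinary hash.Hash.
	r, ok := h.(resumable.Hash)
	if !ok {
		fmt.Println("resumable SHA-256 not registered")
		return
	}

	h.Write([]byte("partial input"))
	state, err := r.State() // snapshot mid-stream
	if err != nil {
		panic(err)
	}
	fmt.Printf("saved %d bytes of state after %d input bytes\n", len(state), r.Len())
}
```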
- vals := []interface{}{ - d.h, d.x, d.nx, d.len, d.is224, - } - - for _, val := range vals { - if err := encoder.Encode(val); err != nil { - return nil, err - } - } - - return buf.Bytes(), nil -} - -// Restore resets the digest to the given state. -func (d *digest) Restore(state []byte) error { - decoder := gob.NewDecoder(bytes.NewReader(state)) - - // We decode this way so that we do not have - // to export these fields of the digest struct. - vals := []interface{}{ - &d.h, &d.x, &d.nx, &d.len, &d.is224, - } - - for _, val := range vals { - if err := decoder.Decode(val); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256.go b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256.go deleted file mode 100644 index d84cebf2..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package sha256 implements the SHA224 and SHA256 hash algorithms as defined -// in FIPS 180-4. -package sha256 - -import ( - "crypto" - "hash" -) - -func init() { - crypto.RegisterHash(crypto.SHA224, New224) - crypto.RegisterHash(crypto.SHA256, New) -} - -// The size of a SHA256 checksum in bytes. -const Size = 32 - -// The size of a SHA224 checksum in bytes. -const Size224 = 28 - -// The blocksize of SHA256 and SHA224 in bytes. -const BlockSize = 64 - -const ( - chunk = 64 - init0 = 0x6A09E667 - init1 = 0xBB67AE85 - init2 = 0x3C6EF372 - init3 = 0xA54FF53A - init4 = 0x510E527F - init5 = 0x9B05688C - init6 = 0x1F83D9AB - init7 = 0x5BE0CD19 - init0_224 = 0xC1059ED8 - init1_224 = 0x367CD507 - init2_224 = 0x3070DD17 - init3_224 = 0xF70E5939 - init4_224 = 0xFFC00B31 - init5_224 = 0x68581511 - init6_224 = 0x64F98FA7 - init7_224 = 0xBEFA4FA4 -) - -// digest represents the partial evaluation of a checksum. -type digest struct { - h [8]uint32 - x [chunk]byte - nx int - len uint64 - is224 bool // mark if this digest is SHA-224 -} - -func (d *digest) Reset() { - if !d.is224 { - d.h[0] = init0 - d.h[1] = init1 - d.h[2] = init2 - d.h[3] = init3 - d.h[4] = init4 - d.h[5] = init5 - d.h[6] = init6 - d.h[7] = init7 - } else { - d.h[0] = init0_224 - d.h[1] = init1_224 - d.h[2] = init2_224 - d.h[3] = init3_224 - d.h[4] = init4_224 - d.h[5] = init5_224 - d.h[6] = init6_224 - d.h[7] = init7_224 - } - d.nx = 0 - d.len = 0 -} - -// New returns a new hash.Hash computing the SHA256 checksum. -func New() hash.Hash { - d := new(digest) - d.Reset() - return d -} - -// New224 returns a new hash.Hash computing the SHA224 checksum. -func New224() hash.Hash { - d := new(digest) - d.is224 = true - d.Reset() - return d -} - -func (d *digest) Size() int { - if !d.is224 { - return Size - } - return Size224 -} - -func (d *digest) BlockSize() int { return BlockSize } - -func (d *digest) Write(p []byte) (nn int, err error) { - nn = len(p) - d.len += uint64(nn) - if d.nx > 0 { - n := copy(d.x[d.nx:], p) - d.nx += n - if d.nx == chunk { - block(d, d.x[:]) - d.nx = 0 - } - p = p[n:] - } - if len(p) >= chunk { - n := len(p) &^ (chunk - 1) - block(d, p[:n]) - p = p[n:] - } - if len(p) > 0 { - d.nx = copy(d.x[:], p) - } - return -} - -func (d0 *digest) Sum(in []byte) []byte { - // Make a copy of d0 so that caller can keep writing and summing. 
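The State/Restore pair above also shows a reusable trick: gob refuses to serialize unexported struct fields, so instead of encoding the digest struct directly, the code encodes each field's value individually in a fixed order and decodes into the matching field pointers in the same order. The same approach works for any type that wants opaque snapshots without exporting its internals; a sketch with a hypothetical counter type:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// counter has only unexported fields, so gob cannot encode it directly.
type counter struct {
	n     int64
	label string
}

// State encodes each field value in a fixed order; the values handed to
// Encode are plain exported types (int64, string), so gob accepts them.
func (c *counter) State() ([]byte, error) {
	var buf bytes.Buffer
	enc := gob.NewEncoder(&buf)
	for _, v := range []interface{}{c.n, c.label} {
		if err := enc.Encode(v); err != nil {
			return nil, err
		}
	}
	return buf.Bytes(), nil
}

// Restore decodes into field pointers in the same order State used.
func (c *counter) Restore(state []byte) error {
	dec := gob.NewDecoder(bytes.NewReader(state))
	for _, v := range []interface{}{&c.n, &c.label} {
		if err := dec.Decode(v); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	a := &counter{n: 42, label: "requests"}
	state, _ := a.State()

	b := &counter{}
	_ = b.Restore(state)
	fmt.Println(b.n, b.label) // 42 requests
}
```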
- d := *d0 - hash := d.checkSum() - if d.is224 { - return append(in, hash[:Size224]...) - } - return append(in, hash[:]...) -} - -func (d *digest) checkSum() [Size]byte { - len := d.len - // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. - var tmp [64]byte - tmp[0] = 0x80 - if len%64 < 56 { - d.Write(tmp[0 : 56-len%64]) - } else { - d.Write(tmp[0 : 64+56-len%64]) - } - - // Length in bits. - len <<= 3 - for i := uint(0); i < 8; i++ { - tmp[i] = byte(len >> (56 - 8*i)) - } - d.Write(tmp[0:8]) - - if d.nx != 0 { - panic("d.nx != 0") - } - - h := d.h[:] - if d.is224 { - h = d.h[:7] - } - - var digest [Size]byte - for i, s := range h { - digest[i*4] = byte(s >> 24) - digest[i*4+1] = byte(s >> 16) - digest[i*4+2] = byte(s >> 8) - digest[i*4+3] = byte(s) - } - - return digest -} - -// Sum256 returns the SHA256 checksum of the data. -func Sum256(data []byte) [Size]byte { - var d digest - d.Reset() - d.Write(data) - return d.checkSum() -} - -// Sum224 returns the SHA224 checksum of the data. -func Sum224(data []byte) (sum224 [Size224]byte) { - var d digest - d.is224 = true - d.Reset() - d.Write(data) - sum := d.checkSum() - copy(sum224[:], sum[:Size224]) - return -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block.go b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block.go deleted file mode 100644 index ca5efd15..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !386,!amd64 - -// SHA256 block step. -// In its own file so that a faster assembly or C version -// can be substituted easily. - -package sha256 - -var _K = []uint32{ - 0x428a2f98, - 0x71374491, - 0xb5c0fbcf, - 0xe9b5dba5, - 0x3956c25b, - 0x59f111f1, - 0x923f82a4, - 0xab1c5ed5, - 0xd807aa98, - 0x12835b01, - 0x243185be, - 0x550c7dc3, - 0x72be5d74, - 0x80deb1fe, - 0x9bdc06a7, - 0xc19bf174, - 0xe49b69c1, - 0xefbe4786, - 0x0fc19dc6, - 0x240ca1cc, - 0x2de92c6f, - 0x4a7484aa, - 0x5cb0a9dc, - 0x76f988da, - 0x983e5152, - 0xa831c66d, - 0xb00327c8, - 0xbf597fc7, - 0xc6e00bf3, - 0xd5a79147, - 0x06ca6351, - 0x14292967, - 0x27b70a85, - 0x2e1b2138, - 0x4d2c6dfc, - 0x53380d13, - 0x650a7354, - 0x766a0abb, - 0x81c2c92e, - 0x92722c85, - 0xa2bfe8a1, - 0xa81a664b, - 0xc24b8b70, - 0xc76c51a3, - 0xd192e819, - 0xd6990624, - 0xf40e3585, - 0x106aa070, - 0x19a4c116, - 0x1e376c08, - 0x2748774c, - 0x34b0bcb5, - 0x391c0cb3, - 0x4ed8aa4a, - 0x5b9cca4f, - 0x682e6ff3, - 0x748f82ee, - 0x78a5636f, - 0x84c87814, - 0x8cc70208, - 0x90befffa, - 0xa4506ceb, - 0xbef9a3f7, - 0xc67178f2, -} - -func block(dig *digest, p []byte) { - var w [64]uint32 - h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] - for len(p) >= chunk { - // Can interlace the computation of w with the - // rounds below if needed for speed. 
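checkSum above implements the FIPS 180-4 padding rule: append a single 0x80 byte, then zeros until the length is congruent to 56 mod 64, then the original message length in bits as a 64-bit big-endian integer, so the padded stream is always a whole number of 64-byte blocks. A short sketch that computes the pad size the same way and checks the invariant:

```go
package main

import "fmt"

// sha256PadLen returns how many padding bytes (the 0x80 byte, zeros,
// and the 8-byte length field) checkSum appends for an n-byte message.
func sha256PadLen(n uint64) uint64 {
	var zeros uint64
	if n%64 < 56 {
		zeros = 56 - n%64 - 1 // room left in the current block
	} else {
		zeros = 64 + 56 - n%64 - 1 // must spill into one extra block
	}
	return 1 + zeros + 8 // 0x80 byte + zeros + 64-bit bit length
}

func main() {
	for _, n := range []uint64{0, 55, 56, 63, 64, 1000} {
		pad := sha256PadLen(n)
		// total%64 is always 0: padding completes the final block.
		fmt.Printf("msg=%4d pad=%2d total%%64=%d\n", n, pad, (n+pad)%64)
	}
}
```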
- for i := 0; i < 16; i++ { - j := i * 4 - w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3]) - } - for i := 16; i < 64; i++ { - v1 := w[i-2] - t1 := (v1>>17 | v1<<(32-17)) ^ (v1>>19 | v1<<(32-19)) ^ (v1 >> 10) - v2 := w[i-15] - t2 := (v2>>7 | v2<<(32-7)) ^ (v2>>18 | v2<<(32-18)) ^ (v2 >> 3) - w[i] = t1 + w[i-7] + t2 + w[i-16] - } - - a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7 - - for i := 0; i < 64; i++ { - t1 := h + ((e>>6 | e<<(32-6)) ^ (e>>11 | e<<(32-11)) ^ (e>>25 | e<<(32-25))) + ((e & f) ^ (^e & g)) + _K[i] + w[i] - - t2 := ((a>>2 | a<<(32-2)) ^ (a>>13 | a<<(32-13)) ^ (a>>22 | a<<(32-22))) + ((a & b) ^ (a & c) ^ (b & c)) - - h = g - g = f - f = e - e = d + t1 - d = c - c = b - b = a - a = t1 + t2 - } - - h0 += a - h1 += b - h2 += c - h3 += d - h4 += e - h5 += f - h6 += g - h7 += h - - p = p[chunk:] - } - - dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block_386.s b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block_386.s deleted file mode 100644 index 73ae2bf3..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block_386.s +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// SHA256 block routine. See sha256block.go for Go equivalent. -// -// The algorithm is detailed in FIPS 180-4: -// -// http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf -// -// Wt = Mt; for 0 <= t <= 15 -// Wt = SIGMA1(Wt-2) + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 -// -// a = H0 -// b = H1 -// c = H2 -// d = H3 -// e = H4 -// f = H5 -// g = H6 -// h = H7 -// -// for t = 0 to 63 { -// T1 = h + BIGSIGMA1(e) + Ch(e,f,g) + Kt + Wt -// T2 = BIGSIGMA0(a) + Maj(a,b,c) -// h = g -// g = f -// f = e -// e = d + T1 -// d = c -// c = b -// b = a -// a = T1 + T2 -// } -// -// H0 = a + H0 -// H1 = b + H1 -// H2 = c + H2 -// H3 = d + H3 -// H4 = e + H4 -// H5 = f + H5 -// H6 = g + H6 -// H7 = h + H7 - -// Wt = Mt; for 0 <= t <= 15 -#define MSGSCHEDULE0(index) \ - MOVL (index*4)(SI), AX; \ - BSWAPL AX; \ - MOVL AX, (index*4)(BP) - -// Wt = SIGMA1(Wt-2) + Wt-7 + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 -// SIGMA0(x) = ROTR(7,x) XOR ROTR(18,x) XOR SHR(3,x) -// SIGMA1(x) = ROTR(17,x) XOR ROTR(19,x) XOR SHR(10,x) -#define MSGSCHEDULE1(index) \ - MOVL ((index-2)*4)(BP), AX; \ - MOVL AX, CX; \ - RORL $17, AX; \ - MOVL CX, DX; \ - RORL $19, CX; \ - SHRL $10, DX; \ - MOVL ((index-15)*4)(BP), BX; \ - XORL CX, AX; \ - MOVL BX, CX; \ - XORL DX, AX; \ - RORL $7, BX; \ - MOVL CX, DX; \ - SHRL $3, DX; \ - RORL $18, CX; \ - ADDL ((index-7)*4)(BP), AX; \ - XORL CX, BX; \ - XORL DX, BX; \ - ADDL ((index-16)*4)(BP), BX; \ - ADDL BX, AX; \ - MOVL AX, ((index)*4)(BP) - -// Calculate T1 in AX - uses AX, BX, CX and DX registers. -// Wt is passed in AX. 
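The pure-Go block above spells every rotate-right out as (x>>r | x<<(32-r)), which predates math/bits; since Go 1.9 the same operation can be written as bits.RotateLeft32(x, -r), and modern compilers typically lower both forms to a single rotate instruction on architectures that have one. A quick equivalence check on the message schedule's sigma0 function:

```go
package main

import (
	"fmt"
	"math/bits"
)

// sigma0Manual is the schedule function exactly as written in the
// deleted block(): rotr7 ^ rotr18 ^ shr3.
func sigma0Manual(x uint32) uint32 {
	return (x>>7 | x<<(32-7)) ^ (x>>18 | x<<(32-18)) ^ (x >> 3)
}

// sigma0Bits is the same function using math/bits rotations; a
// negative count rotates right.
func sigma0Bits(x uint32) uint32 {
	return bits.RotateLeft32(x, -7) ^ bits.RotateLeft32(x, -18) ^ (x >> 3)
}

func main() {
	for _, x := range []uint32{0, 1, 0xdeadbeef, 0xffffffff} {
		fmt.Println(sigma0Manual(x) == sigma0Bits(x)) // always true
	}
}
```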
-// T1 = h + BIGSIGMA1(e) + Ch(e, f, g) + Kt + Wt -// BIGSIGMA1(x) = ROTR(6,x) XOR ROTR(11,x) XOR ROTR(25,x) -// Ch(x, y, z) = (x AND y) XOR (NOT x AND z) -#define SHA256T1(const, e, f, g, h) \ - MOVL (h*4)(DI), BX; \ - ADDL AX, BX; \ - MOVL (e*4)(DI), AX; \ - ADDL $const, BX; \ - MOVL (e*4)(DI), CX; \ - RORL $6, AX; \ - MOVL (e*4)(DI), DX; \ - RORL $11, CX; \ - XORL CX, AX; \ - MOVL (e*4)(DI), CX; \ - RORL $25, DX; \ - ANDL (f*4)(DI), CX; \ - XORL AX, DX; \ - MOVL (e*4)(DI), AX; \ - NOTL AX; \ - ADDL DX, BX; \ - ANDL (g*4)(DI), AX; \ - XORL CX, AX; \ - ADDL BX, AX - -// Calculate T2 in BX - uses AX, BX, CX and DX registers. -// T2 = BIGSIGMA0(a) + Maj(a, b, c) -// BIGSIGMA0(x) = ROTR(2,x) XOR ROTR(13,x) XOR ROTR(22,x) -// Maj(x, y, z) = (x AND y) XOR (x AND z) XOR (y AND z) -#define SHA256T2(a, b, c) \ - MOVL (a*4)(DI), AX; \ - MOVL (c*4)(DI), BX; \ - RORL $2, AX; \ - MOVL (a*4)(DI), DX; \ - ANDL (b*4)(DI), BX; \ - RORL $13, DX; \ - MOVL (a*4)(DI), CX; \ - ANDL (c*4)(DI), CX; \ - XORL DX, AX; \ - XORL CX, BX; \ - MOVL (a*4)(DI), DX; \ - MOVL (b*4)(DI), CX; \ - RORL $22, DX; \ - ANDL (a*4)(DI), CX; \ - XORL CX, BX; \ - XORL DX, AX; \ - ADDL AX, BX - -// Calculate T1 and T2, then e = d + T1 and a = T1 + T2. -// The values for e and a are stored in d and h, ready for rotation. -#define SHA256ROUND(index, const, a, b, c, d, e, f, g, h) \ - SHA256T1(const, e, f, g, h); \ - MOVL AX, 292(SP); \ - SHA256T2(a, b, c); \ - MOVL 292(SP), AX; \ - ADDL AX, BX; \ - ADDL AX, (d*4)(DI); \ - MOVL BX, (h*4)(DI) - -#define SHA256ROUND0(index, const, a, b, c, d, e, f, g, h) \ - MSGSCHEDULE0(index); \ - SHA256ROUND(index, const, a, b, c, d, e, f, g, h) - -#define SHA256ROUND1(index, const, a, b, c, d, e, f, g, h) \ - MSGSCHEDULE1(index); \ - SHA256ROUND(index, const, a, b, c, d, e, f, g, h) - -TEXT ·block(SB),0,$296-12 - MOVL p_base+4(FP), SI - MOVL p_len+8(FP), DX - SHRL $6, DX - SHLL $6, DX - - LEAL (SI)(DX*1), DI - MOVL DI, 288(SP) - CMPL SI, DI - JEQ end - - LEAL 256(SP), DI // variables - - MOVL dig+0(FP), BP - MOVL (0*4)(BP), AX // a = H0 - MOVL AX, (0*4)(DI) - MOVL (1*4)(BP), BX // b = H1 - MOVL BX, (1*4)(DI) - MOVL (2*4)(BP), CX // c = H2 - MOVL CX, (2*4)(DI) - MOVL (3*4)(BP), DX // d = H3 - MOVL DX, (3*4)(DI) - MOVL (4*4)(BP), AX // e = H4 - MOVL AX, (4*4)(DI) - MOVL (5*4)(BP), BX // f = H5 - MOVL BX, (5*4)(DI) - MOVL (6*4)(BP), CX // g = H6 - MOVL CX, (6*4)(DI) - MOVL (7*4)(BP), DX // h = H7 - MOVL DX, (7*4)(DI) - -loop: - MOVL SP, BP // message schedule - - SHA256ROUND0(0, 0x428a2f98, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND0(1, 0x71374491, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND0(2, 0xb5c0fbcf, 6, 7, 0, 1, 2, 3, 4, 5) - SHA256ROUND0(3, 0xe9b5dba5, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND0(4, 0x3956c25b, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND0(5, 0x59f111f1, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND0(6, 0x923f82a4, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND0(7, 0xab1c5ed5, 1, 2, 3, 4, 5, 6, 7, 0) - SHA256ROUND0(8, 0xd807aa98, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND0(9, 0x12835b01, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND0(10, 0x243185be, 6, 7, 0, 1, 2, 3, 4, 5) - SHA256ROUND0(11, 0x550c7dc3, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND0(12, 0x72be5d74, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND0(13, 0x80deb1fe, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND0(14, 0x9bdc06a7, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND0(15, 0xc19bf174, 1, 2, 3, 4, 5, 6, 7, 0) - - SHA256ROUND1(16, 0xe49b69c1, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND1(17, 0xefbe4786, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND1(18, 0x0fc19dc6, 6, 7, 0, 1, 2, 3, 4, 5) - SHA256ROUND1(19, 
0x240ca1cc, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND1(20, 0x2de92c6f, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND1(21, 0x4a7484aa, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND1(22, 0x5cb0a9dc, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND1(23, 0x76f988da, 1, 2, 3, 4, 5, 6, 7, 0) - SHA256ROUND1(24, 0x983e5152, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND1(25, 0xa831c66d, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND1(26, 0xb00327c8, 6, 7, 0, 1, 2, 3, 4, 5) - SHA256ROUND1(27, 0xbf597fc7, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND1(28, 0xc6e00bf3, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND1(29, 0xd5a79147, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND1(30, 0x06ca6351, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND1(31, 0x14292967, 1, 2, 3, 4, 5, 6, 7, 0) - SHA256ROUND1(32, 0x27b70a85, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND1(33, 0x2e1b2138, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND1(34, 0x4d2c6dfc, 6, 7, 0, 1, 2, 3, 4, 5) - SHA256ROUND1(35, 0x53380d13, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND1(36, 0x650a7354, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND1(37, 0x766a0abb, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND1(38, 0x81c2c92e, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND1(39, 0x92722c85, 1, 2, 3, 4, 5, 6, 7, 0) - SHA256ROUND1(40, 0xa2bfe8a1, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND1(41, 0xa81a664b, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND1(42, 0xc24b8b70, 6, 7, 0, 1, 2, 3, 4, 5) - SHA256ROUND1(43, 0xc76c51a3, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND1(44, 0xd192e819, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND1(45, 0xd6990624, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND1(46, 0xf40e3585, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND1(47, 0x106aa070, 1, 2, 3, 4, 5, 6, 7, 0) - SHA256ROUND1(48, 0x19a4c116, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND1(49, 0x1e376c08, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND1(50, 0x2748774c, 6, 7, 0, 1, 2, 3, 4, 5) - SHA256ROUND1(51, 0x34b0bcb5, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND1(52, 0x391c0cb3, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND1(53, 0x4ed8aa4a, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND1(54, 0x5b9cca4f, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND1(55, 0x682e6ff3, 1, 2, 3, 4, 5, 6, 7, 0) - SHA256ROUND1(56, 0x748f82ee, 0, 1, 2, 3, 4, 5, 6, 7) - SHA256ROUND1(57, 0x78a5636f, 7, 0, 1, 2, 3, 4, 5, 6) - SHA256ROUND1(58, 0x84c87814, 6, 7, 0, 1, 2, 3, 4, 5) - SHA256ROUND1(59, 0x8cc70208, 5, 6, 7, 0, 1, 2, 3, 4) - SHA256ROUND1(60, 0x90befffa, 4, 5, 6, 7, 0, 1, 2, 3) - SHA256ROUND1(61, 0xa4506ceb, 3, 4, 5, 6, 7, 0, 1, 2) - SHA256ROUND1(62, 0xbef9a3f7, 2, 3, 4, 5, 6, 7, 0, 1) - SHA256ROUND1(63, 0xc67178f2, 1, 2, 3, 4, 5, 6, 7, 0) - - MOVL dig+0(FP), BP - MOVL (0*4)(BP), AX // H0 = a + H0 - ADDL (0*4)(DI), AX - MOVL AX, (0*4)(DI) - MOVL AX, (0*4)(BP) - MOVL (1*4)(BP), BX // H1 = b + H1 - ADDL (1*4)(DI), BX - MOVL BX, (1*4)(DI) - MOVL BX, (1*4)(BP) - MOVL (2*4)(BP), CX // H2 = c + H2 - ADDL (2*4)(DI), CX - MOVL CX, (2*4)(DI) - MOVL CX, (2*4)(BP) - MOVL (3*4)(BP), DX // H3 = d + H3 - ADDL (3*4)(DI), DX - MOVL DX, (3*4)(DI) - MOVL DX, (3*4)(BP) - MOVL (4*4)(BP), AX // H4 = e + H4 - ADDL (4*4)(DI), AX - MOVL AX, (4*4)(DI) - MOVL AX, (4*4)(BP) - MOVL (5*4)(BP), BX // H5 = f + H5 - ADDL (5*4)(DI), BX - MOVL BX, (5*4)(DI) - MOVL BX, (5*4)(BP) - MOVL (6*4)(BP), CX // H6 = g + H6 - ADDL (6*4)(DI), CX - MOVL CX, (6*4)(DI) - MOVL CX, (6*4)(BP) - MOVL (7*4)(BP), DX // H7 = h + H7 - ADDL (7*4)(DI), DX - MOVL DX, (7*4)(DI) - MOVL DX, (7*4)(BP) - - ADDL $64, SI - CMPL SI, 288(SP) - JB loop - -end: - RET diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block_amd64.s b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block_amd64.s 
deleted file mode 100644 index 868eaed4..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block_amd64.s +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "textflag.h" - -// SHA256 block routine. See sha256block.go for Go equivalent. -// -// The algorithm is detailed in FIPS 180-4: -// -// http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf -// -// Wt = Mt; for 0 <= t <= 15 -// Wt = SIGMA1(Wt-2) + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 -// -// a = H0 -// b = H1 -// c = H2 -// d = H3 -// e = H4 -// f = H5 -// g = H6 -// h = H7 -// -// for t = 0 to 63 { -// T1 = h + BIGSIGMA1(e) + Ch(e,f,g) + Kt + Wt -// T2 = BIGSIGMA0(a) + Maj(a,b,c) -// h = g -// g = f -// f = e -// e = d + T1 -// d = c -// c = b -// b = a -// a = T1 + T2 -// } -// -// H0 = a + H0 -// H1 = b + H1 -// H2 = c + H2 -// H3 = d + H3 -// H4 = e + H4 -// H5 = f + H5 -// H6 = g + H6 -// H7 = h + H7 - -// Wt = Mt; for 0 <= t <= 15 -#define MSGSCHEDULE0(index) \ - MOVL (index*4)(SI), AX; \ - BSWAPL AX; \ - MOVL AX, (index*4)(BP) - -// Wt = SIGMA1(Wt-2) + Wt-7 + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 63 -// SIGMA0(x) = ROTR(7,x) XOR ROTR(18,x) XOR SHR(3,x) -// SIGMA1(x) = ROTR(17,x) XOR ROTR(19,x) XOR SHR(10,x) -#define MSGSCHEDULE1(index) \ - MOVL ((index-2)*4)(BP), AX; \ - MOVL AX, CX; \ - RORL $17, AX; \ - MOVL CX, DX; \ - RORL $19, CX; \ - SHRL $10, DX; \ - MOVL ((index-15)*4)(BP), BX; \ - XORL CX, AX; \ - MOVL BX, CX; \ - XORL DX, AX; \ - RORL $7, BX; \ - MOVL CX, DX; \ - SHRL $3, DX; \ - RORL $18, CX; \ - ADDL ((index-7)*4)(BP), AX; \ - XORL CX, BX; \ - XORL DX, BX; \ - ADDL ((index-16)*4)(BP), BX; \ - ADDL BX, AX; \ - MOVL AX, ((index)*4)(BP) - -// Calculate T1 in AX - uses AX, CX and DX registers. -// h is also used as an accumulator. Wt is passed in AX. -// T1 = h + BIGSIGMA1(e) + Ch(e, f, g) + Kt + Wt -// BIGSIGMA1(x) = ROTR(6,x) XOR ROTR(11,x) XOR ROTR(25,x) -// Ch(x, y, z) = (x AND y) XOR (NOT x AND z) -#define SHA256T1(const, e, f, g, h) \ - ADDL AX, h; \ - MOVL e, AX; \ - ADDL $const, h; \ - MOVL e, CX; \ - RORL $6, AX; \ - MOVL e, DX; \ - RORL $11, CX; \ - XORL CX, AX; \ - MOVL e, CX; \ - RORL $25, DX; \ - ANDL f, CX; \ - XORL AX, DX; \ - MOVL e, AX; \ - NOTL AX; \ - ADDL DX, h; \ - ANDL g, AX; \ - XORL CX, AX; \ - ADDL h, AX - -// Calculate T2 in BX - uses BX, CX, DX and DI registers. -// T2 = BIGSIGMA0(a) + Maj(a, b, c) -// BIGSIGMA0(x) = ROTR(2,x) XOR ROTR(13,x) XOR ROTR(22,x) -// Maj(x, y, z) = (x AND y) XOR (x AND z) XOR (y AND z) -#define SHA256T2(a, b, c) \ - MOVL a, DI; \ - MOVL c, BX; \ - RORL $2, DI; \ - MOVL a, DX; \ - ANDL b, BX; \ - RORL $13, DX; \ - MOVL a, CX; \ - ANDL c, CX; \ - XORL DX, DI; \ - XORL CX, BX; \ - MOVL a, DX; \ - MOVL b, CX; \ - RORL $22, DX; \ - ANDL a, CX; \ - XORL CX, BX; \ - XORL DX, DI; \ - ADDL DI, BX - -// Calculate T1 and T2, then e = d + T1 and a = T1 + T2. -// The values for e and a are stored in d and h, ready for rotation. 
-#define SHA256ROUND(index, const, a, b, c, d, e, f, g, h) \ - SHA256T1(const, e, f, g, h); \ - SHA256T2(a, b, c); \ - MOVL BX, h; \ - ADDL AX, d; \ - ADDL AX, h - -#define SHA256ROUND0(index, const, a, b, c, d, e, f, g, h) \ - MSGSCHEDULE0(index); \ - SHA256ROUND(index, const, a, b, c, d, e, f, g, h) - -#define SHA256ROUND1(index, const, a, b, c, d, e, f, g, h) \ - MSGSCHEDULE1(index); \ - SHA256ROUND(index, const, a, b, c, d, e, f, g, h) - -TEXT ·block(SB),0,$264-32 - MOVQ p_base+8(FP), SI - MOVQ p_len+16(FP), DX - SHRQ $6, DX - SHLQ $6, DX - - LEAQ (SI)(DX*1), DI - MOVQ DI, 256(SP) - CMPQ SI, DI - JEQ end - - MOVQ dig+0(FP), BP - MOVL (0*4)(BP), R8 // a = H0 - MOVL (1*4)(BP), R9 // b = H1 - MOVL (2*4)(BP), R10 // c = H2 - MOVL (3*4)(BP), R11 // d = H3 - MOVL (4*4)(BP), R12 // e = H4 - MOVL (5*4)(BP), R13 // f = H5 - MOVL (6*4)(BP), R14 // g = H6 - MOVL (7*4)(BP), R15 // h = H7 - -loop: - MOVQ SP, BP // message schedule - - SHA256ROUND0(0, 0x428a2f98, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND0(1, 0x71374491, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND0(2, 0xb5c0fbcf, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND0(3, 0xe9b5dba5, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND0(4, 0x3956c25b, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND0(5, 0x59f111f1, R11, R12, R13, R14, R15, R8, R9, R10) - SHA256ROUND0(6, 0x923f82a4, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND0(7, 0xab1c5ed5, R9, R10, R11, R12, R13, R14, R15, R8) - SHA256ROUND0(8, 0xd807aa98, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND0(9, 0x12835b01, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND0(10, 0x243185be, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND0(11, 0x550c7dc3, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND0(12, 0x72be5d74, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND0(13, 0x80deb1fe, R11, R12, R13, R14, R15, R8, R9, R10) - SHA256ROUND0(14, 0x9bdc06a7, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND0(15, 0xc19bf174, R9, R10, R11, R12, R13, R14, R15, R8) - - SHA256ROUND1(16, 0xe49b69c1, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND1(17, 0xefbe4786, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND1(18, 0x0fc19dc6, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND1(19, 0x240ca1cc, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND1(20, 0x2de92c6f, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND1(21, 0x4a7484aa, R11, R12, R13, R14, R15, R8, R9, R10) - SHA256ROUND1(22, 0x5cb0a9dc, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND1(23, 0x76f988da, R9, R10, R11, R12, R13, R14, R15, R8) - SHA256ROUND1(24, 0x983e5152, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND1(25, 0xa831c66d, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND1(26, 0xb00327c8, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND1(27, 0xbf597fc7, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND1(28, 0xc6e00bf3, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND1(29, 0xd5a79147, R11, R12, R13, R14, R15, R8, R9, R10) - SHA256ROUND1(30, 0x06ca6351, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND1(31, 0x14292967, R9, R10, R11, R12, R13, R14, R15, R8) - SHA256ROUND1(32, 0x27b70a85, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND1(33, 0x2e1b2138, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND1(34, 0x4d2c6dfc, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND1(35, 0x53380d13, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND1(36, 0x650a7354, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND1(37, 0x766a0abb, R11, R12, R13, R14, R15, R8, R9, R10) - 
SHA256ROUND1(38, 0x81c2c92e, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND1(39, 0x92722c85, R9, R10, R11, R12, R13, R14, R15, R8) - SHA256ROUND1(40, 0xa2bfe8a1, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND1(41, 0xa81a664b, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND1(42, 0xc24b8b70, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND1(43, 0xc76c51a3, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND1(44, 0xd192e819, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND1(45, 0xd6990624, R11, R12, R13, R14, R15, R8, R9, R10) - SHA256ROUND1(46, 0xf40e3585, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND1(47, 0x106aa070, R9, R10, R11, R12, R13, R14, R15, R8) - SHA256ROUND1(48, 0x19a4c116, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND1(49, 0x1e376c08, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND1(50, 0x2748774c, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND1(51, 0x34b0bcb5, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND1(52, 0x391c0cb3, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND1(53, 0x4ed8aa4a, R11, R12, R13, R14, R15, R8, R9, R10) - SHA256ROUND1(54, 0x5b9cca4f, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND1(55, 0x682e6ff3, R9, R10, R11, R12, R13, R14, R15, R8) - SHA256ROUND1(56, 0x748f82ee, R8, R9, R10, R11, R12, R13, R14, R15) - SHA256ROUND1(57, 0x78a5636f, R15, R8, R9, R10, R11, R12, R13, R14) - SHA256ROUND1(58, 0x84c87814, R14, R15, R8, R9, R10, R11, R12, R13) - SHA256ROUND1(59, 0x8cc70208, R13, R14, R15, R8, R9, R10, R11, R12) - SHA256ROUND1(60, 0x90befffa, R12, R13, R14, R15, R8, R9, R10, R11) - SHA256ROUND1(61, 0xa4506ceb, R11, R12, R13, R14, R15, R8, R9, R10) - SHA256ROUND1(62, 0xbef9a3f7, R10, R11, R12, R13, R14, R15, R8, R9) - SHA256ROUND1(63, 0xc67178f2, R9, R10, R11, R12, R13, R14, R15, R8) - - MOVQ dig+0(FP), BP - ADDL (0*4)(BP), R8 // H0 = a + H0 - MOVL R8, (0*4)(BP) - ADDL (1*4)(BP), R9 // H1 = b + H1 - MOVL R9, (1*4)(BP) - ADDL (2*4)(BP), R10 // H2 = c + H2 - MOVL R10, (2*4)(BP) - ADDL (3*4)(BP), R11 // H3 = d + H3 - MOVL R11, (3*4)(BP) - ADDL (4*4)(BP), R12 // H4 = e + H4 - MOVL R12, (4*4)(BP) - ADDL (5*4)(BP), R13 // H5 = f + H5 - MOVL R13, (5*4)(BP) - ADDL (6*4)(BP), R14 // H6 = g + H6 - MOVL R14, (6*4)(BP) - ADDL (7*4)(BP), R15 // H7 = h + H7 - MOVL R15, (7*4)(BP) - - ADDQ $64, SI - CMPQ SI, 256(SP) - JB loop - -end: - RET diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block_decl.go b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block_decl.go deleted file mode 100644 index a50c9787..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha256/sha256block_decl.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build 386 amd64 - -package sha256 - -//go:noescape - -func block(dig *digest, p []byte) diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/resume.go b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/resume.go deleted file mode 100644 index 55b433e7..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/resume.go +++ /dev/null @@ -1,53 +0,0 @@ -package sha512 - -import ( - "bytes" - "encoding/gob" - - // import to ensure that our init function runs after the standard package - _ "crypto/sha512" -) - -// Len returns the number of bytes which have been written to the digest. -func (d *digest) Len() int64 { - return int64(d.len) -} - -// State returns a snapshot of the state of the digest. -func (d *digest) State() ([]byte, error) { - var buf bytes.Buffer - encoder := gob.NewEncoder(&buf) - - // We encode this way so that we do not have - // to export these fields of the digest struct. - vals := []interface{}{ - d.h, d.x, d.nx, d.len, d.is384, - } - - for _, val := range vals { - if err := encoder.Encode(val); err != nil { - return nil, err - } - } - - return buf.Bytes(), nil -} - -// Restore resets the digest to the given state. -func (d *digest) Restore(state []byte) error { - decoder := gob.NewDecoder(bytes.NewReader(state)) - - // We decode this way so that we do not have - // to export these fields of the digest struct. - vals := []interface{}{ - &d.h, &d.x, &d.nx, &d.len, &d.is384, - } - - for _, val := range vals { - if err := decoder.Decode(val); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512.go b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512.go deleted file mode 100644 index bca7a91e..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package sha512 implements the SHA384 and SHA512 hash algorithms as defined -// in FIPS 180-2. -package sha512 - -import ( - "crypto" - "hash" -) - -func init() { - crypto.RegisterHash(crypto.SHA384, New384) - crypto.RegisterHash(crypto.SHA512, New) -} - -// The size of a SHA512 checksum in bytes. -const Size = 64 - -// The size of a SHA384 checksum in bytes. -const Size384 = 48 - -// The blocksize of SHA512 and SHA384 in bytes. -const BlockSize = 128 - -const ( - chunk = 128 - init0 = 0x6a09e667f3bcc908 - init1 = 0xbb67ae8584caa73b - init2 = 0x3c6ef372fe94f82b - init3 = 0xa54ff53a5f1d36f1 - init4 = 0x510e527fade682d1 - init5 = 0x9b05688c2b3e6c1f - init6 = 0x1f83d9abfb41bd6b - init7 = 0x5be0cd19137e2179 - init0_384 = 0xcbbb9d5dc1059ed8 - init1_384 = 0x629a292a367cd507 - init2_384 = 0x9159015a3070dd17 - init3_384 = 0x152fecd8f70e5939 - init4_384 = 0x67332667ffc00b31 - init5_384 = 0x8eb44a8768581511 - init6_384 = 0xdb0c2e0d64f98fa7 - init7_384 = 0x47b5481dbefa4fa4 -) - -// digest represents the partial evaluation of a checksum. 
-type digest struct { - h [8]uint64 - x [chunk]byte - nx int - len uint64 - is384 bool // mark if this digest is SHA-384 -} - -func (d *digest) Reset() { - if !d.is384 { - d.h[0] = init0 - d.h[1] = init1 - d.h[2] = init2 - d.h[3] = init3 - d.h[4] = init4 - d.h[5] = init5 - d.h[6] = init6 - d.h[7] = init7 - } else { - d.h[0] = init0_384 - d.h[1] = init1_384 - d.h[2] = init2_384 - d.h[3] = init3_384 - d.h[4] = init4_384 - d.h[5] = init5_384 - d.h[6] = init6_384 - d.h[7] = init7_384 - } - d.nx = 0 - d.len = 0 -} - -// New returns a new hash.Hash computing the SHA512 checksum. -func New() hash.Hash { - d := new(digest) - d.Reset() - return d -} - -// New384 returns a new hash.Hash computing the SHA384 checksum. -func New384() hash.Hash { - d := new(digest) - d.is384 = true - d.Reset() - return d -} - -func (d *digest) Size() int { - if !d.is384 { - return Size - } - return Size384 -} - -func (d *digest) BlockSize() int { return BlockSize } - -func (d *digest) Write(p []byte) (nn int, err error) { - nn = len(p) - d.len += uint64(nn) - if d.nx > 0 { - n := copy(d.x[d.nx:], p) - d.nx += n - if d.nx == chunk { - block(d, d.x[:]) - d.nx = 0 - } - p = p[n:] - } - if len(p) >= chunk { - n := len(p) &^ (chunk - 1) - block(d, p[:n]) - p = p[n:] - } - if len(p) > 0 { - d.nx = copy(d.x[:], p) - } - return -} - -func (d0 *digest) Sum(in []byte) []byte { - // Make a copy of d0 so that caller can keep writing and summing. - d := new(digest) - *d = *d0 - hash := d.checkSum() - if d.is384 { - return append(in, hash[:Size384]...) - } - return append(in, hash[:]...) -} - -func (d *digest) checkSum() [Size]byte { - // Padding. Add a 1 bit and 0 bits until 112 bytes mod 128. - len := d.len - var tmp [128]byte - tmp[0] = 0x80 - if len%128 < 112 { - d.Write(tmp[0 : 112-len%128]) - } else { - d.Write(tmp[0 : 128+112-len%128]) - } - - // Length in bits. - len <<= 3 - for i := uint(0); i < 16; i++ { - tmp[i] = byte(len >> (120 - 8*i)) - } - d.Write(tmp[0:16]) - - if d.nx != 0 { - panic("d.nx != 0") - } - - h := d.h[:] - if d.is384 { - h = d.h[:6] - } - - var digest [Size]byte - for i, s := range h { - digest[i*8] = byte(s >> 56) - digest[i*8+1] = byte(s >> 48) - digest[i*8+2] = byte(s >> 40) - digest[i*8+3] = byte(s >> 32) - digest[i*8+4] = byte(s >> 24) - digest[i*8+5] = byte(s >> 16) - digest[i*8+6] = byte(s >> 8) - digest[i*8+7] = byte(s) - } - - return digest -} - -// Sum512 returns the SHA512 checksum of the data. -func Sum512(data []byte) [Size]byte { - var d digest - d.Reset() - d.Write(data) - return d.checkSum() -} - -// Sum384 returns the SHA384 checksum of the data. -func Sum384(data []byte) (sum384 [Size384]byte) { - var d digest - d.is384 = true - d.Reset() - d.Write(data) - sum := d.checkSum() - copy(sum384[:], sum[:Size384]) - return -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512block.go b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512block.go deleted file mode 100644 index 648ae8f7..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512block.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 - -// SHA512 block step. -// In its own file so that a faster assembly or C version -// can be substituted easily. 
- -package sha512 - -var _K = []uint64{ - 0x428a2f98d728ae22, - 0x7137449123ef65cd, - 0xb5c0fbcfec4d3b2f, - 0xe9b5dba58189dbbc, - 0x3956c25bf348b538, - 0x59f111f1b605d019, - 0x923f82a4af194f9b, - 0xab1c5ed5da6d8118, - 0xd807aa98a3030242, - 0x12835b0145706fbe, - 0x243185be4ee4b28c, - 0x550c7dc3d5ffb4e2, - 0x72be5d74f27b896f, - 0x80deb1fe3b1696b1, - 0x9bdc06a725c71235, - 0xc19bf174cf692694, - 0xe49b69c19ef14ad2, - 0xefbe4786384f25e3, - 0x0fc19dc68b8cd5b5, - 0x240ca1cc77ac9c65, - 0x2de92c6f592b0275, - 0x4a7484aa6ea6e483, - 0x5cb0a9dcbd41fbd4, - 0x76f988da831153b5, - 0x983e5152ee66dfab, - 0xa831c66d2db43210, - 0xb00327c898fb213f, - 0xbf597fc7beef0ee4, - 0xc6e00bf33da88fc2, - 0xd5a79147930aa725, - 0x06ca6351e003826f, - 0x142929670a0e6e70, - 0x27b70a8546d22ffc, - 0x2e1b21385c26c926, - 0x4d2c6dfc5ac42aed, - 0x53380d139d95b3df, - 0x650a73548baf63de, - 0x766a0abb3c77b2a8, - 0x81c2c92e47edaee6, - 0x92722c851482353b, - 0xa2bfe8a14cf10364, - 0xa81a664bbc423001, - 0xc24b8b70d0f89791, - 0xc76c51a30654be30, - 0xd192e819d6ef5218, - 0xd69906245565a910, - 0xf40e35855771202a, - 0x106aa07032bbd1b8, - 0x19a4c116b8d2d0c8, - 0x1e376c085141ab53, - 0x2748774cdf8eeb99, - 0x34b0bcb5e19b48a8, - 0x391c0cb3c5c95a63, - 0x4ed8aa4ae3418acb, - 0x5b9cca4f7763e373, - 0x682e6ff3d6b2b8a3, - 0x748f82ee5defb2fc, - 0x78a5636f43172f60, - 0x84c87814a1f0ab72, - 0x8cc702081a6439ec, - 0x90befffa23631e28, - 0xa4506cebde82bde9, - 0xbef9a3f7b2c67915, - 0xc67178f2e372532b, - 0xca273eceea26619c, - 0xd186b8c721c0c207, - 0xeada7dd6cde0eb1e, - 0xf57d4f7fee6ed178, - 0x06f067aa72176fba, - 0x0a637dc5a2c898a6, - 0x113f9804bef90dae, - 0x1b710b35131c471b, - 0x28db77f523047d84, - 0x32caab7b40c72493, - 0x3c9ebe0a15c9bebc, - 0x431d67c49c100d4c, - 0x4cc5d4becb3e42b6, - 0x597f299cfc657e2a, - 0x5fcb6fab3ad6faec, - 0x6c44198c4a475817, -} - -func block(dig *digest, p []byte) { - var w [80]uint64 - h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] - for len(p) >= chunk { - for i := 0; i < 16; i++ { - j := i * 8 - w[i] = uint64(p[j])<<56 | uint64(p[j+1])<<48 | uint64(p[j+2])<<40 | uint64(p[j+3])<<32 | - uint64(p[j+4])<<24 | uint64(p[j+5])<<16 | uint64(p[j+6])<<8 | uint64(p[j+7]) - } - for i := 16; i < 80; i++ { - v1 := w[i-2] - t1 := (v1>>19 | v1<<(64-19)) ^ (v1>>61 | v1<<(64-61)) ^ (v1 >> 6) - v2 := w[i-15] - t2 := (v2>>1 | v2<<(64-1)) ^ (v2>>8 | v2<<(64-8)) ^ (v2 >> 7) - - w[i] = t1 + w[i-7] + t2 + w[i-16] - } - - a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7 - - for i := 0; i < 80; i++ { - t1 := h + ((e>>14 | e<<(64-14)) ^ (e>>18 | e<<(64-18)) ^ (e>>41 | e<<(64-41))) + ((e & f) ^ (^e & g)) + _K[i] + w[i] - - t2 := ((a>>28 | a<<(64-28)) ^ (a>>34 | a<<(64-34)) ^ (a>>39 | a<<(64-39))) + ((a & b) ^ (a & c) ^ (b & c)) - - h = g - g = f - f = e - e = d + t1 - d = c - c = b - b = a - a = t1 + t2 - } - - h0 += a - h1 += b - h2 += c - h3 += d - h4 += e - h5 += f - h6 += g - h7 += h - - p = p[chunk:] - } - - dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512block_amd64.s b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512block_amd64.s deleted file mode 100644 index 2e10233d..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512block_amd64.s +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "textflag.h" - -// SHA512 block routine. See sha512block.go for Go equivalent. -// -// The algorithm is detailed in FIPS 180-4: -// -// http://csrc.nist.gov/publications/fips/fips180-4/fips-180-4.pdf -// -// Wt = Mt; for 0 <= t <= 15 -// Wt = SIGMA1(Wt-2) + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 79 -// -// a = H0 -// b = H1 -// c = H2 -// d = H3 -// e = H4 -// f = H5 -// g = H6 -// h = H7 -// -// for t = 0 to 79 { -// T1 = h + BIGSIGMA1(e) + Ch(e,f,g) + Kt + Wt -// T2 = BIGSIGMA0(a) + Maj(a,b,c) -// h = g -// g = f -// f = e -// e = d + T1 -// d = c -// c = b -// b = a -// a = T1 + T2 -// } -// -// H0 = a + H0 -// H1 = b + H1 -// H2 = c + H2 -// H3 = d + H3 -// H4 = e + H4 -// H5 = f + H5 -// H6 = g + H6 -// H7 = h + H7 - -// Wt = Mt; for 0 <= t <= 15 -#define MSGSCHEDULE0(index) \ - MOVQ (index*8)(SI), AX; \ - BSWAPQ AX; \ - MOVQ AX, (index*8)(BP) - -// Wt = SIGMA1(Wt-2) + Wt-7 + SIGMA0(Wt-15) + Wt-16; for 16 <= t <= 79 -// SIGMA0(x) = ROTR(1,x) XOR ROTR(8,x) XOR SHR(7,x) -// SIGMA1(x) = ROTR(19,x) XOR ROTR(61,x) XOR SHR(6,x) -#define MSGSCHEDULE1(index) \ - MOVQ ((index-2)*8)(BP), AX; \ - MOVQ AX, CX; \ - RORQ $19, AX; \ - MOVQ CX, DX; \ - RORQ $61, CX; \ - SHRQ $6, DX; \ - MOVQ ((index-15)*8)(BP), BX; \ - XORQ CX, AX; \ - MOVQ BX, CX; \ - XORQ DX, AX; \ - RORQ $1, BX; \ - MOVQ CX, DX; \ - SHRQ $7, DX; \ - RORQ $8, CX; \ - ADDQ ((index-7)*8)(BP), AX; \ - XORQ CX, BX; \ - XORQ DX, BX; \ - ADDQ ((index-16)*8)(BP), BX; \ - ADDQ BX, AX; \ - MOVQ AX, ((index)*8)(BP) - -// Calculate T1 in AX - uses AX, CX and DX registers. -// h is also used as an accumulator. Wt is passed in AX. -// T1 = h + BIGSIGMA1(e) + Ch(e, f, g) + Kt + Wt -// BIGSIGMA1(x) = ROTR(14,x) XOR ROTR(18,x) XOR ROTR(41,x) -// Ch(x, y, z) = (x AND y) XOR (NOT x AND z) -#define SHA512T1(const, e, f, g, h) \ - MOVQ $const, DX; \ - ADDQ AX, h; \ - MOVQ e, AX; \ - ADDQ DX, h; \ - MOVQ e, CX; \ - RORQ $14, AX; \ - MOVQ e, DX; \ - RORQ $18, CX; \ - XORQ CX, AX; \ - MOVQ e, CX; \ - RORQ $41, DX; \ - ANDQ f, CX; \ - XORQ AX, DX; \ - MOVQ e, AX; \ - NOTQ AX; \ - ADDQ DX, h; \ - ANDQ g, AX; \ - XORQ CX, AX; \ - ADDQ h, AX - -// Calculate T2 in BX - uses BX, CX, DX and DI registers. -// T2 = BIGSIGMA0(a) + Maj(a, b, c) -// BIGSIGMA0(x) = ROTR(28,x) XOR ROTR(34,x) XOR ROTR(39,x) -// Maj(x, y, z) = (x AND y) XOR (x AND z) XOR (y AND z) -#define SHA512T2(a, b, c) \ - MOVQ a, DI; \ - MOVQ c, BX; \ - RORQ $28, DI; \ - MOVQ a, DX; \ - ANDQ b, BX; \ - RORQ $34, DX; \ - MOVQ a, CX; \ - ANDQ c, CX; \ - XORQ DX, DI; \ - XORQ CX, BX; \ - MOVQ a, DX; \ - MOVQ b, CX; \ - RORQ $39, DX; \ - ANDQ a, CX; \ - XORQ CX, BX; \ - XORQ DX, DI; \ - ADDQ DI, BX - -// Calculate T1 and T2, then e = d + T1 and a = T1 + T2. -// The values for e and a are stored in d and h, ready for rotation. 
-#define SHA512ROUND(index, const, a, b, c, d, e, f, g, h) \ - SHA512T1(const, e, f, g, h); \ - SHA512T2(a, b, c); \ - MOVQ BX, h; \ - ADDQ AX, d; \ - ADDQ AX, h - -#define SHA512ROUND0(index, const, a, b, c, d, e, f, g, h) \ - MSGSCHEDULE0(index); \ - SHA512ROUND(index, const, a, b, c, d, e, f, g, h) - -#define SHA512ROUND1(index, const, a, b, c, d, e, f, g, h) \ - MSGSCHEDULE1(index); \ - SHA512ROUND(index, const, a, b, c, d, e, f, g, h) - -TEXT ·block(SB),0,$648-32 - MOVQ p_base+8(FP), SI - MOVQ p_len+16(FP), DX - SHRQ $7, DX - SHLQ $7, DX - - LEAQ (SI)(DX*1), DI - MOVQ DI, 640(SP) - CMPQ SI, DI - JEQ end - - MOVQ dig+0(FP), BP - MOVQ (0*8)(BP), R8 // a = H0 - MOVQ (1*8)(BP), R9 // b = H1 - MOVQ (2*8)(BP), R10 // c = H2 - MOVQ (3*8)(BP), R11 // d = H3 - MOVQ (4*8)(BP), R12 // e = H4 - MOVQ (5*8)(BP), R13 // f = H5 - MOVQ (6*8)(BP), R14 // g = H6 - MOVQ (7*8)(BP), R15 // h = H7 - -loop: - MOVQ SP, BP // message schedule - - SHA512ROUND0(0, 0x428a2f98d728ae22, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND0(1, 0x7137449123ef65cd, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND0(2, 0xb5c0fbcfec4d3b2f, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND0(3, 0xe9b5dba58189dbbc, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND0(4, 0x3956c25bf348b538, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND0(5, 0x59f111f1b605d019, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND0(6, 0x923f82a4af194f9b, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND0(7, 0xab1c5ed5da6d8118, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND0(8, 0xd807aa98a3030242, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND0(9, 0x12835b0145706fbe, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND0(10, 0x243185be4ee4b28c, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND0(11, 0x550c7dc3d5ffb4e2, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND0(12, 0x72be5d74f27b896f, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND0(13, 0x80deb1fe3b1696b1, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND0(14, 0x9bdc06a725c71235, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND0(15, 0xc19bf174cf692694, R9, R10, R11, R12, R13, R14, R15, R8) - - SHA512ROUND1(16, 0xe49b69c19ef14ad2, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(17, 0xefbe4786384f25e3, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND1(18, 0x0fc19dc68b8cd5b5, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(19, 0x240ca1cc77ac9c65, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(20, 0x2de92c6f592b0275, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(21, 0x4a7484aa6ea6e483, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(22, 0x5cb0a9dcbd41fbd4, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(23, 0x76f988da831153b5, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND1(24, 0x983e5152ee66dfab, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(25, 0xa831c66d2db43210, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND1(26, 0xb00327c898fb213f, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(27, 0xbf597fc7beef0ee4, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(28, 0xc6e00bf33da88fc2, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(29, 0xd5a79147930aa725, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(30, 0x06ca6351e003826f, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(31, 0x142929670a0e6e70, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND1(32, 0x27b70a8546d22ffc, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(33, 0x2e1b21385c26c926, R15, R8, R9, R10, R11, R12, R13, R14) - 
SHA512ROUND1(34, 0x4d2c6dfc5ac42aed, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(35, 0x53380d139d95b3df, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(36, 0x650a73548baf63de, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(37, 0x766a0abb3c77b2a8, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(38, 0x81c2c92e47edaee6, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(39, 0x92722c851482353b, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND1(40, 0xa2bfe8a14cf10364, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(41, 0xa81a664bbc423001, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND1(42, 0xc24b8b70d0f89791, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(43, 0xc76c51a30654be30, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(44, 0xd192e819d6ef5218, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(45, 0xd69906245565a910, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(46, 0xf40e35855771202a, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(47, 0x106aa07032bbd1b8, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND1(48, 0x19a4c116b8d2d0c8, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(49, 0x1e376c085141ab53, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND1(50, 0x2748774cdf8eeb99, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(51, 0x34b0bcb5e19b48a8, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(52, 0x391c0cb3c5c95a63, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(53, 0x4ed8aa4ae3418acb, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(54, 0x5b9cca4f7763e373, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(55, 0x682e6ff3d6b2b8a3, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND1(56, 0x748f82ee5defb2fc, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(57, 0x78a5636f43172f60, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND1(58, 0x84c87814a1f0ab72, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(59, 0x8cc702081a6439ec, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(60, 0x90befffa23631e28, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(61, 0xa4506cebde82bde9, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(62, 0xbef9a3f7b2c67915, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(63, 0xc67178f2e372532b, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND1(64, 0xca273eceea26619c, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(65, 0xd186b8c721c0c207, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND1(66, 0xeada7dd6cde0eb1e, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(67, 0xf57d4f7fee6ed178, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(68, 0x06f067aa72176fba, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(69, 0x0a637dc5a2c898a6, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(70, 0x113f9804bef90dae, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(71, 0x1b710b35131c471b, R9, R10, R11, R12, R13, R14, R15, R8) - SHA512ROUND1(72, 0x28db77f523047d84, R8, R9, R10, R11, R12, R13, R14, R15) - SHA512ROUND1(73, 0x32caab7b40c72493, R15, R8, R9, R10, R11, R12, R13, R14) - SHA512ROUND1(74, 0x3c9ebe0a15c9bebc, R14, R15, R8, R9, R10, R11, R12, R13) - SHA512ROUND1(75, 0x431d67c49c100d4c, R13, R14, R15, R8, R9, R10, R11, R12) - SHA512ROUND1(76, 0x4cc5d4becb3e42b6, R12, R13, R14, R15, R8, R9, R10, R11) - SHA512ROUND1(77, 0x597f299cfc657e2a, R11, R12, R13, R14, R15, R8, R9, R10) - SHA512ROUND1(78, 0x5fcb6fab3ad6faec, R10, R11, R12, R13, R14, R15, R8, R9) - SHA512ROUND1(79, 0x6c44198c4a475817, R9, R10, R11, R12, R13, R14, R15, R8) - - MOVQ 
dig+0(FP), BP - ADDQ (0*8)(BP), R8 // H0 = a + H0 - MOVQ R8, (0*8)(BP) - ADDQ (1*8)(BP), R9 // H1 = b + H1 - MOVQ R9, (1*8)(BP) - ADDQ (2*8)(BP), R10 // H2 = c + H2 - MOVQ R10, (2*8)(BP) - ADDQ (3*8)(BP), R11 // H3 = d + H3 - MOVQ R11, (3*8)(BP) - ADDQ (4*8)(BP), R12 // H4 = e + H4 - MOVQ R12, (4*8)(BP) - ADDQ (5*8)(BP), R13 // H5 = f + H5 - MOVQ R13, (5*8)(BP) - ADDQ (6*8)(BP), R14 // H6 = g + H6 - MOVQ R14, (6*8)(BP) - ADDQ (7*8)(BP), R15 // H7 = h + H7 - MOVQ R15, (7*8)(BP) - - ADDQ $128, SI - CMPQ SI, 640(SP) - JB loop - -end: - RET diff --git a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512block_decl.go b/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512block_decl.go deleted file mode 100644 index bef99de2..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/stevvooe/resumable/sha512/sha512block_decl.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build amd64 - -package sha512 - -//go:noescape - -func block(dig *digest, p []byte) diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/.gitignore b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/.gitignore deleted file mode 100644 index 83c8f823..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -*.[68] -*.a -*.out -*.swp -_obj -_testmain.go -cmd/metrics-bench/metrics-bench -cmd/metrics-example/metrics-example -cmd/never-read/never-read diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/LICENSE deleted file mode 100644 index 363fa9ee..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -Copyright 2012 Richard Crowley. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - -THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS -OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF -THE POSSIBILITY OF SUCH DAMAGE. 
- -The views and conclusions contained in the software and documentation -are those of the authors and should not be interpreted as representing -official policies, either expressed or implied, of Richard Crowley. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/README.md b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/README.md deleted file mode 100644 index e0091a4b..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/README.md +++ /dev/null @@ -1,104 +0,0 @@ -go-metrics -========== - -Go port of Coda Hale's Metrics library: . - -Documentation: . - -Usage ------ - -Create and update metrics: - -```go -c := metrics.NewCounter() -metrics.Register("foo", c) -c.Inc(47) - -g := metrics.NewGauge() -metrics.Register("bar", g) -g.Update(47) - -s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028) -h := metrics.NewHistogram(s) -metrics.Register("baz", h) -h.Update(47) - -m := metrics.NewMeter() -metrics.Register("quux", m) -m.Mark(47) - -t := metrics.NewTimer() -metrics.Register("bang", t) -t.Time(func() {}) -t.Update(47) -``` - -Periodically log every metric in human-readable form to standard error: - -```go -go metrics.Log(metrics.DefaultRegistry, 60e9, log.New(os.Stderr, "metrics: ", log.Lmicroseconds)) -``` - -Periodically log every metric in slightly-more-parseable form to syslog: - -```go -w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics") -go metrics.Syslog(metrics.DefaultRegistry, 60e9, w) -``` - -Periodically emit every metric to Graphite: - -```go -addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003") -go metrics.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr) -``` - -Periodically emit every metric into InfluxDB: - -```go -import "github.com/rcrowley/go-metrics/influxdb" - -go influxdb.Influxdb(metrics.DefaultRegistry, 10e9, &influxdb.Config{ - Host: "127.0.0.1:8086", - Database: "metrics", - Username: "test", - Password: "test", -}) -``` - -Periodically upload every metric to Librato: - -```go -import "github.com/rcrowley/go-metrics/librato" - -go librato.Librato(metrics.DefaultRegistry, - 10e9, // interval - "example@example.com", // account owner email address - "token", // Librato API token - "hostname", // source - []float64{0.95}, // precentiles to send - time.Millisecond, // time unit -) -``` - -Periodically emit every metric to StatHat: - -```go -import "github.com/rcrowley/go-metrics/stathat" - -go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com") -``` - -Installation ------------- - -```sh -go get github.com/rcrowley/go-metrics -``` - -StatHat support additionally requires their Go client: - -```sh -go get github.com/stathat/go -``` diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/counter.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/counter.go deleted file mode 100644 index bb7b039c..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/counter.go +++ /dev/null @@ -1,112 +0,0 @@ -package metrics - -import "sync/atomic" - -// Counters hold an int64 value that can be incremented and decremented. -type Counter interface { - Clear() - Count() int64 - Dec(int64) - Inc(int64) - Snapshot() Counter -} - -// GetOrRegisterCounter returns an existing Counter or constructs and registers -// a new StandardCounter. 
-func GetOrRegisterCounter(name string, r Registry) Counter { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewCounter).(Counter) -} - -// NewCounter constructs a new StandardCounter. -func NewCounter() Counter { - if UseNilMetrics { - return NilCounter{} - } - return &StandardCounter{0} -} - -// NewRegisteredCounter constructs and registers a new StandardCounter. -func NewRegisteredCounter(name string, r Registry) Counter { - c := NewCounter() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// CounterSnapshot is a read-only copy of another Counter. -type CounterSnapshot int64 - -// Clear panics. -func (CounterSnapshot) Clear() { - panic("Clear called on a CounterSnapshot") -} - -// Count returns the count at the time the snapshot was taken. -func (c CounterSnapshot) Count() int64 { return int64(c) } - -// Dec panics. -func (CounterSnapshot) Dec(int64) { - panic("Dec called on a CounterSnapshot") -} - -// Inc panics. -func (CounterSnapshot) Inc(int64) { - panic("Inc called on a CounterSnapshot") -} - -// Snapshot returns the snapshot. -func (c CounterSnapshot) Snapshot() Counter { return c } - -// NilCounter is a no-op Counter. -type NilCounter struct{} - -// Clear is a no-op. -func (NilCounter) Clear() {} - -// Count is a no-op. -func (NilCounter) Count() int64 { return 0 } - -// Dec is a no-op. -func (NilCounter) Dec(i int64) {} - -// Inc is a no-op. -func (NilCounter) Inc(i int64) {} - -// Snapshot is a no-op. -func (NilCounter) Snapshot() Counter { return NilCounter{} } - -// StandardCounter is the standard implementation of a Counter and uses the -// sync/atomic package to manage a single int64 value. -type StandardCounter struct { - count int64 -} - -// Clear sets the counter to zero. -func (c *StandardCounter) Clear() { - atomic.StoreInt64(&c.count, 0) -} - -// Count returns the current count. -func (c *StandardCounter) Count() int64 { - return atomic.LoadInt64(&c.count) -} - -// Dec decrements the counter by the given amount. -func (c *StandardCounter) Dec(i int64) { - atomic.AddInt64(&c.count, -i) -} - -// Inc increments the counter by the given amount. -func (c *StandardCounter) Inc(i int64) { - atomic.AddInt64(&c.count, i) -} - -// Snapshot returns a read-only copy of the counter. -func (c *StandardCounter) Snapshot() Counter { - return CounterSnapshot(c.Count()) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/debug.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/debug.go deleted file mode 100644 index 043ccefa..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/debug.go +++ /dev/null @@ -1,76 +0,0 @@ -package metrics - -import ( - "runtime/debug" - "time" -) - -var ( - debugMetrics struct { - GCStats struct { - LastGC Gauge - NumGC Gauge - Pause Histogram - //PauseQuantiles Histogram - PauseTotal Gauge - } - ReadGCStats Timer - } - gcStats debug.GCStats -) - -// Capture new values for the Go garbage collector statistics exported in -// debug.GCStats. This is designed to be called as a goroutine. -func CaptureDebugGCStats(r Registry, d time.Duration) { - for _ = range time.Tick(d) { - CaptureDebugGCStatsOnce(r) - } -} - -// Capture new values for the Go garbage collector statistics exported in -// debug.GCStats. This is designed to be called in a background goroutine. -// Giving a registry which has not been given to RegisterDebugGCStats will -// panic. 
-// -// Be careful (but much less so) with this because debug.ReadGCStats calls -// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world -// operation, isn't something you want to be doing all the time. -func CaptureDebugGCStatsOnce(r Registry) { - lastGC := gcStats.LastGC - t := time.Now() - debug.ReadGCStats(&gcStats) - debugMetrics.ReadGCStats.UpdateSince(t) - - debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano())) - debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC)) - if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) { - debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0])) - } - //debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles) - debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal)) -} - -// Register metrics for the Go garbage collector statistics exported in -// debug.GCStats. The metrics are named by their fully-qualified Go symbols, -// i.e. debug.GCStats.PauseTotal. -func RegisterDebugGCStats(r Registry) { - debugMetrics.GCStats.LastGC = NewGauge() - debugMetrics.GCStats.NumGC = NewGauge() - debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015)) - //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015)) - debugMetrics.GCStats.PauseTotal = NewGauge() - debugMetrics.ReadGCStats = NewTimer() - - r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC) - r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC) - r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause) - //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles) - r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal) - r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats) -} - -// Allocate an initial slice for gcStats.Pause to avoid allocations during -// normal operation. -func init() { - gcStats.Pause = make([]time.Duration, 11) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/ewma.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/ewma.go deleted file mode 100644 index 7c152a17..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/ewma.go +++ /dev/null @@ -1,118 +0,0 @@ -package metrics - -import ( - "math" - "sync" - "sync/atomic" -) - -// EWMAs continuously calculate an exponentially-weighted moving average -// based on an outside source of clock ticks. -type EWMA interface { - Rate() float64 - Snapshot() EWMA - Tick() - Update(int64) -} - -// NewEWMA constructs a new EWMA with the given alpha. -func NewEWMA(alpha float64) EWMA { - if UseNilMetrics { - return NilEWMA{} - } - return &StandardEWMA{alpha: alpha} -} - -// NewEWMA1 constructs a new EWMA for a one-minute moving average. -func NewEWMA1() EWMA { - return NewEWMA(1 - math.Exp(-5.0/60.0/1)) -} - -// NewEWMA5 constructs a new EWMA for a five-minute moving average. -func NewEWMA5() EWMA { - return NewEWMA(1 - math.Exp(-5.0/60.0/5)) -} - -// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average. -func NewEWMA15() EWMA { - return NewEWMA(1 - math.Exp(-5.0/60.0/15)) -} - -// EWMASnapshot is a read-only copy of another EWMA. -type EWMASnapshot float64 - -// Rate returns the rate of events per second at the time the snapshot was -// taken. -func (a EWMASnapshot) Rate() float64 { return float64(a) } - -// Snapshot returns the snapshot. -func (a EWMASnapshot) Snapshot() EWMA { return a } - -// Tick panics. 
-func (EWMASnapshot) Tick() { - panic("Tick called on an EWMASnapshot") -} - -// Update panics. -func (EWMASnapshot) Update(int64) { - panic("Update called on an EWMASnapshot") -} - -// NilEWMA is a no-op EWMA. -type NilEWMA struct{} - -// Rate is a no-op. -func (NilEWMA) Rate() float64 { return 0.0 } - -// Snapshot is a no-op. -func (NilEWMA) Snapshot() EWMA { return NilEWMA{} } - -// Tick is a no-op. -func (NilEWMA) Tick() {} - -// Update is a no-op. -func (NilEWMA) Update(n int64) {} - -// StandardEWMA is the standard implementation of an EWMA and tracks the number -// of uncounted events and processes them on each tick. It uses the -// sync/atomic package to manage uncounted events. -type StandardEWMA struct { - uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment - alpha float64 - rate float64 - init bool - mutex sync.Mutex -} - -// Rate returns the moving average rate of events per second. -func (a *StandardEWMA) Rate() float64 { - a.mutex.Lock() - defer a.mutex.Unlock() - return a.rate * float64(1e9) -} - -// Snapshot returns a read-only copy of the EWMA. -func (a *StandardEWMA) Snapshot() EWMA { - return EWMASnapshot(a.Rate()) -} - -// Tick ticks the clock to update the moving average. It assumes it is called -// every five seconds. -func (a *StandardEWMA) Tick() { - count := atomic.LoadInt64(&a.uncounted) - atomic.AddInt64(&a.uncounted, -count) - instantRate := float64(count) / float64(5e9) - a.mutex.Lock() - defer a.mutex.Unlock() - if a.init { - a.rate += a.alpha * (instantRate - a.rate) - } else { - a.init = true - a.rate = instantRate - } -} - -// Update adds n uncounted events. -func (a *StandardEWMA) Update(n int64) { - atomic.AddInt64(&a.uncounted, n) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/gauge.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/gauge.go deleted file mode 100644 index 807638a3..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/gauge.go +++ /dev/null @@ -1,84 +0,0 @@ -package metrics - -import "sync/atomic" - -// Gauges hold an int64 value that can be set arbitrarily. -type Gauge interface { - Snapshot() Gauge - Update(int64) - Value() int64 -} - -// GetOrRegisterGauge returns an existing Gauge or constructs and registers a -// new StandardGauge. -func GetOrRegisterGauge(name string, r Registry) Gauge { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewGauge).(Gauge) -} - -// NewGauge constructs a new StandardGauge. -func NewGauge() Gauge { - if UseNilMetrics { - return NilGauge{} - } - return &StandardGauge{0} -} - -// NewRegisteredGauge constructs and registers a new StandardGauge. -func NewRegisteredGauge(name string, r Registry) Gauge { - c := NewGauge() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// GaugeSnapshot is a read-only copy of another Gauge. -type GaugeSnapshot int64 - -// Snapshot returns the snapshot. -func (g GaugeSnapshot) Snapshot() Gauge { return g } - -// Update panics. -func (GaugeSnapshot) Update(int64) { - panic("Update called on a GaugeSnapshot") -} - -// Value returns the value at the time the snapshot was taken. -func (g GaugeSnapshot) Value() int64 { return int64(g) } - -// NilGauge is a no-op Gauge. -type NilGauge struct{} - -// Snapshot is a no-op. -func (NilGauge) Snapshot() Gauge { return NilGauge{} } - -// Update is a no-op. -func (NilGauge) Update(v int64) {} - -// Value is a no-op. 
-func (NilGauge) Value() int64 { return 0 } - -// StandardGauge is the standard implementation of a Gauge and uses the -// sync/atomic package to manage a single int64 value. -type StandardGauge struct { - value int64 -} - -// Snapshot returns a read-only copy of the gauge. -func (g *StandardGauge) Snapshot() Gauge { - return GaugeSnapshot(g.Value()) -} - -// Update updates the gauge's value. -func (g *StandardGauge) Update(v int64) { - atomic.StoreInt64(&g.value, v) -} - -// Value returns the gauge's current value. -func (g *StandardGauge) Value() int64 { - return atomic.LoadInt64(&g.value) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/gauge_float64.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/gauge_float64.go deleted file mode 100644 index 47c3566c..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/gauge_float64.go +++ /dev/null @@ -1,91 +0,0 @@ -package metrics - -import "sync" - -// GaugeFloat64s hold a float64 value that can be set arbitrarily. -type GaugeFloat64 interface { - Snapshot() GaugeFloat64 - Update(float64) - Value() float64 -} - -// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a -// new StandardGaugeFloat64. -func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64) -} - -// NewGaugeFloat64 constructs a new StandardGaugeFloat64. -func NewGaugeFloat64() GaugeFloat64 { - if UseNilMetrics { - return NilGaugeFloat64{} - } - return &StandardGaugeFloat64{ - value: 0.0, - } -} - -// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64. -func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 { - c := NewGaugeFloat64() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64. -type GaugeFloat64Snapshot float64 - -// Snapshot returns the snapshot. -func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g } - -// Update panics. -func (GaugeFloat64Snapshot) Update(float64) { - panic("Update called on a GaugeFloat64Snapshot") -} - -// Value returns the value at the time the snapshot was taken. -func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) } - -// NilGauge is a no-op Gauge. -type NilGaugeFloat64 struct{} - -// Snapshot is a no-op. -func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} } - -// Update is a no-op. -func (NilGaugeFloat64) Update(v float64) {} - -// Value is a no-op. -func (NilGaugeFloat64) Value() float64 { return 0.0 } - -// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses -// sync.Mutex to manage a single float64 value. -type StandardGaugeFloat64 struct { - mutex sync.Mutex - value float64 -} - -// Snapshot returns a read-only copy of the gauge. -func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 { - return GaugeFloat64Snapshot(g.Value()) -} - -// Update updates the gauge's value. -func (g *StandardGaugeFloat64) Update(v float64) { - g.mutex.Lock() - defer g.mutex.Unlock() - g.value = v -} - -// Value returns the gauge's current value. 
-func (g *StandardGaugeFloat64) Value() float64 { - g.mutex.Lock() - defer g.mutex.Unlock() - return g.value -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/graphite.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/graphite.go deleted file mode 100644 index 643b3ec5..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/graphite.go +++ /dev/null @@ -1,104 +0,0 @@ -package metrics - -import ( - "bufio" - "fmt" - "log" - "net" - "strconv" - "strings" - "time" -) - -// GraphiteConfig provides a container with configuration parameters for -// the Graphite exporter -type GraphiteConfig struct { - Addr *net.TCPAddr // Network address to connect to - Registry Registry // Registry to be exported - FlushInterval time.Duration // Flush interval - DurationUnit time.Duration // Time conversion unit for durations - Prefix string // Prefix to be prepended to metric names - Percentiles []float64 // Percentiles to export from timers and histograms -} - -// Graphite is a blocking exporter function which reports metrics in r -// to a graphite server located at addr, flushing them every d duration -// and prepending metric names with prefix. -func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) { - GraphiteWithConfig(GraphiteConfig{ - Addr: addr, - Registry: r, - FlushInterval: d, - DurationUnit: time.Nanosecond, - Prefix: prefix, - Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999}, - }) -} - -// GraphiteWithConfig is a blocking exporter function just like Graphite, -// but it takes a GraphiteConfig instead. -func GraphiteWithConfig(c GraphiteConfig) { - for _ = range time.Tick(c.FlushInterval) { - if err := graphite(&c); nil != err { - log.Println(err) - } - } -} - -func graphite(c *GraphiteConfig) error { - now := time.Now().Unix() - du := float64(c.DurationUnit) - conn, err := net.DialTCP("tcp", nil, c.Addr) - if nil != err { - return err - } - defer conn.Close() - w := bufio.NewWriter(conn) - c.Registry.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case Counter: - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now) - case Gauge: - fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now) - case GaugeFloat64: - fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles(c.Percentiles) - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now) - fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now) - fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now) - fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now) - fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now) - for psIdx, psKey := range c.Percentiles { - key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) - fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) - } - case Meter: - m := metric.Snapshot() - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now) - fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now) - fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now) - fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now) - fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now) - case Timer: - t := metric.Snapshot() - ps := 
t.Percentiles(c.Percentiles) - fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now) - fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, int64(du)*t.Min(), now) - fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, int64(du)*t.Max(), now) - fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, du*t.Mean(), now) - fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, du*t.StdDev(), now) - for psIdx, psKey := range c.Percentiles { - key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) - fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) - } - fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now) - fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now) - fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now) - fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now) - } - w.Flush() - }) - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/healthcheck.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/healthcheck.go deleted file mode 100644 index 445131ca..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/healthcheck.go +++ /dev/null @@ -1,61 +0,0 @@ -package metrics - -// Healthchecks hold an error value describing an arbitrary up/down status. -type Healthcheck interface { - Check() - Error() error - Healthy() - Unhealthy(error) -} - -// NewHealthcheck constructs a new Healthcheck which will use the given -// function to update its status. -func NewHealthcheck(f func(Healthcheck)) Healthcheck { - if UseNilMetrics { - return NilHealthcheck{} - } - return &StandardHealthcheck{nil, f} -} - -// NilHealthcheck is a no-op. -type NilHealthcheck struct{} - -// Check is a no-op. -func (NilHealthcheck) Check() {} - -// Error is a no-op. -func (NilHealthcheck) Error() error { return nil } - -// Healthy is a no-op. -func (NilHealthcheck) Healthy() {} - -// Unhealthy is a no-op. -func (NilHealthcheck) Unhealthy(error) {} - -// StandardHealthcheck is the standard implementation of a Healthcheck and -// stores the status and a function to call to update the status. -type StandardHealthcheck struct { - err error - f func(Healthcheck) -} - -// Check runs the healthcheck function to update the healthcheck's status. -func (h *StandardHealthcheck) Check() { - h.f(h) -} - -// Error returns the healthcheck's status, which will be nil if it is healthy. -func (h *StandardHealthcheck) Error() error { - return h.err -} - -// Healthy marks the healthcheck as healthy. -func (h *StandardHealthcheck) Healthy() { - h.err = nil -} - -// Unhealthy marks the healthcheck as unhealthy. The error is stored and -// may be retrieved by the Error method. -func (h *StandardHealthcheck) Unhealthy(err error) { - h.err = err -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/histogram.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/histogram.go deleted file mode 100644 index 7f3ee70c..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/histogram.go +++ /dev/null @@ -1,192 +0,0 @@ -package metrics - -// Histograms calculate distribution statistics from a series of int64 values. 
-type Histogram interface { - Clear() - Count() int64 - Max() int64 - Mean() float64 - Min() int64 - Percentile(float64) float64 - Percentiles([]float64) []float64 - Sample() Sample - Snapshot() Histogram - StdDev() float64 - Update(int64) - Variance() float64 -} - -// GetOrRegisterHistogram returns an existing Histogram or constructs and -// registers a new StandardHistogram. -func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram) -} - -// NewHistogram constructs a new StandardHistogram from a Sample. -func NewHistogram(s Sample) Histogram { - if UseNilMetrics { - return NilHistogram{} - } - return &StandardHistogram{sample: s} -} - -// NewRegisteredHistogram constructs and registers a new StandardHistogram from -// a Sample. -func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram { - c := NewHistogram(s) - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// HistogramSnapshot is a read-only copy of another Histogram. -type HistogramSnapshot struct { - sample *SampleSnapshot -} - -// Clear panics. -func (*HistogramSnapshot) Clear() { - panic("Clear called on a HistogramSnapshot") -} - -// Count returns the number of samples recorded at the time the snapshot was -// taken. -func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() } - -// Max returns the maximum value in the sample at the time the snapshot was -// taken. -func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() } - -// Mean returns the mean of the values in the sample at the time the snapshot -// was taken. -func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() } - -// Min returns the minimum value in the sample at the time the snapshot was -// taken. -func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() } - -// Percentile returns an arbitrary percentile of values in the sample at the -// time the snapshot was taken. -func (h *HistogramSnapshot) Percentile(p float64) float64 { - return h.sample.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of values in the sample -// at the time the snapshot was taken. -func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 { - return h.sample.Percentiles(ps) -} - -// Sample returns the Sample underlying the histogram. -func (h *HistogramSnapshot) Sample() Sample { return h.sample } - -// Snapshot returns the snapshot. -func (h *HistogramSnapshot) Snapshot() Histogram { return h } - -// StdDev returns the standard deviation of the values in the sample at the -// time the snapshot was taken. -func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() } - -// Update panics. -func (*HistogramSnapshot) Update(int64) { - panic("Update called on a HistogramSnapshot") -} - -// Variance returns the variance of inputs at the time the snapshot was taken. -func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() } - -// NilHistogram is a no-op Histogram. -type NilHistogram struct{} - -// Clear is a no-op. -func (NilHistogram) Clear() {} - -// Count is a no-op. -func (NilHistogram) Count() int64 { return 0 } - -// Max is a no-op. -func (NilHistogram) Max() int64 { return 0 } - -// Mean is a no-op. -func (NilHistogram) Mean() float64 { return 0.0 } - -// Min is a no-op. -func (NilHistogram) Min() int64 { return 0 } - -// Percentile is a no-op. 
-func (NilHistogram) Percentile(p float64) float64 { return 0.0 } - -// Percentiles is a no-op. -func (NilHistogram) Percentiles(ps []float64) []float64 { - return make([]float64, len(ps)) -} - -// Sample is a no-op. -func (NilHistogram) Sample() Sample { return NilSample{} } - -// Snapshot is a no-op. -func (NilHistogram) Snapshot() Histogram { return NilHistogram{} } - -// StdDev is a no-op. -func (NilHistogram) StdDev() float64 { return 0.0 } - -// Update is a no-op. -func (NilHistogram) Update(v int64) {} - -// Variance is a no-op. -func (NilHistogram) Variance() float64 { return 0.0 } - -// StandardHistogram is the standard implementation of a Histogram and uses a -// Sample to bound its memory use. -type StandardHistogram struct { - sample Sample -} - -// Clear clears the histogram and its sample. -func (h *StandardHistogram) Clear() { h.sample.Clear() } - -// Count returns the number of samples recorded since the histogram was last -// cleared. -func (h *StandardHistogram) Count() int64 { return h.sample.Count() } - -// Max returns the maximum value in the sample. -func (h *StandardHistogram) Max() int64 { return h.sample.Max() } - -// Mean returns the mean of the values in the sample. -func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() } - -// Min returns the minimum value in the sample. -func (h *StandardHistogram) Min() int64 { return h.sample.Min() } - -// Percentile returns an arbitrary percentile of the values in the sample. -func (h *StandardHistogram) Percentile(p float64) float64 { - return h.sample.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of the values in the -// sample. -func (h *StandardHistogram) Percentiles(ps []float64) []float64 { - return h.sample.Percentiles(ps) -} - -// Sample returns the Sample underlying the histogram. -func (h *StandardHistogram) Sample() Sample { return h.sample } - -// Snapshot returns a read-only copy of the histogram. -func (h *StandardHistogram) Snapshot() Histogram { - return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)} -} - -// StdDev returns the standard deviation of the values in the sample. -func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() } - -// Update samples a new value. -func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) } - -// Variance returns the variance of the values in the sample. -func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() } diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/json.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/json.go deleted file mode 100644 index 04a9c919..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/json.go +++ /dev/null @@ -1,83 +0,0 @@ -package metrics - -import ( - "encoding/json" - "io" - "time" -) - -// MarshalJSON returns a byte slice containing a JSON representation of all -// the metrics in the Registry. 
-func (r StandardRegistry) MarshalJSON() ([]byte, error) { - data := make(map[string]map[string]interface{}) - r.Each(func(name string, i interface{}) { - values := make(map[string]interface{}) - switch metric := i.(type) { - case Counter: - values["count"] = metric.Count() - case Gauge: - values["value"] = metric.Value() - case GaugeFloat64: - values["value"] = metric.Value() - case Healthcheck: - values["error"] = nil - metric.Check() - if err := metric.Error(); nil != err { - values["error"] = metric.Error().Error() - } - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - values["count"] = h.Count() - values["min"] = h.Min() - values["max"] = h.Max() - values["mean"] = h.Mean() - values["stddev"] = h.StdDev() - values["median"] = ps[0] - values["75%"] = ps[1] - values["95%"] = ps[2] - values["99%"] = ps[3] - values["99.9%"] = ps[4] - case Meter: - m := metric.Snapshot() - values["count"] = m.Count() - values["1m.rate"] = m.Rate1() - values["5m.rate"] = m.Rate5() - values["15m.rate"] = m.Rate15() - values["mean.rate"] = m.RateMean() - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - values["count"] = t.Count() - values["min"] = t.Min() - values["max"] = t.Max() - values["mean"] = t.Mean() - values["stddev"] = t.StdDev() - values["median"] = ps[0] - values["75%"] = ps[1] - values["95%"] = ps[2] - values["99%"] = ps[3] - values["99.9%"] = ps[4] - values["1m.rate"] = t.Rate1() - values["5m.rate"] = t.Rate5() - values["15m.rate"] = t.Rate15() - values["mean.rate"] = t.RateMean() - } - data[name] = values - }) - return json.Marshal(data) -} - -// WriteJSON writes metrics from the given registry periodically to the -// specified io.Writer as JSON. -func WriteJSON(r Registry, d time.Duration, w io.Writer) { - for _ = range time.Tick(d) { - WriteJSONOnce(r, w) - } -} - -// WriteJSONOnce writes metrics from the given registry to the specified -// io.Writer as JSON. -func WriteJSONOnce(r Registry, w io.Writer) { - json.NewEncoder(w).Encode(r) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/log.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/log.go deleted file mode 100644 index 278a8a44..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/log.go +++ /dev/null @@ -1,70 +0,0 @@ -package metrics - -import ( - "log" - "time" -) - -// Output each metric in the given registry periodically using the given -// logger. 
-func Log(r Registry, d time.Duration, l *log.Logger) { - for _ = range time.Tick(d) { - r.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case Counter: - l.Printf("counter %s\n", name) - l.Printf(" count: %9d\n", metric.Count()) - case Gauge: - l.Printf("gauge %s\n", name) - l.Printf(" value: %9d\n", metric.Value()) - case GaugeFloat64: - l.Printf("gauge %s\n", name) - l.Printf(" value: %f\n", metric.Value()) - case Healthcheck: - metric.Check() - l.Printf("healthcheck %s\n", name) - l.Printf(" error: %v\n", metric.Error()) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - l.Printf("histogram %s\n", name) - l.Printf(" count: %9d\n", h.Count()) - l.Printf(" min: %9d\n", h.Min()) - l.Printf(" max: %9d\n", h.Max()) - l.Printf(" mean: %12.2f\n", h.Mean()) - l.Printf(" stddev: %12.2f\n", h.StdDev()) - l.Printf(" median: %12.2f\n", ps[0]) - l.Printf(" 75%%: %12.2f\n", ps[1]) - l.Printf(" 95%%: %12.2f\n", ps[2]) - l.Printf(" 99%%: %12.2f\n", ps[3]) - l.Printf(" 99.9%%: %12.2f\n", ps[4]) - case Meter: - m := metric.Snapshot() - l.Printf("meter %s\n", name) - l.Printf(" count: %9d\n", m.Count()) - l.Printf(" 1-min rate: %12.2f\n", m.Rate1()) - l.Printf(" 5-min rate: %12.2f\n", m.Rate5()) - l.Printf(" 15-min rate: %12.2f\n", m.Rate15()) - l.Printf(" mean rate: %12.2f\n", m.RateMean()) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - l.Printf("timer %s\n", name) - l.Printf(" count: %9d\n", t.Count()) - l.Printf(" min: %9d\n", t.Min()) - l.Printf(" max: %9d\n", t.Max()) - l.Printf(" mean: %12.2f\n", t.Mean()) - l.Printf(" stddev: %12.2f\n", t.StdDev()) - l.Printf(" median: %12.2f\n", ps[0]) - l.Printf(" 75%%: %12.2f\n", ps[1]) - l.Printf(" 95%%: %12.2f\n", ps[2]) - l.Printf(" 99%%: %12.2f\n", ps[3]) - l.Printf(" 99.9%%: %12.2f\n", ps[4]) - l.Printf(" 1-min rate: %12.2f\n", t.Rate1()) - l.Printf(" 5-min rate: %12.2f\n", t.Rate5()) - l.Printf(" 15-min rate: %12.2f\n", t.Rate15()) - l.Printf(" mean rate: %12.2f\n", t.RateMean()) - } - }) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/memory.md b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/memory.md deleted file mode 100644 index 47454f54..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/memory.md +++ /dev/null @@ -1,285 +0,0 @@ -Memory usage -============ - -(Highly unscientific.) 
- -Command used to gather static memory usage: - -```sh -grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status" -``` - -Program used to gather baseline memory usage: - -```go -package main - -import "time" - -func main() { - time.Sleep(600e9) -} -``` - -Baseline --------- - -``` -VmPeak: 42604 kB -VmSize: 42604 kB -VmLck: 0 kB -VmHWM: 1120 kB -VmRSS: 1120 kB -VmData: 35460 kB -VmStk: 136 kB -VmExe: 1020 kB -VmLib: 1848 kB -VmPTE: 36 kB -VmSwap: 0 kB -``` - -Program used to gather metric memory usage (with other metrics being similar): - -```go -package main - -import ( - "fmt" - "metrics" - "time" -) - -func main() { - fmt.Sprintf("foo") - metrics.NewRegistry() - time.Sleep(600e9) -} -``` - -1000 counters registered ------------------------- - -``` -VmPeak: 44016 kB -VmSize: 44016 kB -VmLck: 0 kB -VmHWM: 1928 kB -VmRSS: 1928 kB -VmData: 36868 kB -VmStk: 136 kB -VmExe: 1024 kB -VmLib: 1848 kB -VmPTE: 40 kB -VmSwap: 0 kB -``` - -**1.412 kB virtual, TODO 0.808 kB resident per counter.** - -100000 counters registered --------------------------- - -``` -VmPeak: 55024 kB -VmSize: 55024 kB -VmLck: 0 kB -VmHWM: 12440 kB -VmRSS: 12440 kB -VmData: 47876 kB -VmStk: 136 kB -VmExe: 1024 kB -VmLib: 1848 kB -VmPTE: 64 kB -VmSwap: 0 kB -``` - -**0.1242 kB virtual, 0.1132 kB resident per counter.** - -1000 gauges registered ----------------------- - -``` -VmPeak: 44012 kB -VmSize: 44012 kB -VmLck: 0 kB -VmHWM: 1928 kB -VmRSS: 1928 kB -VmData: 36868 kB -VmStk: 136 kB -VmExe: 1020 kB -VmLib: 1848 kB -VmPTE: 40 kB -VmSwap: 0 kB -``` - -**1.408 kB virtual, 0.808 kB resident per counter.** - -100000 gauges registered ------------------------- - -``` -VmPeak: 55020 kB -VmSize: 55020 kB -VmLck: 0 kB -VmHWM: 12432 kB -VmRSS: 12432 kB -VmData: 47876 kB -VmStk: 136 kB -VmExe: 1020 kB -VmLib: 1848 kB -VmPTE: 60 kB -VmSwap: 0 kB -``` - -**0.12416 kB virtual, 0.11312 resident per gauge.** - -1000 histograms with a uniform sample size of 1028 --------------------------------------------------- - -``` -VmPeak: 72272 kB -VmSize: 72272 kB -VmLck: 0 kB -VmHWM: 16204 kB -VmRSS: 16204 kB -VmData: 65100 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 80 kB -VmSwap: 0 kB -``` - -**29.668 kB virtual, TODO 15.084 resident per histogram.** - -10000 histograms with a uniform sample size of 1028 ---------------------------------------------------- - -``` -VmPeak: 256912 kB -VmSize: 256912 kB -VmLck: 0 kB -VmHWM: 146204 kB -VmRSS: 146204 kB -VmData: 249740 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 448 kB -VmSwap: 0 kB -``` - -**21.4308 kB virtual, 14.5084 kB resident per histogram.** - -50000 histograms with a uniform sample size of 1028 ---------------------------------------------------- - -``` -VmPeak: 908112 kB -VmSize: 908112 kB -VmLck: 0 kB -VmHWM: 645832 kB -VmRSS: 645588 kB -VmData: 900940 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 1716 kB -VmSwap: 1544 kB -``` - -**17.31016 kB virtual, 12.88936 kB resident per histogram.** - -1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 -------------------------------------------------------------------------------------- - -``` -VmPeak: 62480 kB -VmSize: 62480 kB -VmLck: 0 kB -VmHWM: 11572 kB -VmRSS: 11572 kB -VmData: 55308 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 64 kB -VmSwap: 0 kB -``` - -**19.876 kB virtual, 10.452 kB resident per histogram.** - -10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 
--------------------------------------------------------------------------------------- - -``` -VmPeak: 153296 kB -VmSize: 153296 kB -VmLck: 0 kB -VmHWM: 101176 kB -VmRSS: 101176 kB -VmData: 146124 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 240 kB -VmSwap: 0 kB -``` - -**11.0692 kB virtual, 10.0056 kB resident per histogram.** - -50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 --------------------------------------------------------------------------------------- - -``` -VmPeak: 557264 kB -VmSize: 557264 kB -VmLck: 0 kB -VmHWM: 501056 kB -VmRSS: 501056 kB -VmData: 550092 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 1032 kB -VmSwap: 0 kB -``` - -**10.2932 kB virtual, 9.99872 kB resident per histogram.** - -1000 meters ------------ - -``` -VmPeak: 74504 kB -VmSize: 74504 kB -VmLck: 0 kB -VmHWM: 24124 kB -VmRSS: 24124 kB -VmData: 67340 kB -VmStk: 136 kB -VmExe: 1040 kB -VmLib: 1848 kB -VmPTE: 92 kB -VmSwap: 0 kB -``` - -**31.9 kB virtual, 23.004 kB resident per meter.** - -10000 meters ------------- - -``` -VmPeak: 278920 kB -VmSize: 278920 kB -VmLck: 0 kB -VmHWM: 227300 kB -VmRSS: 227300 kB -VmData: 271756 kB -VmStk: 136 kB -VmExe: 1040 kB -VmLib: 1848 kB -VmPTE: 488 kB -VmSwap: 0 kB -``` - -**23.6316 kB virtual, 22.618 kB resident per meter.** diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/meter.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/meter.go deleted file mode 100644 index 0389ab0b..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/meter.go +++ /dev/null @@ -1,233 +0,0 @@ -package metrics - -import ( - "sync" - "time" -) - -// Meters count events to produce exponentially-weighted moving average rates -// at one-, five-, and fifteen-minutes and a mean rate. -type Meter interface { - Count() int64 - Mark(int64) - Rate1() float64 - Rate5() float64 - Rate15() float64 - RateMean() float64 - Snapshot() Meter -} - -// GetOrRegisterMeter returns an existing Meter or constructs and registers a -// new StandardMeter. -func GetOrRegisterMeter(name string, r Registry) Meter { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewMeter).(Meter) -} - -// NewMeter constructs a new StandardMeter and launches a goroutine. -func NewMeter() Meter { - if UseNilMetrics { - return NilMeter{} - } - m := newStandardMeter() - arbiter.Lock() - defer arbiter.Unlock() - arbiter.meters = append(arbiter.meters, m) - if !arbiter.started { - arbiter.started = true - go arbiter.tick() - } - return m -} - -// NewMeter constructs and registers a new StandardMeter and launches a -// goroutine. -func NewRegisteredMeter(name string, r Registry) Meter { - c := NewMeter() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// MeterSnapshot is a read-only copy of another Meter. -type MeterSnapshot struct { - count int64 - rate1, rate5, rate15, rateMean float64 -} - -// Count returns the count of events at the time the snapshot was taken. -func (m *MeterSnapshot) Count() int64 { return m.count } - -// Mark panics. -func (*MeterSnapshot) Mark(n int64) { - panic("Mark called on a MeterSnapshot") -} - -// Rate1 returns the one-minute moving average rate of events per second at the -// time the snapshot was taken. -func (m *MeterSnapshot) Rate1() float64 { return m.rate1 } - -// Rate5 returns the five-minute moving average rate of events per second at -// the time the snapshot was taken. 
-func (m *MeterSnapshot) Rate5() float64 { return m.rate5 } - -// Rate15 returns the fifteen-minute moving average rate of events per second -// at the time the snapshot was taken. -func (m *MeterSnapshot) Rate15() float64 { return m.rate15 } - -// RateMean returns the meter's mean rate of events per second at the time the -// snapshot was taken. -func (m *MeterSnapshot) RateMean() float64 { return m.rateMean } - -// Snapshot returns the snapshot. -func (m *MeterSnapshot) Snapshot() Meter { return m } - -// NilMeter is a no-op Meter. -type NilMeter struct{} - -// Count is a no-op. -func (NilMeter) Count() int64 { return 0 } - -// Mark is a no-op. -func (NilMeter) Mark(n int64) {} - -// Rate1 is a no-op. -func (NilMeter) Rate1() float64 { return 0.0 } - -// Rate5 is a no-op. -func (NilMeter) Rate5() float64 { return 0.0 } - -// Rate15is a no-op. -func (NilMeter) Rate15() float64 { return 0.0 } - -// RateMean is a no-op. -func (NilMeter) RateMean() float64 { return 0.0 } - -// Snapshot is a no-op. -func (NilMeter) Snapshot() Meter { return NilMeter{} } - -// StandardMeter is the standard implementation of a Meter. -type StandardMeter struct { - lock sync.RWMutex - snapshot *MeterSnapshot - a1, a5, a15 EWMA - startTime time.Time -} - -func newStandardMeter() *StandardMeter { - return &StandardMeter{ - snapshot: &MeterSnapshot{}, - a1: NewEWMA1(), - a5: NewEWMA5(), - a15: NewEWMA15(), - startTime: time.Now(), - } -} - -// Count returns the number of events recorded. -func (m *StandardMeter) Count() int64 { - m.lock.RLock() - count := m.snapshot.count - m.lock.RUnlock() - return count -} - -// Mark records the occurance of n events. -func (m *StandardMeter) Mark(n int64) { - m.lock.Lock() - defer m.lock.Unlock() - m.snapshot.count += n - m.a1.Update(n) - m.a5.Update(n) - m.a15.Update(n) - m.updateSnapshot() -} - -// Rate1 returns the one-minute moving average rate of events per second. -func (m *StandardMeter) Rate1() float64 { - m.lock.RLock() - rate1 := m.snapshot.rate1 - m.lock.RUnlock() - return rate1 -} - -// Rate5 returns the five-minute moving average rate of events per second. -func (m *StandardMeter) Rate5() float64 { - m.lock.RLock() - rate5 := m.snapshot.rate5 - m.lock.RUnlock() - return rate5 -} - -// Rate15 returns the fifteen-minute moving average rate of events per second. -func (m *StandardMeter) Rate15() float64 { - m.lock.RLock() - rate15 := m.snapshot.rate15 - m.lock.RUnlock() - return rate15 -} - -// RateMean returns the meter's mean rate of events per second. -func (m *StandardMeter) RateMean() float64 { - m.lock.RLock() - rateMean := m.snapshot.rateMean - m.lock.RUnlock() - return rateMean -} - -// Snapshot returns a read-only copy of the meter. 
-func (m *StandardMeter) Snapshot() Meter { - m.lock.RLock() - snapshot := *m.snapshot - m.lock.RUnlock() - return &snapshot -} - -func (m *StandardMeter) updateSnapshot() { - // should run with write lock held on m.lock - snapshot := m.snapshot - snapshot.rate1 = m.a1.Rate() - snapshot.rate5 = m.a5.Rate() - snapshot.rate15 = m.a15.Rate() - snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds() -} - -func (m *StandardMeter) tick() { - m.lock.Lock() - defer m.lock.Unlock() - m.a1.Tick() - m.a5.Tick() - m.a15.Tick() - m.updateSnapshot() -} - -type meterArbiter struct { - sync.RWMutex - started bool - meters []*StandardMeter - ticker *time.Ticker -} - -var arbiter = meterArbiter{ticker: time.NewTicker(5e9)} - -// Ticks meters on the scheduled interval -func (ma *meterArbiter) tick() { - for { - select { - case <-ma.ticker.C: - ma.tickMeters() - } - } -} - -func (ma *meterArbiter) tickMeters() { - ma.RLock() - defer ma.RUnlock() - for _, meter := range ma.meters { - meter.tick() - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/metrics.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/metrics.go deleted file mode 100644 index b97a49ed..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/metrics.go +++ /dev/null @@ -1,13 +0,0 @@ -// Go port of Coda Hale's Metrics library -// -// -// -// Coda Hale's original work: -package metrics - -// UseNilMetrics is checked by the constructor functions for all of the -// standard metrics. If it is true, the metric returned is a stub. -// -// This global kill-switch helps quantify the observer effect and makes -// for less cluttered pprof profiles. -var UseNilMetrics bool = false diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/opentsdb.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/opentsdb.go deleted file mode 100644 index fbc292de..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/opentsdb.go +++ /dev/null @@ -1,119 +0,0 @@ -package metrics - -import ( - "bufio" - "fmt" - "log" - "net" - "time" - "os" - "strings" -) - -var shortHostName string = "" - -// OpenTSDBConfig provides a container with configuration parameters for -// the OpenTSDB exporter -type OpenTSDBConfig struct { - Addr *net.TCPAddr // Network address to connect to - Registry Registry // Registry to be exported - FlushInterval time.Duration // Flush interval - DurationUnit time.Duration // Time conversion unit for durations - Prefix string // Prefix to be prepended to metric names -} - -// OpenTSDB is a blocking exporter function which reports metrics in r -// to a TSDB server located at addr, flushing them every d duration -// and prepending metric names with prefix. -func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) { - OpenTSDBWithConfig(OpenTSDBConfig{ - Addr: addr, - Registry: r, - FlushInterval: d, - DurationUnit: time.Nanosecond, - Prefix: prefix, - }) -} - -// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB, -// but it takes a OpenTSDBConfig instead. 
-func OpenTSDBWithConfig(c OpenTSDBConfig) { - for _ = range time.Tick(c.FlushInterval) { - if err := openTSDB(&c); nil != err { - log.Println(err) - } - } -} - -func getShortHostname() string { - if shortHostName == "" { - host, _ := os.Hostname() - if index := strings.Index(host, "."); index > 0 { - shortHostName = host[:index] - } else { - shortHostName = host - } - } - return shortHostName -} - -func openTSDB(c *OpenTSDBConfig) error { - shortHostname := getShortHostname() - now := time.Now().Unix() - du := float64(c.DurationUnit) - conn, err := net.DialTCP("tcp", nil, c.Addr) - if nil != err { - return err - } - defer conn.Close() - w := bufio.NewWriter(conn) - c.Registry.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case Counter: - fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname) - case Gauge: - fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname) - case GaugeFloat64: - fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname) - fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname) - fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname) - fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname) - fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname) - fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname) - fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname) - fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname) - fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname) - fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname) - case Meter: - m := metric.Snapshot() - fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname) - fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname) - fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname) - fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname) - fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname) - fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, int64(du)*t.Min(), shortHostname) - fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, int64(du)*t.Max(), shortHostname) - fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, du*t.Mean(), shortHostname) - fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, du*t.StdDev(), shortHostname) - fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[0], shortHostname) - fmt.Fprintf(w, 
"put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[1], shortHostname) - fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[2], shortHostname) - fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[3], shortHostname) - fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, du*ps[4], shortHostname) - fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname) - fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname) - fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname) - fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname) - } - w.Flush() - }) - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/registry.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/registry.go deleted file mode 100644 index 9ef498a2..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/registry.go +++ /dev/null @@ -1,168 +0,0 @@ -package metrics - -import ( - "fmt" - "reflect" - "sync" -) - -// DuplicateMetric is the error returned by Registry.Register when a metric -// already exists. If you mean to Register that metric you must first -// Unregister the existing metric. -type DuplicateMetric string - -func (err DuplicateMetric) Error() string { - return fmt.Sprintf("duplicate metric: %s", string(err)) -} - -// A Registry holds references to a set of metrics by name and can iterate -// over them, calling callback functions provided by the user. -// -// This is an interface so as to encourage other structs to implement -// the Registry API as appropriate. -type Registry interface { - - // Call the given function for each registered metric. - Each(func(string, interface{})) - - // Get the metric by the given name or nil if none is registered. - Get(string) interface{} - - // Gets an existing metric or registers the given one. - // The interface can be the metric to register if not found in registry, - // or a function returning the metric for lazy instantiation. - GetOrRegister(string, interface{}) interface{} - - // Register the given metric under the given name. - Register(string, interface{}) error - - // Run all registered healthchecks. - RunHealthchecks() - - // Unregister the metric with the given name. - Unregister(string) -} - -// The standard implementation of a Registry is a mutex-protected map -// of names to metrics. -type StandardRegistry struct { - metrics map[string]interface{} - mutex sync.Mutex -} - -// Create a new registry. -func NewRegistry() Registry { - return &StandardRegistry{metrics: make(map[string]interface{})} -} - -// Call the given function for each registered metric. -func (r *StandardRegistry) Each(f func(string, interface{})) { - for name, i := range r.registered() { - f(name, i) - } -} - -// Get the metric by the given name or nil if none is registered. -func (r *StandardRegistry) Get(name string) interface{} { - r.mutex.Lock() - defer r.mutex.Unlock() - return r.metrics[name] -} - -// Gets an existing metric or creates and registers a new one. Threadsafe -// alternative to calling Get and Register on failure. -// The interface can be the metric to register if not found in registry, -// or a function returning the metric for lazy instantiation. 
-func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} { - r.mutex.Lock() - defer r.mutex.Unlock() - if metric, ok := r.metrics[name]; ok { - return metric - } - if v := reflect.ValueOf(i); v.Kind() == reflect.Func { - i = v.Call(nil)[0].Interface() - } - r.register(name, i) - return i -} - -// Register the given metric under the given name. Returns a DuplicateMetric -// if a metric by the given name is already registered. -func (r *StandardRegistry) Register(name string, i interface{}) error { - r.mutex.Lock() - defer r.mutex.Unlock() - return r.register(name, i) -} - -// Run all registered healthchecks. -func (r *StandardRegistry) RunHealthchecks() { - r.mutex.Lock() - defer r.mutex.Unlock() - for _, i := range r.metrics { - if h, ok := i.(Healthcheck); ok { - h.Check() - } - } -} - -// Unregister the metric with the given name. -func (r *StandardRegistry) Unregister(name string) { - r.mutex.Lock() - defer r.mutex.Unlock() - delete(r.metrics, name) -} - -func (r *StandardRegistry) register(name string, i interface{}) error { - if _, ok := r.metrics[name]; ok { - return DuplicateMetric(name) - } - switch i.(type) { - case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer: - r.metrics[name] = i - } - return nil -} - -func (r *StandardRegistry) registered() map[string]interface{} { - metrics := make(map[string]interface{}, len(r.metrics)) - r.mutex.Lock() - defer r.mutex.Unlock() - for name, i := range r.metrics { - metrics[name] = i - } - return metrics -} - -var DefaultRegistry Registry = NewRegistry() - -// Call the given function for each registered metric. -func Each(f func(string, interface{})) { - DefaultRegistry.Each(f) -} - -// Get the metric by the given name or nil if none is registered. -func Get(name string) interface{} { - return DefaultRegistry.Get(name) -} - -// Gets an existing metric or creates and registers a new one. Threadsafe -// alternative to calling Get and Register on failure. -func GetOrRegister(name string, i interface{}) interface{} { - return DefaultRegistry.GetOrRegister(name, i) -} - -// Register the given metric under the given name. Returns a DuplicateMetric -// if a metric by the given name is already registered. -func Register(name string, i interface{}) error { - return DefaultRegistry.Register(name, i) -} - -// Run all registered healthchecks. -func RunHealthchecks() { - DefaultRegistry.RunHealthchecks() -} - -// Unregister the metric with the given name. 
-func Unregister(name string) { - DefaultRegistry.Unregister(name) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/runtime.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/runtime.go deleted file mode 100644 index 82574bf2..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/runtime.go +++ /dev/null @@ -1,200 +0,0 @@ -package metrics - -import ( - "runtime" - "time" -) - -var ( - memStats runtime.MemStats - runtimeMetrics struct { - MemStats struct { - Alloc Gauge - BuckHashSys Gauge - DebugGC Gauge - EnableGC Gauge - Frees Gauge - HeapAlloc Gauge - HeapIdle Gauge - HeapInuse Gauge - HeapObjects Gauge - HeapReleased Gauge - HeapSys Gauge - LastGC Gauge - Lookups Gauge - Mallocs Gauge - MCacheInuse Gauge - MCacheSys Gauge - MSpanInuse Gauge - MSpanSys Gauge - NextGC Gauge - NumGC Gauge - PauseNs Histogram - PauseTotalNs Gauge - StackInuse Gauge - StackSys Gauge - Sys Gauge - TotalAlloc Gauge - } - NumCgoCall Gauge - NumGoroutine Gauge - ReadMemStats Timer - } - frees uint64 - lookups uint64 - mallocs uint64 - numGC uint32 - numCgoCalls int64 -) - -// Capture new values for the Go runtime statistics exported in -// runtime.MemStats. This is designed to be called as a goroutine. -func CaptureRuntimeMemStats(r Registry, d time.Duration) { - for _ = range time.Tick(d) { - CaptureRuntimeMemStatsOnce(r) - } -} - -// Capture new values for the Go runtime statistics exported in -// runtime.MemStats. This is designed to be called in a background -// goroutine. Giving a registry which has not been given to -// RegisterRuntimeMemStats will panic. -// -// Be very careful with this because runtime.ReadMemStats calls the C -// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld() -// and that last one does what it says on the tin. -func CaptureRuntimeMemStatsOnce(r Registry) { - t := time.Now() - runtime.ReadMemStats(&memStats) // This takes 50-200us. 
- runtimeMetrics.ReadMemStats.UpdateSince(t) - - runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc)) - runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys)) - if memStats.DebugGC { - runtimeMetrics.MemStats.DebugGC.Update(1) - } else { - runtimeMetrics.MemStats.DebugGC.Update(0) - } - if memStats.EnableGC { - runtimeMetrics.MemStats.EnableGC.Update(1) - } else { - runtimeMetrics.MemStats.EnableGC.Update(0) - } - - runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees)) - runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc)) - runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle)) - runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse)) - runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects)) - runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased)) - runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys)) - runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC)) - runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups)) - runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs)) - runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse)) - runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys)) - runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse)) - runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys)) - runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC)) - runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC)) - - // - i := numGC % uint32(len(memStats.PauseNs)) - ii := memStats.NumGC % uint32(len(memStats.PauseNs)) - if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) { - for i = 0; i < uint32(len(memStats.PauseNs)); i++ { - runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) - } - } else { - if i > ii { - for ; i < uint32(len(memStats.PauseNs)); i++ { - runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) - } - i = 0 - } - for ; i < ii; i++ { - runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) - } - } - frees = memStats.Frees - lookups = memStats.Lookups - mallocs = memStats.Mallocs - numGC = memStats.NumGC - - runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs)) - runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse)) - runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys)) - runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys)) - runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc)) - - currentNumCgoCalls := numCgoCall() - runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls) - numCgoCalls = currentNumCgoCalls - - runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine())) -} - -// Register runtimeMetrics for the Go runtime statistics exported in runtime and -// specifically runtime.MemStats. The runtimeMetrics are named by their -// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc. 
-func RegisterRuntimeMemStats(r Registry) { - runtimeMetrics.MemStats.Alloc = NewGauge() - runtimeMetrics.MemStats.BuckHashSys = NewGauge() - runtimeMetrics.MemStats.DebugGC = NewGauge() - runtimeMetrics.MemStats.EnableGC = NewGauge() - runtimeMetrics.MemStats.Frees = NewGauge() - runtimeMetrics.MemStats.HeapAlloc = NewGauge() - runtimeMetrics.MemStats.HeapIdle = NewGauge() - runtimeMetrics.MemStats.HeapInuse = NewGauge() - runtimeMetrics.MemStats.HeapObjects = NewGauge() - runtimeMetrics.MemStats.HeapReleased = NewGauge() - runtimeMetrics.MemStats.HeapSys = NewGauge() - runtimeMetrics.MemStats.LastGC = NewGauge() - runtimeMetrics.MemStats.Lookups = NewGauge() - runtimeMetrics.MemStats.Mallocs = NewGauge() - runtimeMetrics.MemStats.MCacheInuse = NewGauge() - runtimeMetrics.MemStats.MCacheSys = NewGauge() - runtimeMetrics.MemStats.MSpanInuse = NewGauge() - runtimeMetrics.MemStats.MSpanSys = NewGauge() - runtimeMetrics.MemStats.NextGC = NewGauge() - runtimeMetrics.MemStats.NumGC = NewGauge() - runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015)) - runtimeMetrics.MemStats.PauseTotalNs = NewGauge() - runtimeMetrics.MemStats.StackInuse = NewGauge() - runtimeMetrics.MemStats.StackSys = NewGauge() - runtimeMetrics.MemStats.Sys = NewGauge() - runtimeMetrics.MemStats.TotalAlloc = NewGauge() - runtimeMetrics.NumCgoCall = NewGauge() - runtimeMetrics.NumGoroutine = NewGauge() - runtimeMetrics.ReadMemStats = NewTimer() - - r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc) - r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys) - r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC) - r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC) - r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees) - r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc) - r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle) - r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse) - r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects) - r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased) - r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys) - r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC) - r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups) - r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs) - r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse) - r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys) - r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse) - r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys) - r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC) - r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC) - r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs) - r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs) - r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse) - r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys) - r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys) - r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc) - r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall) - r.Register("runtime.NumGoroutine", 
runtimeMetrics.NumGoroutine) - r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/runtime_cgo.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/runtime_cgo.go deleted file mode 100644 index 38976a8c..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/runtime_cgo.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build cgo - -package metrics - -import "runtime" - -func numCgoCall() int64 { - return runtime.NumCgoCall() -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/runtime_no_cgo.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/runtime_no_cgo.go deleted file mode 100644 index 38220330..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/runtime_no_cgo.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !cgo - -package metrics - -func numCgoCall() int64 { - return 0 -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/sample.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/sample.go deleted file mode 100644 index e34b7b58..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/sample.go +++ /dev/null @@ -1,568 +0,0 @@ -package metrics - -import ( - "container/heap" - "math" - "math/rand" - "sort" - "sync" - "time" -) - -const rescaleThreshold = time.Hour - -// Samples maintain a statistically-significant selection of values from -// a stream. -type Sample interface { - Clear() - Count() int64 - Max() int64 - Mean() float64 - Min() int64 - Percentile(float64) float64 - Percentiles([]float64) []float64 - Size() int - Snapshot() Sample - StdDev() float64 - Sum() int64 - Update(int64) - Values() []int64 - Variance() float64 -} - -// ExpDecaySample is an exponentially-decaying sample using a forward-decaying -// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time -// Decay Model for Streaming Systems". -// -// -type ExpDecaySample struct { - alpha float64 - count int64 - mutex sync.Mutex - reservoirSize int - t0, t1 time.Time - values expDecaySampleHeap -} - -// NewExpDecaySample constructs a new exponentially-decaying sample with the -// given reservoir size and alpha. -func NewExpDecaySample(reservoirSize int, alpha float64) Sample { - if UseNilMetrics { - return NilSample{} - } - s := &ExpDecaySample{ - alpha: alpha, - reservoirSize: reservoirSize, - t0: time.Now(), - values: make(expDecaySampleHeap, 0, reservoirSize), - } - s.t1 = time.Now().Add(rescaleThreshold) - return s -} - -// Clear clears all samples. -func (s *ExpDecaySample) Clear() { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count = 0 - s.t0 = time.Now() - s.t1 = s.t0.Add(rescaleThreshold) - s.values = make(expDecaySampleHeap, 0, s.reservoirSize) -} - -// Count returns the number of samples recorded, which may exceed the -// reservoir size. -func (s *ExpDecaySample) Count() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.count -} - -// Max returns the maximum value in the sample, which may not be the maximum -// value ever to be part of the sample. -func (s *ExpDecaySample) Max() int64 { - return SampleMax(s.Values()) -} - -// Mean returns the mean of the values in the sample. 
-func (s *ExpDecaySample) Mean() float64 { - return SampleMean(s.Values()) -} - -// Min returns the minimum value in the sample, which may not be the minimum -// value ever to be part of the sample. -func (s *ExpDecaySample) Min() int64 { - return SampleMin(s.Values()) -} - -// Percentile returns an arbitrary percentile of values in the sample. -func (s *ExpDecaySample) Percentile(p float64) float64 { - return SamplePercentile(s.Values(), p) -} - -// Percentiles returns a slice of arbitrary percentiles of values in the -// sample. -func (s *ExpDecaySample) Percentiles(ps []float64) []float64 { - return SamplePercentiles(s.Values(), ps) -} - -// Size returns the size of the sample, which is at most the reservoir size. -func (s *ExpDecaySample) Size() int { - s.mutex.Lock() - defer s.mutex.Unlock() - return len(s.values) -} - -// Snapshot returns a read-only copy of the sample. -func (s *ExpDecaySample) Snapshot() Sample { - s.mutex.Lock() - defer s.mutex.Unlock() - values := make([]int64, len(s.values)) - for i, v := range s.values { - values[i] = v.v - } - return &SampleSnapshot{ - count: s.count, - values: values, - } -} - -// StdDev returns the standard deviation of the values in the sample. -func (s *ExpDecaySample) StdDev() float64 { - return SampleStdDev(s.Values()) -} - -// Sum returns the sum of the values in the sample. -func (s *ExpDecaySample) Sum() int64 { - return SampleSum(s.Values()) -} - -// Update samples a new value. -func (s *ExpDecaySample) Update(v int64) { - s.update(time.Now(), v) -} - -// Values returns a copy of the values in the sample. -func (s *ExpDecaySample) Values() []int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - values := make([]int64, len(s.values)) - for i, v := range s.values { - values[i] = v.v - } - return values -} - -// Variance returns the variance of the values in the sample. -func (s *ExpDecaySample) Variance() float64 { - return SampleVariance(s.Values()) -} - -// update samples a new value at a particular timestamp. This is a method all -// its own to facilitate testing. -func (s *ExpDecaySample) update(t time.Time, v int64) { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count++ - if len(s.values) == s.reservoirSize { - heap.Pop(&s.values) - } - heap.Push(&s.values, expDecaySample{ - k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(), - v: v, - }) - if t.After(s.t1) { - values := s.values - t0 := s.t0 - s.values = make(expDecaySampleHeap, 0, s.reservoirSize) - s.t0 = t - s.t1 = s.t0.Add(rescaleThreshold) - for _, v := range values { - v.k = v.k * math.Exp(-s.alpha*float64(s.t0.Sub(t0))) - heap.Push(&s.values, v) - } - } -} - -// NilSample is a no-op Sample. -type NilSample struct{} - -// Clear is a no-op. -func (NilSample) Clear() {} - -// Count is a no-op. -func (NilSample) Count() int64 { return 0 } - -// Max is a no-op. -func (NilSample) Max() int64 { return 0 } - -// Mean is a no-op. -func (NilSample) Mean() float64 { return 0.0 } - -// Min is a no-op. -func (NilSample) Min() int64 { return 0 } - -// Percentile is a no-op. -func (NilSample) Percentile(p float64) float64 { return 0.0 } - -// Percentiles is a no-op. -func (NilSample) Percentiles(ps []float64) []float64 { - return make([]float64, len(ps)) -} - -// Size is a no-op. -func (NilSample) Size() int { return 0 } - -// Sample is a no-op. -func (NilSample) Snapshot() Sample { return NilSample{} } - -// StdDev is a no-op. -func (NilSample) StdDev() float64 { return 0.0 } - -// Sum is a no-op. -func (NilSample) Sum() int64 { return 0 } - -// Update is a no-op. 
-func (NilSample) Update(v int64) {} - -// Values is a no-op. -func (NilSample) Values() []int64 { return []int64{} } - -// Variance is a no-op. -func (NilSample) Variance() float64 { return 0.0 } - -// SampleMax returns the maximum value of the slice of int64. -func SampleMax(values []int64) int64 { - if 0 == len(values) { - return 0 - } - var max int64 = math.MinInt64 - for _, v := range values { - if max < v { - max = v - } - } - return max -} - -// SampleMean returns the mean value of the slice of int64. -func SampleMean(values []int64) float64 { - if 0 == len(values) { - return 0.0 - } - return float64(SampleSum(values)) / float64(len(values)) -} - -// SampleMin returns the minimum value of the slice of int64. -func SampleMin(values []int64) int64 { - if 0 == len(values) { - return 0 - } - var min int64 = math.MaxInt64 - for _, v := range values { - if min > v { - min = v - } - } - return min -} - -// SamplePercentiles returns an arbitrary percentile of the slice of int64. -func SamplePercentile(values int64Slice, p float64) float64 { - return SamplePercentiles(values, []float64{p})[0] -} - -// SamplePercentiles returns a slice of arbitrary percentiles of the slice of -// int64. -func SamplePercentiles(values int64Slice, ps []float64) []float64 { - scores := make([]float64, len(ps)) - size := len(values) - if size > 0 { - sort.Sort(values) - for i, p := range ps { - pos := p * float64(size+1) - if pos < 1.0 { - scores[i] = float64(values[0]) - } else if pos >= float64(size) { - scores[i] = float64(values[size-1]) - } else { - lower := float64(values[int(pos)-1]) - upper := float64(values[int(pos)]) - scores[i] = lower + (pos-math.Floor(pos))*(upper-lower) - } - } - } - return scores -} - -// SampleSnapshot is a read-only copy of another Sample. -type SampleSnapshot struct { - count int64 - values []int64 -} - -// Clear panics. -func (*SampleSnapshot) Clear() { - panic("Clear called on a SampleSnapshot") -} - -// Count returns the count of inputs at the time the snapshot was taken. -func (s *SampleSnapshot) Count() int64 { return s.count } - -// Max returns the maximal value at the time the snapshot was taken. -func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) } - -// Mean returns the mean value at the time the snapshot was taken. -func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) } - -// Min returns the minimal value at the time the snapshot was taken. -func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) } - -// Percentile returns an arbitrary percentile of values at the time the -// snapshot was taken. -func (s *SampleSnapshot) Percentile(p float64) float64 { - return SamplePercentile(s.values, p) -} - -// Percentiles returns a slice of arbitrary percentiles of values at the time -// the snapshot was taken. -func (s *SampleSnapshot) Percentiles(ps []float64) []float64 { - return SamplePercentiles(s.values, ps) -} - -// Size returns the size of the sample at the time the snapshot was taken. -func (s *SampleSnapshot) Size() int { return len(s.values) } - -// Snapshot returns the snapshot. -func (s *SampleSnapshot) Snapshot() Sample { return s } - -// StdDev returns the standard deviation of values at the time the snapshot was -// taken. -func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) } - -// Sum returns the sum of values at the time the snapshot was taken. -func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) } - -// Update panics. 
-func (*SampleSnapshot) Update(int64) { - panic("Update called on a SampleSnapshot") -} - -// Values returns a copy of the values in the sample. -func (s *SampleSnapshot) Values() []int64 { - values := make([]int64, len(s.values)) - copy(values, s.values) - return values -} - -// Variance returns the variance of values at the time the snapshot was taken. -func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) } - -// SampleStdDev returns the standard deviation of the slice of int64. -func SampleStdDev(values []int64) float64 { - return math.Sqrt(SampleVariance(values)) -} - -// SampleSum returns the sum of the slice of int64. -func SampleSum(values []int64) int64 { - var sum int64 - for _, v := range values { - sum += v - } - return sum -} - -// SampleVariance returns the variance of the slice of int64. -func SampleVariance(values []int64) float64 { - if 0 == len(values) { - return 0.0 - } - m := SampleMean(values) - var sum float64 - for _, v := range values { - d := float64(v) - m - sum += d * d - } - return sum / float64(len(values)) -} - -// A uniform sample using Vitter's Algorithm R. -// -// -type UniformSample struct { - count int64 - mutex sync.Mutex - reservoirSize int - values []int64 -} - -// NewUniformSample constructs a new uniform sample with the given reservoir -// size. -func NewUniformSample(reservoirSize int) Sample { - if UseNilMetrics { - return NilSample{} - } - return &UniformSample{ - reservoirSize: reservoirSize, - values: make([]int64, 0, reservoirSize), - } -} - -// Clear clears all samples. -func (s *UniformSample) Clear() { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count = 0 - s.values = make([]int64, 0, s.reservoirSize) -} - -// Count returns the number of samples recorded, which may exceed the -// reservoir size. -func (s *UniformSample) Count() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.count -} - -// Max returns the maximum value in the sample, which may not be the maximum -// value ever to be part of the sample. -func (s *UniformSample) Max() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleMax(s.values) -} - -// Mean returns the mean of the values in the sample. -func (s *UniformSample) Mean() float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleMean(s.values) -} - -// Min returns the minimum value in the sample, which may not be the minimum -// value ever to be part of the sample. -func (s *UniformSample) Min() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleMin(s.values) -} - -// Percentile returns an arbitrary percentile of values in the sample. -func (s *UniformSample) Percentile(p float64) float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SamplePercentile(s.values, p) -} - -// Percentiles returns a slice of arbitrary percentiles of values in the -// sample. -func (s *UniformSample) Percentiles(ps []float64) []float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SamplePercentiles(s.values, ps) -} - -// Size returns the size of the sample, which is at most the reservoir size. -func (s *UniformSample) Size() int { - s.mutex.Lock() - defer s.mutex.Unlock() - return len(s.values) -} - -// Snapshot returns a read-only copy of the sample. -func (s *UniformSample) Snapshot() Sample { - s.mutex.Lock() - defer s.mutex.Unlock() - values := make([]int64, len(s.values)) - copy(values, s.values) - return &SampleSnapshot{ - count: s.count, - values: values, - } -} - -// StdDev returns the standard deviation of the values in the sample. 
-func (s *UniformSample) StdDev() float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleStdDev(s.values) -} - -// Sum returns the sum of the values in the sample. -func (s *UniformSample) Sum() int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleSum(s.values) -} - -// Update samples a new value. -func (s *UniformSample) Update(v int64) { - s.mutex.Lock() - defer s.mutex.Unlock() - s.count++ - if len(s.values) < s.reservoirSize { - s.values = append(s.values, v) - } else { - s.values[rand.Intn(s.reservoirSize)] = v - } -} - -// Values returns a copy of the values in the sample. -func (s *UniformSample) Values() []int64 { - s.mutex.Lock() - defer s.mutex.Unlock() - values := make([]int64, len(s.values)) - copy(values, s.values) - return values -} - -// Variance returns the variance of the values in the sample. -func (s *UniformSample) Variance() float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - return SampleVariance(s.values) -} - -// expDecaySample represents an individual sample in a heap. -type expDecaySample struct { - k float64 - v int64 -} - -// expDecaySampleHeap is a min-heap of expDecaySamples. -type expDecaySampleHeap []expDecaySample - -func (q expDecaySampleHeap) Len() int { - return len(q) -} - -func (q expDecaySampleHeap) Less(i, j int) bool { - return q[i].k < q[j].k -} - -func (q *expDecaySampleHeap) Pop() interface{} { - q_ := *q - n := len(q_) - i := q_[n-1] - q_ = q_[0 : n-1] - *q = q_ - return i -} - -func (q *expDecaySampleHeap) Push(x interface{}) { - q_ := *q - n := len(q_) - q_ = q_[0 : n+1] - q_[n] = x.(expDecaySample) - *q = q_ -} - -func (q expDecaySampleHeap) Swap(i, j int) { - q[i], q[j] = q[j], q[i] -} - -type int64Slice []int64 - -func (p int64Slice) Len() int { return len(p) } -func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/syslog.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/syslog.go deleted file mode 100644 index 693f1908..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/syslog.go +++ /dev/null @@ -1,78 +0,0 @@ -// +build !windows - -package metrics - -import ( - "fmt" - "log/syslog" - "time" -) - -// Output each metric in the given registry to syslog periodically using -// the given syslogger. 
-func Syslog(r Registry, d time.Duration, w *syslog.Writer) { - for _ = range time.Tick(d) { - r.Each(func(name string, i interface{}) { - switch metric := i.(type) { - case Counter: - w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count())) - case Gauge: - w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value())) - case GaugeFloat64: - w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value())) - case Healthcheck: - metric.Check() - w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error())) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - w.Info(fmt.Sprintf( - "histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f", - name, - h.Count(), - h.Min(), - h.Max(), - h.Mean(), - h.StdDev(), - ps[0], - ps[1], - ps[2], - ps[3], - ps[4], - )) - case Meter: - m := metric.Snapshot() - w.Info(fmt.Sprintf( - "meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f", - name, - m.Count(), - m.Rate1(), - m.Rate5(), - m.Rate15(), - m.RateMean(), - )) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - w.Info(fmt.Sprintf( - "timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f", - name, - t.Count(), - t.Min(), - t.Max(), - t.Mean(), - t.StdDev(), - ps[0], - ps[1], - ps[2], - ps[3], - ps[4], - t.Rate1(), - t.Rate5(), - t.Rate15(), - t.RateMean(), - )) - } - }) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/timer.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/timer.go deleted file mode 100644 index 73f19b58..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/timer.go +++ /dev/null @@ -1,299 +0,0 @@ -package metrics - -import ( - "sync" - "time" -) - -// Timers capture the duration and rate of events. -type Timer interface { - Count() int64 - Max() int64 - Mean() float64 - Min() int64 - Percentile(float64) float64 - Percentiles([]float64) []float64 - Rate1() float64 - Rate5() float64 - Rate15() float64 - RateMean() float64 - Snapshot() Timer - StdDev() float64 - Time(func()) - Update(time.Duration) - UpdateSince(time.Time) - Variance() float64 -} - -// GetOrRegisterTimer returns an existing Timer or constructs and registers a -// new StandardTimer. -func GetOrRegisterTimer(name string, r Registry) Timer { - if nil == r { - r = DefaultRegistry - } - return r.GetOrRegister(name, NewTimer).(Timer) -} - -// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter. -func NewCustomTimer(h Histogram, m Meter) Timer { - if UseNilMetrics { - return NilTimer{} - } - return &StandardTimer{ - histogram: h, - meter: m, - } -} - -// NewRegisteredTimer constructs and registers a new StandardTimer. -func NewRegisteredTimer(name string, r Registry) Timer { - c := NewTimer() - if nil == r { - r = DefaultRegistry - } - r.Register(name, c) - return c -} - -// NewTimer constructs a new StandardTimer using an exponentially-decaying -// sample with the same reservoir size and alpha as UNIX load averages. -func NewTimer() Timer { - if UseNilMetrics { - return NilTimer{} - } - return &StandardTimer{ - histogram: NewHistogram(NewExpDecaySample(1028, 0.015)), - meter: NewMeter(), - } -} - -// NilTimer is a no-op Timer. 
-type NilTimer struct { - h Histogram - m Meter -} - -// Count is a no-op. -func (NilTimer) Count() int64 { return 0 } - -// Max is a no-op. -func (NilTimer) Max() int64 { return 0 } - -// Mean is a no-op. -func (NilTimer) Mean() float64 { return 0.0 } - -// Min is a no-op. -func (NilTimer) Min() int64 { return 0 } - -// Percentile is a no-op. -func (NilTimer) Percentile(p float64) float64 { return 0.0 } - -// Percentiles is a no-op. -func (NilTimer) Percentiles(ps []float64) []float64 { - return make([]float64, len(ps)) -} - -// Rate1 is a no-op. -func (NilTimer) Rate1() float64 { return 0.0 } - -// Rate5 is a no-op. -func (NilTimer) Rate5() float64 { return 0.0 } - -// Rate15 is a no-op. -func (NilTimer) Rate15() float64 { return 0.0 } - -// RateMean is a no-op. -func (NilTimer) RateMean() float64 { return 0.0 } - -// Snapshot is a no-op. -func (NilTimer) Snapshot() Timer { return NilTimer{} } - -// StdDev is a no-op. -func (NilTimer) StdDev() float64 { return 0.0 } - -// Time is a no-op. -func (NilTimer) Time(func()) {} - -// Update is a no-op. -func (NilTimer) Update(time.Duration) {} - -// UpdateSince is a no-op. -func (NilTimer) UpdateSince(time.Time) {} - -// Variance is a no-op. -func (NilTimer) Variance() float64 { return 0.0 } - -// StandardTimer is the standard implementation of a Timer and uses a Histogram -// and Meter. -type StandardTimer struct { - histogram Histogram - meter Meter - mutex sync.Mutex -} - -// Count returns the number of events recorded. -func (t *StandardTimer) Count() int64 { - return t.histogram.Count() -} - -// Max returns the maximum value in the sample. -func (t *StandardTimer) Max() int64 { - return t.histogram.Max() -} - -// Mean returns the mean of the values in the sample. -func (t *StandardTimer) Mean() float64 { - return t.histogram.Mean() -} - -// Min returns the minimum value in the sample. -func (t *StandardTimer) Min() int64 { - return t.histogram.Min() -} - -// Percentile returns an arbitrary percentile of the values in the sample. -func (t *StandardTimer) Percentile(p float64) float64 { - return t.histogram.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of the values in the -// sample. -func (t *StandardTimer) Percentiles(ps []float64) []float64 { - return t.histogram.Percentiles(ps) -} - -// Rate1 returns the one-minute moving average rate of events per second. -func (t *StandardTimer) Rate1() float64 { - return t.meter.Rate1() -} - -// Rate5 returns the five-minute moving average rate of events per second. -func (t *StandardTimer) Rate5() float64 { - return t.meter.Rate5() -} - -// Rate15 returns the fifteen-minute moving average rate of events per second. -func (t *StandardTimer) Rate15() float64 { - return t.meter.Rate15() -} - -// RateMean returns the meter's mean rate of events per second. -func (t *StandardTimer) RateMean() float64 { - return t.meter.RateMean() -} - -// Snapshot returns a read-only copy of the timer. -func (t *StandardTimer) Snapshot() Timer { - t.mutex.Lock() - defer t.mutex.Unlock() - return &TimerSnapshot{ - histogram: t.histogram.Snapshot().(*HistogramSnapshot), - meter: t.meter.Snapshot().(*MeterSnapshot), - } -} - -// StdDev returns the standard deviation of the values in the sample. -func (t *StandardTimer) StdDev() float64 { - return t.histogram.StdDev() -} - -// Record the duration of the execution of the given function. -func (t *StandardTimer) Time(f func()) { - ts := time.Now() - f() - t.Update(time.Since(ts)) -} - -// Record the duration of an event. 
-func (t *StandardTimer) Update(d time.Duration) { - t.mutex.Lock() - defer t.mutex.Unlock() - t.histogram.Update(int64(d)) - t.meter.Mark(1) -} - -// Record the duration of an event that started at a time and ends now. -func (t *StandardTimer) UpdateSince(ts time.Time) { - t.mutex.Lock() - defer t.mutex.Unlock() - t.histogram.Update(int64(time.Since(ts))) - t.meter.Mark(1) -} - -// Variance returns the variance of the values in the sample. -func (t *StandardTimer) Variance() float64 { - return t.histogram.Variance() -} - -// TimerSnapshot is a read-only copy of another Timer. -type TimerSnapshot struct { - histogram *HistogramSnapshot - meter *MeterSnapshot -} - -// Count returns the number of events recorded at the time the snapshot was -// taken. -func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() } - -// Max returns the maximum value at the time the snapshot was taken. -func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() } - -// Mean returns the mean value at the time the snapshot was taken. -func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() } - -// Min returns the minimum value at the time the snapshot was taken. -func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() } - -// Percentile returns an arbitrary percentile of sampled values at the time the -// snapshot was taken. -func (t *TimerSnapshot) Percentile(p float64) float64 { - return t.histogram.Percentile(p) -} - -// Percentiles returns a slice of arbitrary percentiles of sampled values at -// the time the snapshot was taken. -func (t *TimerSnapshot) Percentiles(ps []float64) []float64 { - return t.histogram.Percentiles(ps) -} - -// Rate1 returns the one-minute moving average rate of events per second at the -// time the snapshot was taken. -func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() } - -// Rate5 returns the five-minute moving average rate of events per second at -// the time the snapshot was taken. -func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() } - -// Rate15 returns the fifteen-minute moving average rate of events per second -// at the time the snapshot was taken. -func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() } - -// RateMean returns the meter's mean rate of events per second at the time the -// snapshot was taken. -func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() } - -// Snapshot returns the snapshot. -func (t *TimerSnapshot) Snapshot() Timer { return t } - -// StdDev returns the standard deviation of the values at the time the snapshot -// was taken. -func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() } - -// Time panics. -func (*TimerSnapshot) Time(func()) { - panic("Time called on a TimerSnapshot") -} - -// Update panics. -func (*TimerSnapshot) Update(time.Duration) { - panic("Update called on a TimerSnapshot") -} - -// UpdateSince panics. -func (*TimerSnapshot) UpdateSince(time.Time) { - panic("UpdateSince called on a TimerSnapshot") -} - -// Variance returns the variance of the values at the time the snapshot was -// taken. 
-func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() } diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/writer.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/writer.go deleted file mode 100644 index 091e971d..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/go-metrics/writer.go +++ /dev/null @@ -1,100 +0,0 @@ -package metrics - -import ( - "fmt" - "io" - "sort" - "time" -) - -// Write sorts writes each metric in the given registry periodically to the -// given io.Writer. -func Write(r Registry, d time.Duration, w io.Writer) { - for _ = range time.Tick(d) { - WriteOnce(r, w) - } -} - -// WriteOnce sorts and writes metrics in the given registry to the given -// io.Writer. -func WriteOnce(r Registry, w io.Writer) { - var namedMetrics namedMetricSlice - r.Each(func(name string, i interface{}) { - namedMetrics = append(namedMetrics, namedMetric{name, i}) - }) - - sort.Sort(namedMetrics) - for _, namedMetric := range namedMetrics { - switch metric := namedMetric.m.(type) { - case Counter: - fmt.Fprintf(w, "counter %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", metric.Count()) - case Gauge: - fmt.Fprintf(w, "gauge %s\n", namedMetric.name) - fmt.Fprintf(w, " value: %9d\n", metric.Value()) - case GaugeFloat64: - fmt.Fprintf(w, "gauge %s\n", namedMetric.name) - fmt.Fprintf(w, " value: %f\n", metric.Value()) - case Healthcheck: - metric.Check() - fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name) - fmt.Fprintf(w, " error: %v\n", metric.Error()) - case Histogram: - h := metric.Snapshot() - ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - fmt.Fprintf(w, "histogram %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", h.Count()) - fmt.Fprintf(w, " min: %9d\n", h.Min()) - fmt.Fprintf(w, " max: %9d\n", h.Max()) - fmt.Fprintf(w, " mean: %12.2f\n", h.Mean()) - fmt.Fprintf(w, " stddev: %12.2f\n", h.StdDev()) - fmt.Fprintf(w, " median: %12.2f\n", ps[0]) - fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1]) - fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) - fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) - fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) - case Meter: - m := metric.Snapshot() - fmt.Fprintf(w, "meter %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", m.Count()) - fmt.Fprintf(w, " 1-min rate: %12.2f\n", m.Rate1()) - fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5()) - fmt.Fprintf(w, " 15-min rate: %12.2f\n", m.Rate15()) - fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean()) - case Timer: - t := metric.Snapshot() - ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) - fmt.Fprintf(w, "timer %s\n", namedMetric.name) - fmt.Fprintf(w, " count: %9d\n", t.Count()) - fmt.Fprintf(w, " min: %9d\n", t.Min()) - fmt.Fprintf(w, " max: %9d\n", t.Max()) - fmt.Fprintf(w, " mean: %12.2f\n", t.Mean()) - fmt.Fprintf(w, " stddev: %12.2f\n", t.StdDev()) - fmt.Fprintf(w, " median: %12.2f\n", ps[0]) - fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1]) - fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) - fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) - fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) - fmt.Fprintf(w, " 1-min rate: %12.2f\n", t.Rate1()) - fmt.Fprintf(w, " 5-min rate: %12.2f\n", t.Rate5()) - fmt.Fprintf(w, " 15-min rate: %12.2f\n", t.Rate15()) - fmt.Fprintf(w, " mean rate: %12.2f\n", t.RateMean()) - } - } -} - -type namedMetric struct { - name string - m interface{} -} - -// namedMetricSlice is a slice of namedMetrics that implements sort.Interface. 
-type namedMetricSlice []namedMetric - -func (nms namedMetricSlice) Len() int { return len(nms) } - -func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] } - -func (nms namedMetricSlice) Less(i, j int) bool { - return nms[i].name < nms[j].name -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/.gitignore b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/.gitignore deleted file mode 100644 index ca502e29..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.nut -*.swp -examples/example1 -examples/example_web diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/.travis.yml deleted file mode 100644 index 4f2ee4d9..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/LICENSE deleted file mode 100644 index 01a9a5c4..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2013 Yuriy Vasiyarov. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/README.md b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/README.md deleted file mode 100644 index 61068a82..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/README.md +++ /dev/null @@ -1,119 +0,0 @@ -# GoRelic - -New Relic agent for Go runtime. It collect a lot of metrics about scheduler, garbage collector and memory allocator and -send them to NewRelic. - -### Requirements -- Go 1.1 or higher -- github.com/yvasiyarov/gorelic -- github.com/yvasiyarov/newrelic_platform_go -- github.com/yvasiyarov/go-metrics - -You have to install manually only first two dependencies. All other dependencies will be installed automatically -by Go toolchain. 
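For reviewers skimming this removal, the deleted gorelic README documents a very small agent surface. The following is an illustrative, self-contained sketch of the bootstrap it describes, using only names that appear in the deleted `agent.go` (`NewAgent`, `WrapHTTPHandlerFunc`, `Run`); the license key and port are placeholders, not values from this repository:

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/yvasiyarov/gorelic"
)

func main() {
	agent := gorelic.NewAgent()
	agent.NewrelicLicense = "YOUR-LICENSE-KEY" // placeholder: Run() returns an error if this is empty
	agent.NewrelicName = "example-daemon"      // component name shown in the NewRelic dashboard
	agent.Verbose = true

	if err := agent.Run(); err != nil {
		log.Fatal(err)
	}

	// Wrapping a handler func enables the HTTP metrics the README lists below.
	http.HandleFunc("/", agent.WrapHTTPHandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```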
- -### Installation -```bash -go get github.com/yvasiyarov/gorelic -``` -and add to the initialization part of your application following code: -```go -import ( - "github.com/yvasiyarov/gorelic" -) -.... - -agent := gorelic.NewAgent() -agent.Verbose = true -agent.NewrelicLicense = "YOUR NEWRELIC LICENSE KEY THERE" -agent.Run() - -``` - -### Middleware -If you using Beego, Martini, Revel or Gin framework you can hook up gorelic with your application by using the following middleware: -- https://github.com/yvasiyarov/beego_gorelic -- https://github.com/yvasiyarov/martini_gorelic -- https://github.com/yvasiyarov/gocraft_gorelic -- http://wiki.colar.net/revel_newelic -- https://github.com/jingweno/negroni-gorelic -- https://github.com/brandfolder/gin-gorelic - - -### Configuration -- NewrelicLicense - its the only mandatory setting of this agent. -- NewrelicName - component name in NewRelic dashboard. Default value: "Go daemon" -- NewrelicPollInterval - how often metrics will be sent to NewRelic. Default value: 60 seconds -- Verbose - print some usefull for debugging information. Default value: false -- CollectGcStat - should agent collect garbage collector statistic or not. Default value: true -- CollectHTTPStat - should agent collect HTTP metrics. Default value: false -- CollectMemoryStat - should agent collect memory allocator statistic or not. Default value: true -- GCPollInterval - how often should GC statistic collected. Default value: 10 seconds. It has performance impact. For more information, please, see metrics documentation. -- MemoryAllocatorPollInterval - how often should memory allocator statistic collected. Default value: 60 seconds. It has performance impact. For more information, please, read metrics documentation. - - -## Metrics reported by plugin -This agent use functions exposed by runtime or runtime/debug packages to collect most important information about Go runtime. - -### General metrics -- Runtime/General/NOGoroutines - number of runned go routines, as it reported by NumGoroutine() from runtime package -- Runtime/General/NOCgoCalls - number of runned cgo calls, as it reported by NumCgoCall() from runtime package - -### Garbage collector metrics -- Runtime/GC/NumberOfGCCalls - Nuber of GC calls, as it reported by ReadGCStats() from runtime/debug -- Runtime/GC/PauseTotalTime - Total pause time diring GC calls, as it reported by ReadGCStats() from runtime/debug (in nanoseconds) -- Runtime/GC/GCTime/Max - max GC time -- Runtime/GC/GCTime/Min - min GC time -- Runtime/GC/GCTime/Mean - GC mean time -- Runtime/GC/GCTime/Percentile95 - 95% percentile of GC time - -All this metrics are measured in nanoseconds. Last 4 of them can be inaccurate if GC called more often then once in GCPollInterval. -If in your workload GC is called more often - you can consider decreasing value of GCPollInterval. -But be carefull, ReadGCStats() blocks mheap, so its not good idea to set GCPollInterval to very low values. - -### Memory allocator -- Component/Runtime/Memory/SysMem/Total - number of bytes/minute allocated from OS totally. -- Component/Runtime/Memory/SysMem/Stack - number of bytes/minute allocated from OS for stacks. -- Component/Runtime/Memory/SysMem/MSpan - number of bytes/minute allocated from OS for internal MSpan structs. -- Component/Runtime/Memory/SysMem/MCache - number of bytes/minute allocated from OS for internal MCache structs. -- Component/Runtime/Memory/SysMem/Heap - number of bytes/minute allocated from OS for heap. 
-- Component/Runtime/Memory/SysMem/BuckHash - number of bytes/minute allocated from OS for internal BuckHash structs. -- Component/Runtime/Memory/Operations/NoFrees - number of memory frees per minute -- Component/Runtime/Memory/Operations/NoMallocs - number of memory allocations per minute -- Component/Runtime/Memory/Operations/NoPointerLookups - number of pointer lookups per minute -- Component/Runtime/Memory/InUse/Total - total amount of memory in use -- Component/Runtime/Memory/InUse/Heap - amount of memory in use for heap -- Component/Runtime/Memory/InUse/MCacheInuse - amount of memory in use for MCache internal structures -- Component/Runtime/Memory/InUse/MSpanInuse - amount of memory in use for MSpan internal structures -- Component/Runtime/Memory/InUse/Stack - amount of memory in use for stacks - -### Process metrics -- Component/Runtime/System/Threads - number of OS threads used -- Runtime/System/FDSize - number of file descriptors, used by process -- Runtime/System/Memory/VmPeakSize - VM max size -- Runtime/System/Memory/VmCurrent - VM current size -- Runtime/System/Memory/RssPeak - max size of resident memory set -- Runtime/System/Memory/RssCurrent - current size of resident memory set - -All this metrics collected once in MemoryAllocatorPollInterval. In order to collect this statistic agent use ReadMemStats() routine. -This routine calls stoptheworld() internally and it block everything. So, please, consider this when you change MemoryAllocatorPollInterval value. - -### HTTP metrics -- throughput (requests per second), calculated for last minute -- mean throughput (requests per second) -- mean response time -- min response time -- max response time -- 75%, 90%, 95% percentiles for response time - - -In order to collect HTTP metrics, handler functions must be wrapped using WrapHTTPHandlerFunc: - -```go -http.HandleFunc("/", agent.WrapHTTPHandlerFunc(handler)) -``` - -## TODO -- Collect per-size allocation statistic -- Collect user defined metrics - diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/agent.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/agent.go deleted file mode 100644 index 660623d6..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/agent.go +++ /dev/null @@ -1,137 +0,0 @@ -package gorelic - -import ( - "errors" - "fmt" - metrics "github.com/yvasiyarov/go-metrics" - "github.com/yvasiyarov/newrelic_platform_go" - "log" - "net/http" -) - -const ( - // DefaultNewRelicPollInterval - how often we will report metrics to NewRelic. - // Recommended values is 60 seconds - DefaultNewRelicPollInterval = 60 - - // DefaultGcPollIntervalInSeconds - how often we will get garbage collector run statistic - // Default value is - every 10 seconds - // During GC stat pooling - mheap will be locked, so be carefull changing this value - DefaultGcPollIntervalInSeconds = 10 - - // DefaultMemoryAllocatorPollIntervalInSeconds - how often we will get memory allocator statistic. - // Default value is - every 60 seconds - // During this process stoptheword() is called, so be carefull changing this value - DefaultMemoryAllocatorPollIntervalInSeconds = 60 - - //DefaultAgentGuid is plugin ID in NewRelic. - //You should not change it unless you want to create your own plugin. - DefaultAgentGuid = "com.github.yvasiyarov.GoRelic" - - //CurrentAgentVersion is plugin version - CurrentAgentVersion = "0.0.6" - - //DefaultAgentName in NewRelic GUI. You can change it. 
- DefaultAgentName = "Go daemon" -) - -//Agent - is NewRelic agent implementation. -//Agent start separate go routine which will report data to NewRelic -type Agent struct { - NewrelicName string - NewrelicLicense string - NewrelicPollInterval int - Verbose bool - CollectGcStat bool - CollectMemoryStat bool - CollectHTTPStat bool - GCPollInterval int - MemoryAllocatorPollInterval int - AgentGUID string - AgentVersion string - plugin *newrelic_platform_go.NewrelicPlugin - HTTPTimer metrics.Timer -} - -//NewAgent build new Agent objects. -func NewAgent() *Agent { - agent := &Agent{ - NewrelicName: DefaultAgentName, - NewrelicPollInterval: DefaultNewRelicPollInterval, - Verbose: false, - CollectGcStat: true, - CollectMemoryStat: true, - GCPollInterval: DefaultGcPollIntervalInSeconds, - MemoryAllocatorPollInterval: DefaultMemoryAllocatorPollIntervalInSeconds, - AgentGUID: DefaultAgentGuid, - AgentVersion: CurrentAgentVersion, - } - return agent -} - -//WrapHTTPHandlerFunc instrument HTTP handler functions to collect HTTP metrics -func (agent *Agent) WrapHTTPHandlerFunc(h tHTTPHandlerFunc) tHTTPHandlerFunc { - agent.initTimer() - return func(w http.ResponseWriter, req *http.Request) { - proxy := newHTTPHandlerFunc(h) - proxy.timer = agent.HTTPTimer - proxy.ServeHTTP(w, req) - } -} - -//WrapHTTPHandler instrument HTTP handler object to collect HTTP metrics -func (agent *Agent) WrapHTTPHandler(h http.Handler) http.Handler { - agent.initTimer() - - proxy := newHTTPHandler(h) - proxy.timer = agent.HTTPTimer - return proxy -} - -//Run initialize Agent instance and start harvest go routine -func (agent *Agent) Run() error { - if agent.NewrelicLicense == "" { - return errors.New("please, pass a valid newrelic license key") - } - - agent.plugin = newrelic_platform_go.NewNewrelicPlugin(agent.AgentVersion, agent.NewrelicLicense, agent.NewrelicPollInterval) - component := newrelic_platform_go.NewPluginComponent(agent.NewrelicName, agent.AgentGUID) - agent.plugin.AddComponent(component) - - addRuntimeMericsToComponent(component) - - if agent.CollectGcStat { - addGCMericsToComponent(component, agent.GCPollInterval) - agent.debug(fmt.Sprintf("Init GC metrics collection. Poll interval %d seconds.", agent.GCPollInterval)) - } - if agent.CollectMemoryStat { - addMemoryMericsToComponent(component, agent.MemoryAllocatorPollInterval) - agent.debug(fmt.Sprintf("Init memory allocator metrics collection. Poll interval %d seconds.", agent.MemoryAllocatorPollInterval)) - } - - if agent.CollectHTTPStat { - agent.initTimer() - addHTTPMericsToComponent(component, agent.HTTPTimer) - agent.debug(fmt.Sprintf("Init HTTP metrics collection.")) - } - - agent.plugin.Verbose = agent.Verbose - go agent.plugin.Run() - return nil -} - -//Initialize global metrics.Timer object, used to collect HTTP metrics -func (agent *Agent) initTimer() { - if agent.HTTPTimer == nil { - agent.HTTPTimer = metrics.NewTimer() - } - - agent.CollectHTTPStat = true -} - -//Print debug messages -func (agent *Agent) debug(msg string) { - if agent.Verbose { - log.Println(msg) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/doc.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/doc.go deleted file mode 100644 index 69de9fee..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package gorelic is an New Relic agent implementation for Go runtime. 
It collect a lot of metrics about Go scheduler, garbage collector and memory allocator and send them to NewRelic. -package gorelic diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/gc_metrics.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/gc_metrics.go deleted file mode 100644 index 39405940..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/gc_metrics.go +++ /dev/null @@ -1,65 +0,0 @@ -package gorelic - -import ( - metrics "github.com/yvasiyarov/go-metrics" - "github.com/yvasiyarov/newrelic_platform_go" - "time" -) - -func newGCMetricaDataSource(pollInterval int) goMetricaDataSource { - r := metrics.NewRegistry() - - metrics.RegisterDebugGCStats(r) - go metrics.CaptureDebugGCStats(r, time.Duration(pollInterval)*time.Second) - return goMetricaDataSource{r} -} - -func addGCMericsToComponent(component newrelic_platform_go.IComponent, pollInterval int) { - metrics := []*baseGoMetrica{ - &baseGoMetrica{ - name: "NumberOfGCCalls", - units: "calls", - dataSourceKey: "debug.GCStats.NumGC", - }, - &baseGoMetrica{ - name: "PauseTotalTime", - units: "nanoseconds", - dataSourceKey: "debug.GCStats.PauseTotal", - }, - } - - ds := newGCMetricaDataSource(pollInterval) - for _, m := range metrics { - m.basePath = "Runtime/GC/" - m.dataSource = ds - component.AddMetrica(&gaugeMetrica{m}) - } - - histogramMetrics := []*histogramMetrica{ - &histogramMetrica{ - statFunction: histogramMax, - baseGoMetrica: &baseGoMetrica{name: "Max"}, - }, - &histogramMetrica{ - statFunction: histogramMin, - baseGoMetrica: &baseGoMetrica{name: "Min"}, - }, - &histogramMetrica{ - statFunction: histogramMean, - baseGoMetrica: &baseGoMetrica{name: "Mean"}, - }, - &histogramMetrica{ - statFunction: histogramPercentile, - percentileValue: 0.95, - baseGoMetrica: &baseGoMetrica{name: "Percentile95"}, - }, - } - for _, m := range histogramMetrics { - m.baseGoMetrica.units = "nanoseconds" - m.baseGoMetrica.dataSourceKey = "debug.GCStats.Pause" - m.baseGoMetrica.basePath = "Runtime/GC/GCTime/" - m.baseGoMetrica.dataSource = ds - - component.AddMetrica(m) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/gometrica.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/gometrica.go deleted file mode 100644 index 52fcdd57..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/gometrica.go +++ /dev/null @@ -1,105 +0,0 @@ -package gorelic - -import ( - "fmt" - metrics "github.com/yvasiyarov/go-metrics" -) - -const ( - histogramMin = iota - histogramMax - histogramMean - histogramPercentile - histogramStdDev - histogramVariance - noHistogramFunctions -) - -type goMetricaDataSource struct { - metrics.Registry -} - -func (ds goMetricaDataSource) GetGaugeValue(key string) (float64, error) { - if valueContainer := ds.Get(key); valueContainer == nil { - return 0, fmt.Errorf("metrica with name %s is not registered\n", key) - } else if gauge, ok := valueContainer.(metrics.Gauge); ok { - return float64(gauge.Value()), nil - } else { - return 0, fmt.Errorf("metrica container has unexpected type: %T\n", valueContainer) - } -} - -func (ds goMetricaDataSource) GetHistogramValue(key string, statFunction int, percentile float64) (float64, error) { - if valueContainer := ds.Get(key); valueContainer == nil { - return 0, fmt.Errorf("metrica with name %s is not registered\n", key) - } else if histogram, ok := valueContainer.(metrics.Histogram); ok 
{ - switch statFunction { - default: - return 0, fmt.Errorf("unsupported stat function for histogram: %s\n", statFunction) - case histogramMax: - return float64(histogram.Max()), nil - case histogramMin: - return float64(histogram.Min()), nil - case histogramMean: - return float64(histogram.Mean()), nil - case histogramStdDev: - return float64(histogram.StdDev()), nil - case histogramVariance: - return float64(histogram.Variance()), nil - case histogramPercentile: - return float64(histogram.Percentile(percentile)), nil - } - } else { - return 0, fmt.Errorf("metrica container has unexpected type: %T\n", valueContainer) - } -} - -type baseGoMetrica struct { - dataSource goMetricaDataSource - basePath string - name string - units string - dataSourceKey string -} - -func (metrica *baseGoMetrica) GetName() string { - return metrica.basePath + metrica.name -} - -func (metrica *baseGoMetrica) GetUnits() string { - return metrica.units -} - -type gaugeMetrica struct { - *baseGoMetrica -} - -func (metrica *gaugeMetrica) GetValue() (float64, error) { - return metrica.dataSource.GetGaugeValue(metrica.dataSourceKey) -} - -type gaugeIncMetrica struct { - *baseGoMetrica - previousValue float64 -} - -func (metrica *gaugeIncMetrica) GetValue() (float64, error) { - var value float64 - var currentValue float64 - var err error - if currentValue, err = metrica.dataSource.GetGaugeValue(metrica.dataSourceKey); err == nil { - value = currentValue - metrica.previousValue - metrica.previousValue = currentValue - } - return value, err -} - -type histogramMetrica struct { - *baseGoMetrica - statFunction int - percentileValue float64 -} - -func (metrica *histogramMetrica) GetValue() (float64, error) { - return metrica.dataSource.GetHistogramValue(metrica.dataSourceKey, metrica.statFunction, metrica.percentileValue) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/http_metrics.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/http_metrics.go deleted file mode 100644 index e54cbd37..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/http_metrics.go +++ /dev/null @@ -1,194 +0,0 @@ -package gorelic - -import ( - metrics "github.com/yvasiyarov/go-metrics" - "github.com/yvasiyarov/newrelic_platform_go" - "net/http" - "time" -) - -type tHTTPHandlerFunc func(http.ResponseWriter, *http.Request) -type tHTTPHandler struct { - originalHandler http.Handler - originalHandlerFunc tHTTPHandlerFunc - isFunc bool - timer metrics.Timer -} - -var httpTimer metrics.Timer - -func newHTTPHandlerFunc(h tHTTPHandlerFunc) *tHTTPHandler { - return &tHTTPHandler{ - isFunc: true, - originalHandlerFunc: h, - } -} -func newHTTPHandler(h http.Handler) *tHTTPHandler { - return &tHTTPHandler{ - isFunc: false, - originalHandler: h, - } -} - -func (handler *tHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - startTime := time.Now() - defer handler.timer.UpdateSince(startTime) - - if handler.isFunc { - handler.originalHandlerFunc(w, req) - } else { - handler.originalHandler.ServeHTTP(w, req) - } -} - -type baseTimerMetrica struct { - dataSource metrics.Timer - name string - units string -} - -func (metrica *baseTimerMetrica) GetName() string { - return metrica.name -} - -func (metrica *baseTimerMetrica) GetUnits() string { - return metrica.units -} - -type timerRate1Metrica struct { - *baseTimerMetrica -} - -func (metrica *timerRate1Metrica) GetValue() (float64, error) { - return metrica.dataSource.Rate1(), nil -} - -type 
timerRateMeanMetrica struct { - *baseTimerMetrica -} - -func (metrica *timerRateMeanMetrica) GetValue() (float64, error) { - return metrica.dataSource.RateMean(), nil -} - -type timerMeanMetrica struct { - *baseTimerMetrica -} - -func (metrica *timerMeanMetrica) GetValue() (float64, error) { - return metrica.dataSource.Mean() / float64(time.Millisecond), nil -} - -type timerMinMetrica struct { - *baseTimerMetrica -} - -func (metrica *timerMinMetrica) GetValue() (float64, error) { - return float64(metrica.dataSource.Min()) / float64(time.Millisecond), nil -} - -type timerMaxMetrica struct { - *baseTimerMetrica -} - -func (metrica *timerMaxMetrica) GetValue() (float64, error) { - return float64(metrica.dataSource.Max()) / float64(time.Millisecond), nil -} - -type timerPercentile75Metrica struct { - *baseTimerMetrica -} - -func (metrica *timerPercentile75Metrica) GetValue() (float64, error) { - return metrica.dataSource.Percentile(0.75) / float64(time.Millisecond), nil -} - -type timerPercentile90Metrica struct { - *baseTimerMetrica -} - -func (metrica *timerPercentile90Metrica) GetValue() (float64, error) { - return metrica.dataSource.Percentile(0.90) / float64(time.Millisecond), nil -} - -type timerPercentile95Metrica struct { - *baseTimerMetrica -} - -func (metrica *timerPercentile95Metrica) GetValue() (float64, error) { - return metrica.dataSource.Percentile(0.95) / float64(time.Millisecond), nil -} - -func addHTTPMericsToComponent(component newrelic_platform_go.IComponent, timer metrics.Timer) { - rate1 := &timerRate1Metrica{ - baseTimerMetrica: &baseTimerMetrica{ - name: "http/throughput/1minute", - units: "rps", - dataSource: timer, - }, - } - component.AddMetrica(rate1) - - rateMean := &timerRateMeanMetrica{ - baseTimerMetrica: &baseTimerMetrica{ - name: "http/throughput/rateMean", - units: "rps", - dataSource: timer, - }, - } - component.AddMetrica(rateMean) - - responseTimeMean := &timerMeanMetrica{ - baseTimerMetrica: &baseTimerMetrica{ - name: "http/responseTime/mean", - units: "ms", - dataSource: timer, - }, - } - component.AddMetrica(responseTimeMean) - - responseTimeMax := &timerMaxMetrica{ - baseTimerMetrica: &baseTimerMetrica{ - name: "http/responseTime/max", - units: "ms", - dataSource: timer, - }, - } - component.AddMetrica(responseTimeMax) - - responseTimeMin := &timerMinMetrica{ - baseTimerMetrica: &baseTimerMetrica{ - name: "http/responseTime/min", - units: "ms", - dataSource: timer, - }, - } - component.AddMetrica(responseTimeMin) - - responseTimePercentile75 := &timerPercentile75Metrica{ - baseTimerMetrica: &baseTimerMetrica{ - name: "http/responseTime/percentile75", - units: "ms", - dataSource: timer, - }, - } - component.AddMetrica(responseTimePercentile75) - - responseTimePercentile90 := &timerPercentile90Metrica{ - baseTimerMetrica: &baseTimerMetrica{ - name: "http/responseTime/percentile90", - units: "ms", - dataSource: timer, - }, - } - component.AddMetrica(responseTimePercentile90) - - responseTimePercentile95 := &timerPercentile95Metrica{ - baseTimerMetrica: &baseTimerMetrica{ - name: "http/responseTime/percentile95", - units: "ms", - dataSource: timer, - }, - } - component.AddMetrica(responseTimePercentile95) -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/memory_metrics.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/memory_metrics.go deleted file mode 100644 index 5c8d3e4e..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/memory_metrics.go +++ 
/dev/null @@ -1,110 +0,0 @@ -package gorelic - -import ( - metrics "github.com/yvasiyarov/go-metrics" - "github.com/yvasiyarov/newrelic_platform_go" - "time" -) - -func newMemoryMetricaDataSource(pollInterval int) goMetricaDataSource { - r := metrics.NewRegistry() - - metrics.RegisterRuntimeMemStats(r) - metrics.CaptureRuntimeMemStatsOnce(r) - go metrics.CaptureRuntimeMemStats(r, time.Duration(pollInterval)*time.Second) - return goMetricaDataSource{r} -} - -func addMemoryMericsToComponent(component newrelic_platform_go.IComponent, pollInterval int) { - gaugeMetrics := []*baseGoMetrica{ - //Memory in use metrics - &baseGoMetrica{ - name: "InUse/Total", - units: "bytes", - dataSourceKey: "runtime.MemStats.Alloc", - }, - &baseGoMetrica{ - name: "InUse/Heap", - units: "bytes", - dataSourceKey: "runtime.MemStats.HeapAlloc", - }, - &baseGoMetrica{ - name: "InUse/Stack", - units: "bytes", - dataSourceKey: "runtime.MemStats.StackInuse", - }, - &baseGoMetrica{ - name: "InUse/MSpanInuse", - units: "bytes", - dataSourceKey: "runtime.MemStats.MSpanInuse", - }, - &baseGoMetrica{ - name: "InUse/MCacheInuse", - units: "bytes", - dataSourceKey: "runtime.MemStats.MCacheInuse", - }, - } - ds := newMemoryMetricaDataSource(pollInterval) - for _, m := range gaugeMetrics { - m.basePath = "Runtime/Memory/" - m.dataSource = ds - component.AddMetrica(&gaugeMetrica{m}) - } - - gaugeIncMetrics := []*baseGoMetrica{ - //NO operations graph - &baseGoMetrica{ - name: "Operations/NoPointerLookups", - units: "lookups", - dataSourceKey: "runtime.MemStats.Lookups", - }, - &baseGoMetrica{ - name: "Operations/NoMallocs", - units: "mallocs", - dataSourceKey: "runtime.MemStats.Mallocs", - }, - &baseGoMetrica{ - name: "Operations/NoFrees", - units: "frees", - dataSourceKey: "runtime.MemStats.Frees", - }, - - // Sytem memory allocations - &baseGoMetrica{ - name: "SysMem/Total", - units: "bytes", - dataSourceKey: "runtime.MemStats.Sys", - }, - &baseGoMetrica{ - name: "SysMem/Heap", - units: "bytes", - dataSourceKey: "runtime.MemStats.HeapSys", - }, - &baseGoMetrica{ - name: "SysMem/Stack", - units: "bytes", - dataSourceKey: "runtime.MemStats.StackSys", - }, - &baseGoMetrica{ - name: "SysMem/MSpan", - units: "bytes", - dataSourceKey: "runtime.MemStats.MSpanSys", - }, - &baseGoMetrica{ - name: "SysMem/MCache", - units: "bytes", - dataSourceKey: "runtime.MemStats.MCacheSys", - }, - &baseGoMetrica{ - name: "SysMem/BuckHash", - units: "bytes", - dataSourceKey: "runtime.MemStats.BuckHashSys", - }, - } - - for _, m := range gaugeIncMetrics { - m.basePath = "Runtime/Memory/" - m.dataSource = ds - component.AddMetrica(&gaugeIncMetrica{baseGoMetrica: m}) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/nut.json b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/nut.json deleted file mode 100644 index 7abb8ec6..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/nut.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "Version": "0.0.6", - "Vendor": "yvasiyarov", - "Authors": [ - { - "FullName": "Yuriy Vasiyarov", - "Email": "varyous@gmail.com" - } - ], - "ExtraFiles": [ - "README.md", - "LICENSE" - ], - "Homepage": "https://github.com/yvasiyarov/gorelic" -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/runtime_metrics.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/runtime_metrics.go deleted file mode 100644 index 87a42ca6..00000000 --- 
a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/gorelic/runtime_metrics.go +++ /dev/null @@ -1,196 +0,0 @@ -package gorelic - -import ( - "fmt" - "github.com/yvasiyarov/newrelic_platform_go" - "io/ioutil" - "os" - "runtime" - "strconv" - "strings" - "time" -) - -const linuxSystemQueryInterval = 60 - -// Number of goroutines metrica -type noGoroutinesMetrica struct{} - -func (metrica *noGoroutinesMetrica) GetName() string { - return "Runtime/General/NOGoroutines" -} -func (metrica *noGoroutinesMetrica) GetUnits() string { - return "goroutines" -} -func (metrica *noGoroutinesMetrica) GetValue() (float64, error) { - return float64(runtime.NumGoroutine()), nil -} - -// Number of CGO calls metrica -type noCgoCallsMetrica struct { - lastValue int64 -} - -func (metrica *noCgoCallsMetrica) GetName() string { - return "Runtime/General/NOCgoCalls" -} -func (metrica *noCgoCallsMetrica) GetUnits() string { - return "calls" -} -func (metrica *noCgoCallsMetrica) GetValue() (float64, error) { - currentValue := runtime.NumCgoCall() - value := float64(currentValue - metrica.lastValue) - metrica.lastValue = currentValue - - return value, nil -} - -//OS specific metrics data source interface -type iSystemMetricaDataSource interface { - GetValue(key string) (float64, error) -} - -// iSystemMetricaDataSource fabrica -func newSystemMetricaDataSource() iSystemMetricaDataSource { - var ds iSystemMetricaDataSource - switch runtime.GOOS { - default: - ds = &systemMetricaDataSource{} - case "linux": - ds = &linuxSystemMetricaDataSource{ - systemData: make(map[string]string), - } - } - return ds -} - -//Default implementation of iSystemMetricaDataSource. Just return an error -type systemMetricaDataSource struct{} - -func (ds *systemMetricaDataSource) GetValue(key string) (float64, error) { - return 0, fmt.Errorf("this metrica was not implemented yet for %s", runtime.GOOS) -} - -// Linux OS implementation of ISystemMetricaDataSource -type linuxSystemMetricaDataSource struct { - lastUpdate time.Time - systemData map[string]string -} - -func (ds *linuxSystemMetricaDataSource) GetValue(key string) (float64, error) { - if err := ds.checkAndUpdateData(); err != nil { - return 0, err - } else if val, ok := ds.systemData[key]; !ok { - return 0, fmt.Errorf("system data with key %s was not found", key) - } else if key == "VmSize" || key == "VmPeak" || key == "VmHWM" || key == "VmRSS" { - valueParts := strings.Split(val, " ") - if len(valueParts) != 2 { - return 0, fmt.Errorf("invalid format for value %s", key) - } - valConverted, err := strconv.ParseFloat(valueParts[0], 64) - if err != nil { - return 0, err - } - switch valueParts[1] { - case "kB": - valConverted *= 1 << 10 - case "mB": - valConverted *= 1 << 20 - case "gB": - valConverted *= 1 << 30 - } - return valConverted, nil - } else if valConverted, err := strconv.ParseFloat(val, 64); err != nil { - return valConverted, nil - } else { - return valConverted, nil - } -} -func (ds *linuxSystemMetricaDataSource) checkAndUpdateData() error { - startTime := time.Now() - if startTime.Sub(ds.lastUpdate) > time.Second*linuxSystemQueryInterval { - path := fmt.Sprintf("/proc/%d/status", os.Getpid()) - rawStats, err := ioutil.ReadFile(path) - if err != nil { - return err - } - - lines := strings.Split(string(rawStats), "\n") - for _, line := range lines { - parts := strings.Split(line, ":") - if len(parts) == 2 { - k := strings.TrimSpace(parts[0]) - v := strings.TrimSpace(parts[1]) - - ds.systemData[k] = v - } - } - ds.lastUpdate = startTime - } - return nil -} 
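The `linuxSystemMetricaDataSource` deleted above derives its process metrics by tokenizing `/proc/<pid>/status` into key/value pairs and caching them. A minimal standalone sketch of the same parsing technique, independent of the vendored types (it swaps the deprecated `ioutil.ReadFile` for `os.ReadFile` and uses `SplitN` so values containing `:` survive; Linux-only, like the original):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// readProcStatus parses /proc/self/status into a key -> value map,
// mirroring what checkAndUpdateData caches above.
func readProcStatus() (map[string]string, error) {
	raw, err := os.ReadFile("/proc/self/status")
	if err != nil {
		return nil, err
	}
	stats := make(map[string]string)
	for _, line := range strings.Split(string(raw), "\n") {
		parts := strings.SplitN(line, ":", 2)
		if len(parts) == 2 {
			stats[strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
		}
	}
	return stats, nil
}

func main() {
	stats, err := readProcStatus()
	if err != nil {
		panic(err)
	}
	// VmRSS is the field behind the Runtime/System/Memory/RssCurrent metric.
	fmt.Println("VmRSS:", stats["VmRSS"])
}
```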
- -// OS specific metrica -type systemMetrica struct { - sourceKey string - newrelicName string - units string - dataSource iSystemMetricaDataSource -} - -func (metrica *systemMetrica) GetName() string { - return metrica.newrelicName -} -func (metrica *systemMetrica) GetUnits() string { - return metrica.units -} -func (metrica *systemMetrica) GetValue() (float64, error) { - return metrica.dataSource.GetValue(metrica.sourceKey) -} - -func addRuntimeMericsToComponent(component newrelic_platform_go.IComponent) { - component.AddMetrica(&noGoroutinesMetrica{}) - component.AddMetrica(&noCgoCallsMetrica{}) - - ds := newSystemMetricaDataSource() - metrics := []*systemMetrica{ - &systemMetrica{ - sourceKey: "Threads", - units: "Threads", - newrelicName: "Runtime/System/Threads", - }, - &systemMetrica{ - sourceKey: "FDSize", - units: "fd", - newrelicName: "Runtime/System/FDSize", - }, - // Peak virtual memory size - &systemMetrica{ - sourceKey: "VmPeak", - units: "bytes", - newrelicName: "Runtime/System/Memory/VmPeakSize", - }, - //Virtual memory size - &systemMetrica{ - sourceKey: "VmSize", - units: "bytes", - newrelicName: "Runtime/System/Memory/VmCurrent", - }, - //Peak resident set size - &systemMetrica{ - sourceKey: "VmHWM", - units: "bytes", - newrelicName: "Runtime/System/Memory/RssPeak", - }, - //Resident set size - &systemMetrica{ - sourceKey: "VmRSS", - units: "bytes", - newrelicName: "Runtime/System/Memory/RssCurrent", - }, - } - for _, m := range metrics { - m.dataSource = ds - component.AddMetrica(m) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/.travis.yml b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/.travis.yml deleted file mode 100644 index 4f2ee4d9..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/LICENSE deleted file mode 100644 index 01a9a5c4..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2013 Yuriy Vasiyarov. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/README.md b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/README.md deleted file mode 100644 index 34462344..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/README.md +++ /dev/null @@ -1,11 +0,0 @@ -New Relic Platform Agent SDK for Go(golang) -==================== - -[![Build Status](https://travis-ci.org/yvasiyarov/newrelic_platform_go.png?branch=master)](https://travis-ci.org/yvasiyarov/newrelic_platform_go) - -This package provide very simple interface to NewRelic Platform http://newrelic.com/platform - -For example of usage see examples/wave_plugin.go - -For real-word example, you can have a look at: -https://github.com/yvasiyarov/newrelic_sphinx diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/agent.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/agent.go deleted file mode 100644 index d9d27535..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/agent.go +++ /dev/null @@ -1,27 +0,0 @@ -package newrelic_platform_go - -import ( - "log" - "os" -) - -type Agent struct { - Host string `json:"host"` - Version string `json:"version"` - Pid int `json:"pid"` -} - -func NewAgent(Version string) *Agent { - agent := &Agent{ - Version: Version, - } - return agent -} - -func (agent *Agent) CollectEnvironmentInfo() { - var err error - agent.Pid = os.Getpid() - if agent.Host, err = os.Hostname(); err != nil { - log.Fatalf("Can not get hostname: %#v \n", err) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/component.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/component.go deleted file mode 100644 index 000f7ab7..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/component.go +++ /dev/null @@ -1,71 +0,0 @@ -package newrelic_platform_go - -import ( - "log" - "math" -) - -type ComponentData interface{} -type IComponent interface { - Harvest(plugin INewrelicPlugin) ComponentData - SetDuration(duration int) - AddMetrica(model IMetrica) - ClearSentData() -} - -type PluginComponent struct { - Name string `json:"name"` - GUID string `json:"guid"` - Duration int `json:"duration"` - Metrics map[string]MetricaValue `json:"metrics"` - MetricaModels []IMetrica `json:"-"` -} - -func NewPluginComponent(name string, guid string) *PluginComponent { - c := &PluginComponent{ - Name: name, - GUID: guid, - } - return c -} - -func (component *PluginComponent) AddMetrica(model IMetrica) { - component.MetricaModels = append(component.MetricaModels, model) -} - -func (component *PluginComponent) ClearSentData() { - component.Metrics = nil -} - -func (component *PluginComponent) SetDuration(duration int) { - 
component.Duration = duration -} - -func (component *PluginComponent) Harvest(plugin INewrelicPlugin) ComponentData { - component.Metrics = make(map[string]MetricaValue, len(component.MetricaModels)) - for i := 0; i < len(component.MetricaModels); i++ { - model := component.MetricaModels[i] - metricaKey := plugin.GetMetricaKey(model) - - if newValue, err := model.GetValue(); err == nil { - if math.IsInf(newValue, 0) || math.IsNaN(newValue) { - newValue = 0 - } - - if existMetric, ok := component.Metrics[metricaKey]; ok { - if floatExistVal, ok := existMetric.(float64); ok { - component.Metrics[metricaKey] = NewAggregatedMetricaValue(floatExistVal, newValue) - } else if aggregatedValue, ok := existMetric.(*AggregatedMetricaValue); ok { - aggregatedValue.Aggregate(newValue) - } else { - panic("Invalid type in metrica value") - } - } else { - component.Metrics[metricaKey] = newValue - } - } else { - log.Printf("Can not get metrica: %v, got error:%#v", model.GetName(), err) - } - } - return component -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/doc.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/doc.go deleted file mode 100644 index ef41e969..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package newrelic_platform_go is New Relic Platform Agent SDK for Go language. -package newrelic_platform_go diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/metrica.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/metrica.go deleted file mode 100644 index fc4fbd48..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/metrica.go +++ /dev/null @@ -1,42 +0,0 @@ -package newrelic_platform_go - -import ( - "math" -) - -type IMetrica interface { - GetValue() (float64, error) - GetName() string - GetUnits() string -} - -type MetricaValue interface{} - -type SimpleMetricaValue float64 - -type AggregatedMetricaValue struct { - Min float64 `json:"min"` - Max float64 `json:"max"` - Total float64 `json:"total"` - Count int `json:"count"` - SumOfSquares float64 `json:"sum_of_squares"` -} - -func NewAggregatedMetricaValue(existValue float64, newValue float64) *AggregatedMetricaValue { - v := &AggregatedMetricaValue{ - Min: math.Min(newValue, existValue), - Max: math.Max(newValue, existValue), - Total: newValue + existValue, - Count: 2, - SumOfSquares: newValue*newValue + existValue*existValue, - } - return v -} - -func (aggregatedValue *AggregatedMetricaValue) Aggregate(newValue float64) { - aggregatedValue.Min = math.Min(newValue, aggregatedValue.Min) - aggregatedValue.Max = math.Max(newValue, aggregatedValue.Max) - aggregatedValue.Total += newValue - aggregatedValue.Count++ - aggregatedValue.SumOfSquares += newValue * newValue -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/nut.json b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/nut.json deleted file mode 100644 index 1e57c395..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/nut.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "Version": "0.0.1", - "Vendor": "yvasiyarov", - "Authors": [ - { - "FullName": "Yuriy Vasiyarov", - "Email": "varyous@gmail.com" - } - ], - "ExtraFiles": [ - 
"README.md", - "LICENSE" - ], - "Homepage": "https://github.com/yvasiyarov/newrelic_platform_go" -} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/plugin.go b/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/plugin.go deleted file mode 100644 index 3e45666d..00000000 --- a/vendor/github.com/docker/distribution/vendor/github.com/yvasiyarov/newrelic_platform_go/plugin.go +++ /dev/null @@ -1,194 +0,0 @@ -package newrelic_platform_go - -import ( - "bytes" - "encoding/json" - "fmt" - "log" - "net/http" - "strings" - "time" -) - -const ( - NEWRELIC_API_URL = "https://platform-api.newrelic.com/platform/v1/metrics" -) - -type INewrelicPlugin interface { - GetMetricaKey(metrica IMetrica) string - Harvest() error - Run() - AddComponent(component IComponent) -} -type NewrelicPlugin struct { - Agent *Agent `json:"agent"` - Components []ComponentData `json:"components"` - - ComponentModels []IComponent `json:"-"` - LastPollTime time.Time `json:"-"` - Verbose bool `json:"-"` - LicenseKey string `json:"-"` - PollIntervalInSecond int `json:"-"` -} - -func NewNewrelicPlugin(version string, licenseKey string, pollInterval int) *NewrelicPlugin { - plugin := &NewrelicPlugin{ - LicenseKey: licenseKey, - PollIntervalInSecond: pollInterval, - } - - plugin.Agent = NewAgent(version) - plugin.Agent.CollectEnvironmentInfo() - - plugin.ComponentModels = []IComponent{} - return plugin -} - -func (plugin *NewrelicPlugin) Harvest() error { - startTime := time.Now() - var duration int - if plugin.LastPollTime.IsZero() { - duration = plugin.PollIntervalInSecond - } else { - duration = int(startTime.Sub(plugin.LastPollTime).Seconds()) - } - - plugin.Components = make([]ComponentData, 0, len(plugin.ComponentModels)) - for i := 0; i < len(plugin.ComponentModels); i++ { - plugin.ComponentModels[i].SetDuration(duration) - plugin.Components = append(plugin.Components, plugin.ComponentModels[i].Harvest(plugin)) - } - - if httpCode, err := plugin.SendMetricas(); err != nil { - log.Printf("Can not send metricas to newrelic: %#v\n", err) - return err - } else { - - if plugin.Verbose { - log.Printf("Got HTTP response code:%d", httpCode) - } - - if err, isFatal := plugin.CheckResponse(httpCode); isFatal { - log.Printf("Got fatal error:%v\n", err) - return err - } else { - if err != nil { - log.Printf("WARNING: %v", err) - } - return err - } - } - return nil -} - -func (plugin *NewrelicPlugin) GetMetricaKey(metrica IMetrica) string { - var keyBuffer bytes.Buffer - - keyBuffer.WriteString("Component/") - keyBuffer.WriteString(metrica.GetName()) - keyBuffer.WriteString("[") - keyBuffer.WriteString(metrica.GetUnits()) - keyBuffer.WriteString("]") - - return keyBuffer.String() -} - -func (plugin *NewrelicPlugin) SendMetricas() (int, error) { - client := &http.Client{} - var metricasJson []byte - var encodingError error - - if plugin.Verbose { - metricasJson, encodingError = json.MarshalIndent(plugin, "", " ") - } else { - metricasJson, encodingError = json.Marshal(plugin) - } - - if encodingError != nil { - return 0, encodingError - } - - jsonAsString := string(metricasJson) - if plugin.Verbose { - log.Printf("Send data:%s \n", jsonAsString) - } - - if httpRequest, err := http.NewRequest("POST", NEWRELIC_API_URL, strings.NewReader(jsonAsString)); err != nil { - return 0, err - } else { - httpRequest.Header.Set("X-License-Key", plugin.LicenseKey) - httpRequest.Header.Set("Content-Type", "application/json") - httpRequest.Header.Set("Accept", 
"application/json") - - if httpResponse, err := client.Do(httpRequest); err != nil { - return 0, err - } else { - defer httpResponse.Body.Close() - return httpResponse.StatusCode, nil - } - } - - // we will never get there - return 0, nil -} - -func (plugin *NewrelicPlugin) ClearSentData() { - for _, component := range plugin.ComponentModels { - component.ClearSentData() - } - plugin.Components = nil - plugin.LastPollTime = time.Now() -} - -func (plugin *NewrelicPlugin) CheckResponse(httpResponseCode int) (error, bool) { - isFatal := false - var err error - switch httpResponseCode { - case http.StatusOK: - { - plugin.ClearSentData() - } - case http.StatusForbidden: - { - err = fmt.Errorf("Authentication error (no license key header, or invalid license key).\n") - isFatal = true - } - case http.StatusBadRequest: - { - err = fmt.Errorf("The request or headers are in the wrong format or the URL is incorrect.\n") - isFatal = true - } - case http.StatusNotFound: - { - err = fmt.Errorf("Invalid URL\n") - isFatal = true - } - case http.StatusRequestEntityTooLarge: - { - err = fmt.Errorf("Too many metrics were sent in one request, or too many components (instances) were specified in one request, or other single-request limits were reached.\n") - //discard metrics - plugin.ClearSentData() - } - case http.StatusInternalServerError, http.StatusBadGateway, http.StatusServiceUnavailable, http.StatusGatewayTimeout: - { - err = fmt.Errorf("Got %v response code.Metricas will be aggregated", httpResponseCode) - } - } - return err, isFatal -} - -func (plugin *NewrelicPlugin) Run() { - plugin.Harvest() - tickerChannel := time.Tick(time.Duration(plugin.PollIntervalInSecond) * time.Second) - for ts := range tickerChannel { - plugin.Harvest() - - if plugin.Verbose { - log.Printf("Harvest ended at:%v\n", ts) - } - } -} - -func (plugin *NewrelicPlugin) AddComponent(component IComponent) { - plugin.ComponentModels = append(plugin.ComponentModels, component) -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/LICENSE b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/LICENSE deleted file mode 100644 index 6a66aea5..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/PATENTS b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/PATENTS deleted file mode 100644 index 73309904..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/bcrypt/base64.go deleted file mode 100644 index fc311609..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/bcrypt/base64.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
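The remaining hunks drop the vendored copies of `golang.org/x/crypto`'s bcrypt and blowfish packages; the upstream packages are unaffected. As an illustrative usage sketch, the two entry points the deleted `bcrypt.go` exports (`GenerateFromPassword`, `CompareHashAndPassword`, with `DefaultCost`) are used like this:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// Hash a password; cost below MinCost is silently raised to DefaultCost.
	hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(hash)) // e.g. $2a$10$...

	// CompareHashAndPassword returns nil only when the password matches.
	if err := bcrypt.CompareHashAndPassword(hash, []byte("s3cret")); err != nil {
		panic(err)
	}
	fmt.Println("password ok")
}
```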
- -package bcrypt - -import "encoding/base64" - -const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" - -var bcEncoding = base64.NewEncoding(alphabet) - -func base64Encode(src []byte) []byte { - n := bcEncoding.EncodedLen(len(src)) - dst := make([]byte, n) - bcEncoding.Encode(dst, src) - for dst[n-1] == '=' { - n-- - } - return dst[:n] -} - -func base64Decode(src []byte) ([]byte, error) { - numOfEquals := 4 - (len(src) % 4) - for i := 0; i < numOfEquals; i++ { - src = append(src, '=') - } - - dst := make([]byte, bcEncoding.DecodedLen(len(src))) - n, err := bcEncoding.Decode(dst, src) - if err != nil { - return nil, err - } - return dst[:n], nil -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/bcrypt/bcrypt.go deleted file mode 100644 index b8e18d74..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/bcrypt/bcrypt.go +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing -// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf -package bcrypt - -// The code is a port of Provos and Mazières's C implementation. -import ( - "crypto/rand" - "crypto/subtle" - "errors" - "fmt" - "golang.org/x/crypto/blowfish" - "io" - "strconv" -) - -const ( - MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword - MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword - DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword -) - -// The error returned from CompareHashAndPassword when a password and hash do -// not match. -var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") - -// The error returned from CompareHashAndPassword when a hash is too short to -// be a bcrypt hash. -var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") - -// The error returned from CompareHashAndPassword when a hash was created with -// a bcrypt algorithm newer than this implementation. -type HashVersionTooNewError byte - -func (hv HashVersionTooNewError) Error() string { - return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) -} - -// The error returned from CompareHashAndPassword when a hash starts with something other than '$' -type InvalidHashPrefixError byte - -func (ih InvalidHashPrefixError) Error() string { - return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) -} - -type InvalidCostError int - -func (ic InvalidCostError) Error() string { - return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost)) -} - -const ( - majorVersion = '2' - minorVersion = 'a' - maxSaltSize = 16 - maxCryptedHashSize = 23 - encodedSaltSize = 22 - encodedHashSize = 31 - minHashSize = 59 -) - -// magicCipherData is an IV for the 64 Blowfish encryption calls in -// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. 
-var magicCipherData = []byte{ - 0x4f, 0x72, 0x70, 0x68, - 0x65, 0x61, 0x6e, 0x42, - 0x65, 0x68, 0x6f, 0x6c, - 0x64, 0x65, 0x72, 0x53, - 0x63, 0x72, 0x79, 0x44, - 0x6f, 0x75, 0x62, 0x74, -} - -type hashed struct { - hash []byte - salt []byte - cost int // allowed range is MinCost to MaxCost - major byte - minor byte -} - -// GenerateFromPassword returns the bcrypt hash of the password at the given -// cost. If the cost given is less than MinCost, the cost will be set to -// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, -// to compare the returned hashed password with its cleartext version. -func GenerateFromPassword(password []byte, cost int) ([]byte, error) { - p, err := newFromPassword(password, cost) - if err != nil { - return nil, err - } - return p.Hash(), nil -} - -// CompareHashAndPassword compares a bcrypt hashed password with its possible -// plaintext equivalent. Returns nil on success, or an error on failure. -func CompareHashAndPassword(hashedPassword, password []byte) error { - p, err := newFromHash(hashedPassword) - if err != nil { - return err - } - - otherHash, err := bcrypt(password, p.cost, p.salt) - if err != nil { - return err - } - - otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} - if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { - return nil - } - - return ErrMismatchedHashAndPassword -} - -// Cost returns the hashing cost used to create the given hashed -// password. When, in the future, the hashing cost of a password system needs -// to be increased in order to adjust for greater computational power, this -// function allows one to establish which passwords need to be updated. -func Cost(hashedPassword []byte) (int, error) { - p, err := newFromHash(hashedPassword) - if err != nil { - return 0, err - } - return p.cost, nil -} - -func newFromPassword(password []byte, cost int) (*hashed, error) { - if cost < MinCost { - cost = DefaultCost - } - p := new(hashed) - p.major = majorVersion - p.minor = minorVersion - - err := checkCost(cost) - if err != nil { - return nil, err - } - p.cost = cost - - unencodedSalt := make([]byte, maxSaltSize) - _, err = io.ReadFull(rand.Reader, unencodedSalt) - if err != nil { - return nil, err - } - - p.salt = base64Encode(unencodedSalt) - hash, err := bcrypt(password, p.cost, p.salt) - if err != nil { - return nil, err - } - p.hash = hash - return p, err -} - -func newFromHash(hashedSecret []byte) (*hashed, error) { - if len(hashedSecret) < minHashSize { - return nil, ErrHashTooShort - } - p := new(hashed) - n, err := p.decodeVersion(hashedSecret) - if err != nil { - return nil, err - } - hashedSecret = hashedSecret[n:] - n, err = p.decodeCost(hashedSecret) - if err != nil { - return nil, err - } - hashedSecret = hashedSecret[n:] - - // The "+2" is here because we'll have to append at most 2 '=' to the salt - // when base64 decoding it in expensiveBlowfishSetup(). 
- p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) - copy(p.salt, hashedSecret[:encodedSaltSize]) - - hashedSecret = hashedSecret[encodedSaltSize:] - p.hash = make([]byte, len(hashedSecret)) - copy(p.hash, hashedSecret) - - return p, nil -} - -func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { - cipherData := make([]byte, len(magicCipherData)) - copy(cipherData, magicCipherData) - - c, err := expensiveBlowfishSetup(password, uint32(cost), salt) - if err != nil { - return nil, err - } - - for i := 0; i < 24; i += 8 { - for j := 0; j < 64; j++ { - c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) - } - } - - // Bug compatibility with C bcrypt implementations. We only encode 23 of - // the 24 bytes encrypted. - hsh := base64Encode(cipherData[:maxCryptedHashSize]) - return hsh, nil -} - -func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { - - csalt, err := base64Decode(salt) - if err != nil { - return nil, err - } - - // Bug compatibility with C bcrypt implementations. They use the trailing - // NULL in the key string during expansion. - ckey := append(key, 0) - - c, err := blowfish.NewSaltedCipher(ckey, csalt) - if err != nil { - return nil, err - } - - var i, rounds uint64 - rounds = 1 << cost - for i = 0; i < rounds; i++ { - blowfish.ExpandKey(ckey, c) - blowfish.ExpandKey(csalt, c) - } - - return c, nil -} - -func (p *hashed) Hash() []byte { - arr := make([]byte, 60) - arr[0] = '$' - arr[1] = p.major - n := 2 - if p.minor != 0 { - arr[2] = p.minor - n = 3 - } - arr[n] = '$' - n += 1 - copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) - n += 2 - arr[n] = '$' - n += 1 - copy(arr[n:], p.salt) - n += encodedSaltSize - copy(arr[n:], p.hash) - n += encodedHashSize - return arr[:n] -} - -func (p *hashed) decodeVersion(sbytes []byte) (int, error) { - if sbytes[0] != '$' { - return -1, InvalidHashPrefixError(sbytes[0]) - } - if sbytes[1] > majorVersion { - return -1, HashVersionTooNewError(sbytes[1]) - } - p.major = sbytes[1] - n := 3 - if sbytes[2] != '$' { - p.minor = sbytes[2] - n++ - } - return n, nil -} - -// sbytes should begin where decodeVersion left off. -func (p *hashed) decodeCost(sbytes []byte) (int, error) { - cost, err := strconv.Atoi(string(sbytes[0:2])) - if err != nil { - return -1, err - } - err = checkCost(cost) - if err != nil { - return -1, err - } - p.cost = cost - return 3, nil -} - -func (p *hashed) String() string { - return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) -} - -func checkCost(cost int) error { - if cost < MinCost || cost > MaxCost { - return InvalidCostError(cost) - } - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/blowfish/block.go deleted file mode 100644 index 9d80f195..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/blowfish/block.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blowfish - -// getNextWord returns the next big-endian uint32 value from the byte slice -// at the given position in a circular manner, updating the position. 
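The exported surface deleted in this hunk — GenerateFromPassword, CompareHashAndPassword, and Cost — matches what upstream golang.org/x/crypto/bcrypt still ships, so a minimal usage sketch against the upstream import path (not this vendored copy) looks like:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// Hash at the default cost; any cost below MinCost is silently
	// bumped to DefaultCost, per the documentation above.
	hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}

	// CompareHashAndPassword returns nil on match, an error otherwise.
	if err := bcrypt.CompareHashAndPassword(hash, []byte("s3cret")); err != nil {
		fmt.Println("mismatch:", err)
	} else {
		fmt.Println("password ok")
	}

	// Cost recovers the work factor embedded in the hash, useful when
	// deciding whether stored hashes should be re-hashed at a higher cost.
	cost, _ := bcrypt.Cost(hash)
	fmt.Println("cost:", cost)
}
```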
-func getNextWord(b []byte, pos *int) uint32 { - var w uint32 - j := *pos - for i := 0; i < 4; i++ { - w = w<<8 | uint32(b[j]) - j++ - if j >= len(b) { - j = 0 - } - } - *pos = j - return w -} - -// ExpandKey performs a key expansion on the given *Cipher. Specifically, it -// performs the Blowfish algorithm's key schedule which sets up the *Cipher's -// pi and substitution tables for calls to Encrypt. This is used, primarily, -// by the bcrypt package to reuse the Blowfish key schedule during its -// set up. It's unlikely that you need to use this directly. -func ExpandKey(key []byte, c *Cipher) { - j := 0 - for i := 0; i < 18; i++ { - // Using inlined getNextWord for performance. - var d uint32 - for k := 0; k < 4; k++ { - d = d<<8 | uint32(key[j]) - j++ - if j >= len(key) { - j = 0 - } - } - c.p[i] ^= d - } - - var l, r uint32 - for i := 0; i < 18; i += 2 { - l, r = encryptBlock(l, r, c) - c.p[i], c.p[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s0[i], c.s0[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s1[i], c.s1[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s2[i], c.s2[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s3[i], c.s3[i+1] = l, r - } -} - -// This is similar to ExpandKey, but folds the salt during the key -// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero -// salt passed in, reusing ExpandKey turns out to be a place of inefficiency -// and specializing it here is useful. -func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { - j := 0 - for i := 0; i < 18; i++ { - c.p[i] ^= getNextWord(key, &j) - } - - j = 0 - var l, r uint32 - for i := 0; i < 18; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.p[i], c.p[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s0[i], c.s0[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s1[i], c.s1[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s2[i], c.s2[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s3[i], c.s3[i+1] = l, r - } -} - -func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { - xl, xr := l, r - xl ^= c.p[0] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ 
c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] - xr ^= c.p[17] - return xr, xl -} - -func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { - xl, xr := l, r - xl ^= c.p[17] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] - xr ^= c.p[0] - return xr, xl -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/blowfish/cipher.go deleted file mode 100644 index 5019658a..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/blowfish/cipher.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. -package blowfish - -// The code is a port of Bruce Schneier's C implementation. -// See http://www.schneier.com/blowfish.html. - -import "strconv" - -// The Blowfish block size in bytes. -const BlockSize = 8 - -// A Cipher is an instance of Blowfish encryption using a particular key. -type Cipher struct { - p [18]uint32 - s0, s1, s2, s3 [256]uint32 -} - -type KeySizeError int - -func (k KeySizeError) Error() string { - return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k)) -} - -// NewCipher creates and returns a Cipher. 
-// The key argument should be the Blowfish key, from 1 to 56 bytes. -func NewCipher(key []byte) (*Cipher, error) { - var result Cipher - if k := len(key); k < 1 || k > 56 { - return nil, KeySizeError(k) - } - initCipher(&result) - ExpandKey(key, &result) - return &result, nil -} - -// NewSaltedCipher creates and returns a Cipher that folds a salt into its key -// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is -// sufficient and desirable. For bcrypt compatibility, the key can be over 56 -// bytes. -func NewSaltedCipher(key, salt []byte) (*Cipher, error) { - if len(salt) == 0 { - return NewCipher(key) - } - var result Cipher - if k := len(key); k < 1 { - return nil, KeySizeError(k) - } - initCipher(&result) - expandKeyWithSalt(key, salt, &result) - return &result, nil -} - -// BlockSize returns the Blowfish block size, 8 bytes. -// It is necessary to satisfy the Block interface in the -// package "crypto/cipher". -func (c *Cipher) BlockSize() int { return BlockSize } - -// Encrypt encrypts the 8-byte buffer src using the cipher's key -// and stores the result in dst. -// Note that for amounts of data larger than a block, -// it is not safe to just call Encrypt on successive blocks; -// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). -func (c *Cipher) Encrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - l, r = encryptBlock(l, r, c) - dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) - dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) -} - -// Decrypt decrypts the 8-byte buffer src using the cipher's key -// and stores the result in dst. -func (c *Cipher) Decrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - l, r = decryptBlock(l, r, c) - dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) - dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) -} - -func initCipher(c *Cipher) { - copy(c.p[0:], p[0:]) - copy(c.s0[0:], s0[0:]) - copy(c.s1[0:], s1[0:]) - copy(c.s2[0:], s2[0:]) - copy(c.s3[0:], s3[0:]) -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/blowfish/const.go deleted file mode 100644 index 8c5ee4cb..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/blowfish/const.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// The startup permutation array and substitution boxes. -// They are the hexadecimal digits of PI; see: -// http://www.schneier.com/code/constants.txt.
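The cipher.go API just removed is consumed block-by-block. A minimal usage sketch against the upstream golang.org/x/crypto/blowfish path, honoring the deleted comment's warning that multi-block data belongs in a crypto/cipher mode rather than raw Encrypt calls:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/blowfish"
)

func main() {
	cipher, err := blowfish.NewCipher([]byte("a key, 1-56 bytes"))
	if err != nil {
		panic(err)
	}

	// Blowfish is a 64-bit block cipher: Encrypt and Decrypt operate on
	// exactly 8 bytes. Real code should wrap the Cipher in a mode from
	// crypto/cipher (e.g. CBC or CTR) instead of encrypting per block.
	src := []byte("8 bytes!")
	dst := make([]byte, blowfish.BlockSize)
	cipher.Encrypt(dst, src)

	plain := make([]byte, blowfish.BlockSize)
	cipher.Decrypt(plain, dst)
	fmt.Printf("%s\n", plain) // 8 bytes!
}
```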
- -package blowfish - -var s0 = [256]uint32{ - 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, - 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, - 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, - 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, - 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, - 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, - 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, - 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, - 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, - 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, - 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, - 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, - 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, - 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, - 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, - 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, - 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, - 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, - 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, - 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, - 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, - 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, - 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, - 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, - 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, - 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, - 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, - 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, - 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, - 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, - 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, - 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, - 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, - 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, - 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, - 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, - 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, - 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, - 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, - 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, - 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, - 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, - 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, -} - -var s1 = [256]uint32{ - 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, - 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, - 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, - 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, - 0x4cdd2086, 0x8470eb26, 
0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, - 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, - 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, - 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, - 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, - 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, - 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, - 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, - 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, - 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, - 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, - 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, - 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, - 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, - 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, - 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, - 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, - 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, - 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, - 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, - 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, - 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, - 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, - 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, - 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, - 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, - 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, - 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, - 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, - 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, - 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, - 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, - 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, - 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, - 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, - 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, - 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, - 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, - 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, -} - -var s2 = [256]uint32{ - 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, - 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, - 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, - 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, - 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, - 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, - 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, - 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, - 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, - 0x6841e7f7, 
0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, - 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, - 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, - 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, - 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, - 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, - 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, - 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, - 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, - 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, - 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, - 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, - 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, - 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, - 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, - 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, - 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, - 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, - 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, - 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, - 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, - 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, - 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, - 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, - 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, - 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, - 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, - 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, - 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, - 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, - 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, - 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, - 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, - 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, -} - -var s3 = [256]uint32{ - 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, - 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, - 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, - 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, - 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, - 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, - 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, - 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, - 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, - 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, - 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, - 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, - 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, - 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, - 
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, - 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, - 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, - 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, - 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, - 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, - 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, - 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, - 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, - 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, - 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, - 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, - 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, - 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, - 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, - 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, - 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, - 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, - 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, - 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, - 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, - 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, - 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, - 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, - 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, - 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, - 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, - 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, - 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, -} - -var p = [18]uint32{ - 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, - 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, - 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/ocsp/ocsp.go deleted file mode 100644 index f6a1bd4d..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/crypto/ocsp/ocsp.go +++ /dev/null @@ -1,592 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses -// are signed messages attesting to the validity of a certificate for a small -// period of time. This is used to manage revocation for X.509 certificates. -package ocsp - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "errors" - "math/big" - "time" -) - -var idPKIXOCSPBasic = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 5, 5, 7, 48, 1, 1}) - -// These are internal structures that reflect the ASN.1 structure of an OCSP -// response. See RFC 2560, section 4.2. 
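Client code drives the package being deleted here through CreateRequest and ParseResponse around a plain HTTP POST. A hedged sketch against the upstream golang.org/x/crypto/ocsp import path; the responder URL and both certificates are caller-supplied placeholders, not part of the removed code:

```go
package ocspdemo

import (
	"bytes"
	"crypto/x509"
	"io/ioutil"
	"net/http"

	"golang.org/x/crypto/ocsp"
)

// QueryOCSP asks an OCSP responder about cert. The responder URL normally
// comes from cert.OCSPServer; cert and issuer are assumed to be already
// parsed *x509.Certificate values.
func QueryOCSP(cert, issuer *x509.Certificate, responderURL string) (*ocsp.Response, error) {
	// nil options default to SHA-1, which is what responders expect.
	der, err := ocsp.CreateRequest(cert, issuer, nil)
	if err != nil {
		return nil, err
	}

	httpResp, err := http.Post(responderURL, "application/ocsp-request", bytes.NewReader(der))
	if err != nil {
		return nil, err
	}
	defer httpResp.Body.Close()

	body, err := ioutil.ReadAll(httpResp.Body)
	if err != nil {
		return nil, err
	}

	// Passing issuer lets ParseResponse verify the responder's signature.
	return ocsp.ParseResponse(body, issuer)
}
```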
- -const ( - ocspSuccess = 0 - ocspMalformed = 1 - ocspInternalError = 2 - ocspTryLater = 3 - ocspSigRequired = 4 - ocspUnauthorized = 5 -) - -type certID struct { - HashAlgorithm pkix.AlgorithmIdentifier - NameHash []byte - IssuerKeyHash []byte - SerialNumber *big.Int -} - -// https://tools.ietf.org/html/rfc2560#section-4.1.1 -type ocspRequest struct { - TBSRequest tbsRequest -} - -type tbsRequest struct { - Version int `asn1:"explicit,tag:0,default:0,optional"` - RequestorName pkix.RDNSequence `asn1:"explicit,tag:1,optional"` - RequestList []request -} - -type request struct { - Cert certID -} - -type responseASN1 struct { - Status asn1.Enumerated - Response responseBytes `asn1:"explicit,tag:0"` -} - -type responseBytes struct { - ResponseType asn1.ObjectIdentifier - Response []byte -} - -type basicResponse struct { - TBSResponseData responseData - SignatureAlgorithm pkix.AlgorithmIdentifier - Signature asn1.BitString - Certificates []asn1.RawValue `asn1:"explicit,tag:0,optional"` -} - -type responseData struct { - Raw asn1.RawContent - Version int `asn1:"optional,default:1,explicit,tag:0"` - RawResponderName asn1.RawValue `asn1:"optional,explicit,tag:1"` - KeyHash []byte `asn1:"optional,explicit,tag:2"` - ProducedAt time.Time `asn1:"generalized"` - Responses []singleResponse -} - -type singleResponse struct { - CertID certID - Good asn1.Flag `asn1:"tag:0,optional"` - Revoked revokedInfo `asn1:"explicit,tag:1,optional"` - Unknown asn1.Flag `asn1:"tag:2,optional"` - ThisUpdate time.Time `asn1:"generalized"` - NextUpdate time.Time `asn1:"generalized,explicit,tag:0,optional"` -} - -type revokedInfo struct { - RevocationTime time.Time `asn1:"generalized"` - Reason int `asn1:"explicit,tag:0,optional"` -} - -var ( - oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2} - oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} - oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} - oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} - oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} - oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} - oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} - oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 4, 3, 2} - oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} - oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} - oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} - oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} -) - -var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{ - crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}), - crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}), - crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}), - crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}), -} - -// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below -var signatureAlgorithmDetails = []struct { - algo x509.SignatureAlgorithm - oid asn1.ObjectIdentifier - pubKeyAlgo x509.PublicKeyAlgorithm - hash crypto.Hash -}{ - {x509.MD2WithRSA, oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */}, - {x509.MD5WithRSA, oidSignatureMD5WithRSA, x509.RSA, crypto.MD5}, - {x509.SHA1WithRSA, oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1}, - {x509.SHA256WithRSA, 
oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256}, - {x509.SHA384WithRSA, oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384}, - {x509.SHA512WithRSA, oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512}, - {x509.DSAWithSHA1, oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1}, - {x509.DSAWithSHA256, oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256}, - {x509.ECDSAWithSHA1, oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1}, - {x509.ECDSAWithSHA256, oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256}, - {x509.ECDSAWithSHA384, oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384}, - {x509.ECDSAWithSHA512, oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512}, -} - -// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below -func signingParamsForPublicKey(pub interface{}, requestedSigAlgo x509.SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) { - var pubType x509.PublicKeyAlgorithm - - switch pub := pub.(type) { - case *rsa.PublicKey: - pubType = x509.RSA - hashFunc = crypto.SHA256 - sigAlgo.Algorithm = oidSignatureSHA256WithRSA - sigAlgo.Parameters = asn1.RawValue{ - Tag: 5, - } - - case *ecdsa.PublicKey: - pubType = x509.ECDSA - - switch pub.Curve { - case elliptic.P224(), elliptic.P256(): - hashFunc = crypto.SHA256 - sigAlgo.Algorithm = oidSignatureECDSAWithSHA256 - case elliptic.P384(): - hashFunc = crypto.SHA384 - sigAlgo.Algorithm = oidSignatureECDSAWithSHA384 - case elliptic.P521(): - hashFunc = crypto.SHA512 - sigAlgo.Algorithm = oidSignatureECDSAWithSHA512 - default: - err = errors.New("x509: unknown elliptic curve") - } - - default: - err = errors.New("x509: only RSA and ECDSA keys supported") - } - - if err != nil { - return - } - - if requestedSigAlgo == 0 { - return - } - - found := false - for _, details := range signatureAlgorithmDetails { - if details.algo == requestedSigAlgo { - if details.pubKeyAlgo != pubType { - err = errors.New("x509: requested SignatureAlgorithm does not match private key type") - return - } - sigAlgo.Algorithm, hashFunc = details.oid, details.hash - if hashFunc == 0 { - err = errors.New("x509: cannot sign with hash function requested") - return - } - found = true - break - } - } - - if !found { - err = errors.New("x509: unknown SignatureAlgorithm") - } - - return -} - -// TODO(agl): this is taken from crypto/x509 and so should probably be exported -// from crypto/x509 or crypto/x509/pkix. -func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) x509.SignatureAlgorithm { - for _, details := range signatureAlgorithmDetails { - if oid.Equal(details.oid) { - return details.algo - } - } - return x509.UnknownSignatureAlgorithm -} - -// TODO(rlb): This is not taken from crypto/x509, but it's of the same general form. -func getHashAlgorithmFromOID(target asn1.ObjectIdentifier) crypto.Hash { - for hash, oid := range hashOIDs { - if oid.Equal(target) { - return hash - } - } - return crypto.Hash(0) -} - -// This is the exposed reflection of the internal OCSP structures. - -const ( - // Good means that the certificate is valid. - Good = iota - // Revoked means that the certificate has been deliberately revoked. - Revoked = iota - // Unknown means that the OCSP responder doesn't know about the certificate. - Unknown = iota - // ServerFailed means that the OCSP responder failed to process the request. - ServerFailed = iota -) - -// Request represents an OCSP request. See RFC 2560. 
-type Request struct { - HashAlgorithm crypto.Hash - IssuerNameHash []byte - IssuerKeyHash []byte - SerialNumber *big.Int -} - -// Response represents an OCSP response. See RFC 2560. -type Response struct { - // Status is one of {Good, Revoked, Unknown, ServerFailed} - Status int - SerialNumber *big.Int - ProducedAt, ThisUpdate, NextUpdate, RevokedAt time.Time - RevocationReason int - Certificate *x509.Certificate - // TBSResponseData contains the raw bytes of the signed response. If - // Certificate is nil then this can be used to verify Signature. - TBSResponseData []byte - Signature []byte - SignatureAlgorithm x509.SignatureAlgorithm -} - -// These are pre-serialized error responses for the various non-success codes -// defined by OCSP. The Unauthorized code in particular can be used by an OCSP -// responder that supports only pre-signed responses as a response to requests -// for certificates with unknown status. See RFC 5019. -var ( - MalformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01} - InternalErrorErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x02} - TryLaterErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x03} - SigRequredErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x05} - UnauthorizedErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x06} -) - -// CheckSignatureFrom checks that the signature in resp is a valid signature -// from issuer. This should only be used if resp.Certificate is nil. Otherwise, -// the OCSP response contained an intermediate certificate that created the -// signature. That signature is checked by ParseResponse and only -// resp.Certificate remains to be validated. -func (resp *Response) CheckSignatureFrom(issuer *x509.Certificate) error { - return issuer.CheckSignature(resp.SignatureAlgorithm, resp.TBSResponseData, resp.Signature) -} - -// ParseError results from an invalid OCSP response. -type ParseError string - -func (p ParseError) Error() string { - return string(p) -} - -// ParseRequest parses an OCSP request in DER form. It only supports -// requests for a single certificate. Signed requests are not supported. -// If a request includes a signature, it will result in a ParseError. -func ParseRequest(bytes []byte) (*Request, error) { - var req ocspRequest - rest, err := asn1.Unmarshal(bytes, &req) - if err != nil { - return nil, err - } - if len(rest) > 0 { - return nil, ParseError("trailing data in OCSP request") - } - - if len(req.TBSRequest.RequestList) == 0 { - return nil, ParseError("OCSP request contains no request body") - } - innerRequest := req.TBSRequest.RequestList[0] - - hashFunc := getHashAlgorithmFromOID(innerRequest.Cert.HashAlgorithm.Algorithm) - if hashFunc == crypto.Hash(0) { - return nil, ParseError("OCSP request uses unknown hash function") - } - - return &Request{ - HashAlgorithm: hashFunc, - IssuerNameHash: innerRequest.Cert.NameHash, - IssuerKeyHash: innerRequest.Cert.IssuerKeyHash, - SerialNumber: innerRequest.Cert.SerialNumber, - }, nil -} - -// ParseResponse parses an OCSP response in DER form. It only supports -// responses for a single certificate. If the response contains a certificate -// then the signature over the response is checked. If issuer is not nil then -// it will be used to validate the signature or embedded certificate. Invalid -// signatures or parse failures will result in a ParseError. 
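Callers of ParseResponse then branch on the exposed status constants (Good, Revoked, Unknown, ServerFailed). A small illustrative helper, assuming resp came from a call like the one sketched earlier:

```go
package ocspdemo

import "golang.org/x/crypto/ocsp"

// Inspect maps the exposed status constants to a short description.
// resp is assumed to come from ocsp.ParseResponse, as sketched above.
func Inspect(resp *ocsp.Response) string {
	switch resp.Status {
	case ocsp.Good:
		return "good until at least " + resp.NextUpdate.String()
	case ocsp.Revoked:
		return "revoked at " + resp.RevokedAt.String()
	case ocsp.Unknown:
		return "unknown to this responder"
	default: // ocsp.ServerFailed
		return "responder could not process the request"
	}
}
```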
-func ParseResponse(bytes []byte, issuer *x509.Certificate) (*Response, error) { - var resp responseASN1 - rest, err := asn1.Unmarshal(bytes, &resp) - if err != nil { - return nil, err - } - if len(rest) > 0 { - return nil, ParseError("trailing data in OCSP response") - } - - ret := new(Response) - if resp.Status != ocspSuccess { - ret.Status = ServerFailed - return ret, nil - } - - if !resp.Response.ResponseType.Equal(idPKIXOCSPBasic) { - return nil, ParseError("bad OCSP response type") - } - - var basicResp basicResponse - rest, err = asn1.Unmarshal(resp.Response.Response, &basicResp) - if err != nil { - return nil, err - } - - if len(basicResp.Certificates) > 1 { - return nil, ParseError("OCSP response contains bad number of certificates") - } - - if len(basicResp.TBSResponseData.Responses) != 1 { - return nil, ParseError("OCSP response contains bad number of responses") - } - - ret.TBSResponseData = basicResp.TBSResponseData.Raw - ret.Signature = basicResp.Signature.RightAlign() - ret.SignatureAlgorithm = getSignatureAlgorithmFromOID(basicResp.SignatureAlgorithm.Algorithm) - - if len(basicResp.Certificates) > 0 { - ret.Certificate, err = x509.ParseCertificate(basicResp.Certificates[0].FullBytes) - if err != nil { - return nil, err - } - - if err := ret.CheckSignatureFrom(ret.Certificate); err != nil { - return nil, ParseError("bad OCSP signature") - } - - if issuer != nil { - if err := issuer.CheckSignature(ret.Certificate.SignatureAlgorithm, ret.Certificate.RawTBSCertificate, ret.Certificate.Signature); err != nil { - return nil, ParseError("bad signature on embedded certificate") - } - } - } else if issuer != nil { - if err := ret.CheckSignatureFrom(issuer); err != nil { - return nil, ParseError("bad OCSP signature") - } - } - - r := basicResp.TBSResponseData.Responses[0] - - ret.SerialNumber = r.CertID.SerialNumber - - switch { - case bool(r.Good): - ret.Status = Good - case bool(r.Unknown): - ret.Status = Unknown - default: - ret.Status = Revoked - ret.RevokedAt = r.Revoked.RevocationTime - ret.RevocationReason = r.Revoked.Reason - } - - ret.ProducedAt = basicResp.TBSResponseData.ProducedAt - ret.ThisUpdate = r.ThisUpdate - ret.NextUpdate = r.NextUpdate - - return ret, nil -} - -// RequestOptions contains options for constructing OCSP requests. -type RequestOptions struct { - // Hash contains the hash function that should be used when - // constructing the OCSP request. If zero, SHA-1 will be used. - Hash crypto.Hash -} - -func (opts *RequestOptions) hash() crypto.Hash { - if opts == nil || opts.Hash == 0 { - // SHA-1 is nearly universally used in OCSP. - return crypto.SHA1 - } - return opts.Hash -} - -// CreateRequest returns a DER-encoded, OCSP request for the status of cert. If -// opts is nil then sensible defaults are used. -func CreateRequest(cert, issuer *x509.Certificate, opts *RequestOptions) ([]byte, error) { - hashFunc := opts.hash() - - // OCSP seems to be the only place where these raw hash identifiers are - // used. 
I took the following from - // http://msdn.microsoft.com/en-us/library/ff635603.aspx - var hashOID asn1.ObjectIdentifier - hashOID, ok := hashOIDs[hashFunc] - if !ok { - return nil, x509.ErrUnsupportedAlgorithm - } - - if !hashFunc.Available() { - return nil, x509.ErrUnsupportedAlgorithm - } - h := opts.hash().New() - - var publicKeyInfo struct { - Algorithm pkix.AlgorithmIdentifier - PublicKey asn1.BitString - } - if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil { - return nil, err - } - - h.Write(publicKeyInfo.PublicKey.RightAlign()) - issuerKeyHash := h.Sum(nil) - - h.Reset() - h.Write(issuer.RawSubject) - issuerNameHash := h.Sum(nil) - - return asn1.Marshal(ocspRequest{ - tbsRequest{ - Version: 0, - RequestList: []request{ - { - Cert: certID{ - pkix.AlgorithmIdentifier{ - Algorithm: hashOID, - Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, - }, - issuerNameHash, - issuerKeyHash, - cert.SerialNumber, - }, - }, - }, - }, - }) -} - -// CreateResponse returns a DER-encoded OCSP response with the specified contents. -// The fields in the response are populated as follows: -// -// The responder cert is used to populate the ResponderName field, and the certificate -// itself is provided alongside the OCSP response signature. -// -// The issuer cert is used to populate the IssuerNameHash and IssuerKeyHash fields. -// (SHA-1 is used for the hash function; this is not configurable.) -// -// The template is used to populate the SerialNumber, RevocationStatus, RevokedAt, -// RevocationReason, ThisUpdate, and NextUpdate fields. -// -// The ProducedAt date is automatically set to the current date, to the nearest minute. -func CreateResponse(issuer, responderCert *x509.Certificate, template Response, priv crypto.Signer) ([]byte, error) { - var publicKeyInfo struct { - Algorithm pkix.AlgorithmIdentifier - PublicKey asn1.BitString - } - if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil { - return nil, err - } - - h := sha1.New() - h.Write(publicKeyInfo.PublicKey.RightAlign()) - issuerKeyHash := h.Sum(nil) - - h.Reset() - h.Write(issuer.RawSubject) - issuerNameHash := h.Sum(nil) - - innerResponse := singleResponse{ - CertID: certID{ - HashAlgorithm: pkix.AlgorithmIdentifier{ - Algorithm: hashOIDs[crypto.SHA1], - Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, - }, - NameHash: issuerNameHash, - IssuerKeyHash: issuerKeyHash, - SerialNumber: template.SerialNumber, - }, - ThisUpdate: template.ThisUpdate.UTC(), - NextUpdate: template.NextUpdate.UTC(), - } - - switch template.Status { - case Good: - innerResponse.Good = true - case Unknown: - innerResponse.Unknown = true - case Revoked: - innerResponse.Revoked = revokedInfo{ - RevocationTime: template.RevokedAt.UTC(), - Reason: template.RevocationReason, - } - } - - responderName := asn1.RawValue{ - Class: 2, // context-specific - Tag: 1, // explicit tag - IsCompound: true, - Bytes: responderCert.RawSubject, - } - tbsResponseData := responseData{ - Version: 0, - RawResponderName: responderName, - ProducedAt: time.Now().Truncate(time.Minute).UTC(), - Responses: []singleResponse{innerResponse}, - } - - tbsResponseDataDER, err := asn1.Marshal(tbsResponseData) - if err != nil { - return nil, err - } - - hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(priv.Public(), template.SignatureAlgorithm) - if err != nil { - return nil, err - } - - responseHash := hashFunc.New() - responseHash.Write(tbsResponseDataDER) - signature, err := priv.Sign(rand.Reader,
responseHash.Sum(nil), hashFunc) - if err != nil { - return nil, err - } - - response := basicResponse{ - TBSResponseData: tbsResponseData, - SignatureAlgorithm: signatureAlgorithm, - Signature: asn1.BitString{ - Bytes: signature, - BitLength: 8 * len(signature), - }, - } - if template.Certificate != nil { - response.Certificates = []asn1.RawValue{ - asn1.RawValue{FullBytes: template.Certificate.Raw}, - } - } - responseDER, err := asn1.Marshal(response) - if err != nil { - return nil, err - } - - return asn1.Marshal(responseASN1{ - Status: ocspSuccess, - Response: responseBytes{ - ResponseType: idPKIXOCSPBasic, - Response: responseDER, - }, - }) -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/LICENSE b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/LICENSE deleted file mode 100644 index 6a66aea5..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/PATENTS b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/PATENTS deleted file mode 100644 index 73309904..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. 
This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/context.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index 46629881..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,447 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between them must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value.
- // -// WithCancel arranges for Done to be closed when cancel is called; -// WithDeadline arranges for Done to be closed when the deadline -// expires; WithTimeout arranges for Done to be closed when the timeout -// elapses. -// -// Done is provided for use in select statements: -// -// // Stream generates values with DoSomething and sends them to out -// // until DoSomething returns an error or ctx.Done is closed. -// func Stream(ctx context.Context, out chan<- Value) error { -// for { -// v, err := DoSomething(ctx) -// if err != nil { -// return err -// } -// select { -// case <-ctx.Done(): -// return ctx.Err() -// case out <- v: -// } -// } -// } -// -// See http://blog.golang.org/pipelines for more examples of how to use -// a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key return the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stored using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses.
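In practice the Context interface removed above is consumed through the Done/Err pair, exactly as its Stream example suggests. A compact, runnable sketch using the package's canonical golang.org/x/net/context import path:

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

// stream follows the package's conventions: ctx is the first parameter,
// and the function returns promptly once ctx is canceled.
func stream(ctx context.Context, out chan<- int) error {
	for i := 0; ; i++ {
		select {
		case <-ctx.Done():
			return ctx.Err() // Canceled or DeadlineExceeded
		case out <- i:
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	out := make(chan int)
	go stream(ctx, out)

	fmt.Println(<-out, <-out)
	cancel()
	time.Sleep(10 * time.Millisecond) // give stream a moment to observe Done
}
```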
-type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. 
-func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. 
- - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go deleted file mode 100644 index e3170e33..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/ctxhttp/cancelreq.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.5 - -package ctxhttp - -import "net/http" - -func canceler(client *http.Client, req *http.Request) func() { - // TODO(djd): Respect any existing value of req.Cancel. - ch := make(chan struct{}) - req.Cancel = ch - - return func() { - close(ch) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go deleted file mode 100644 index 56bcbadb..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !go1.5 - -package ctxhttp - -import "net/http" - -type requestCanceler interface { - CancelRequest(*http.Request) -} - -func canceler(client *http.Client, req *http.Request) func() { - rc, ok := client.Transport.(requestCanceler) - if !ok { - return func() {} - } - return func() { - rc.CancelRequest(req) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go deleted file mode 100644 index e35860a7..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ctxhttp provides helper functions for performing context-aware HTTP requests. -package ctxhttp - -import ( - "io" - "net/http" - "net/url" - "strings" - - "golang.org/x/net/context" -) - -func nop() {} - -var ( - testHookContextDoneBeforeHeaders = nop - testHookDoReturned = nop - testHookDidBodyClose = nop -) - -// Do sends an HTTP request with the provided http.Client and returns an HTTP response. -// If the client is nil, http.DefaultClient is used. -// If the context is canceled or times out, ctx.Err() will be returned. -func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - - // Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go. - cancel := canceler(client, req) - - type responseAndError struct { - resp *http.Response - err error - } - result := make(chan responseAndError, 1) - - // Make local copies of test hooks closed over by goroutines below. - // Prevents data races in tests. - testHookDoReturned := testHookDoReturned - testHookDidBodyClose := testHookDidBodyClose - - go func() { - resp, err := client.Do(req) - testHookDoReturned() - result <- responseAndError{resp, err} - }() - - var resp *http.Response - - select { - case <-ctx.Done(): - testHookContextDoneBeforeHeaders() - cancel() - // Clean up after the goroutine calling client.Do: - go func() { - if r := <-result; r.resp != nil { - testHookDidBodyClose() - r.resp.Body.Close() - } - }() - return nil, ctx.Err() - case r := <-result: - var err error - resp, err = r.resp, r.err - if err != nil { - return resp, err - } - } - - c := make(chan struct{}) - go func() { - select { - case <-ctx.Done(): - cancel() - case <-c: - // The response's Body is closed. - } - }() - resp.Body = ¬ifyingReader{resp.Body, c} - - return resp, nil -} - -// Get issues a GET request via the Do function. -func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Head issues a HEAD request via the Do function. -func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Post issues a POST request via the Do function. 
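[Editor's note: another illustrative sketch, not part of the diff. It combines the ctxhttp.Get helper deleted above with a context timeout; the URL is a placeholder, and passing a nil client means http.DefaultClient is used, as documented in Do.]

package main

import (
    "fmt"
    "io/ioutil"
    "time"

    "golang.org/x/net/context"
    "golang.org/x/net/context/ctxhttp"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel() // releases the timer if the request finishes early

    resp, err := ctxhttp.Get(ctx, nil, "https://example.com/")
    if err != nil {
        fmt.Println("request failed or timed out:", err) // ctx.Err() on timeout
        return
    }
    defer resp.Body.Close()
    body, _ := ioutil.ReadAll(resp.Body)
    fmt.Println(len(body), "bytes")
}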
-func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { - req, err := http.NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return Do(ctx, client, req) -} - -// PostForm issues a POST request via the Do function. -func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { - return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} - -// notifyingReader is an io.ReadCloser that closes the notify channel after -// Close is called or a Read fails on the underlying ReadCloser. -type notifyingReader struct { - io.ReadCloser - notify chan<- struct{} -} - -func (r *notifyingReader) Read(p []byte) (int, error) { - n, err := r.ReadCloser.Read(p) - if err != nil && r.notify != nil { - close(r.notify) - r.notify = nil - } - return n, err -} - -func (r *notifyingReader) Close() error { - err := r.ReadCloser.Close() - if r.notify != nil { - close(r.notify) - r.notify = nil - } - return err -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/.gitignore b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/.gitignore deleted file mode 100644 index 190f1223..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*~ -h2i/h2i diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/Dockerfile b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/Dockerfile deleted file mode 100644 index 53fc5257..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/Dockerfile +++ /dev/null @@ -1,51 +0,0 @@ -# -# This Dockerfile builds a recent curl with HTTP/2 client support, using -# a recent nghttp2 build. -# -# See the Makefile for how to tag it. If Docker and that image is found, the -# Go tests use this curl binary for integration tests. 
-#
-
-FROM ubuntu:trusty
-
-RUN apt-get update && \
- apt-get upgrade -y && \
- apt-get install -y git-core build-essential wget
-
-RUN apt-get install -y --no-install-recommends \
- autotools-dev libtool pkg-config zlib1g-dev \
- libcunit1-dev libssl-dev libxml2-dev libevent-dev \
- automake autoconf
-
-# The list of packages nghttp2 recommends for h2load:
-RUN apt-get install -y --no-install-recommends make binutils \
- autoconf automake autotools-dev \
- libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
- libev-dev libevent-dev libjansson-dev libjemalloc-dev \
- cython python3.4-dev python-setuptools
-
-# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
-ENV NGHTTP2_VER 895da9a
-RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
-
-WORKDIR /root/nghttp2
-RUN git reset --hard $NGHTTP2_VER
-RUN autoreconf -i
-RUN automake
-RUN autoconf
-RUN ./configure
-RUN make
-RUN make install
-
-WORKDIR /root
-RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
-RUN tar -zxvf curl-7.45.0.tar.gz
-WORKDIR /root/curl-7.45.0
-RUN ./configure --with-ssl --with-nghttp2=/usr/local
-RUN make
-RUN make install
-RUN ldconfig
-
-CMD ["-h"]
-ENTRYPOINT ["/usr/local/bin/curl"]
-
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/Makefile b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/Makefile
deleted file mode 100644
index 55fd826f..00000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-curlimage:
- docker build -t gohttp2/curl .
-
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/README b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/README
deleted file mode 100644
index 360d5aa3..00000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/README
+++ /dev/null
@@ -1,20 +0,0 @@
-This is a work-in-progress HTTP/2 implementation for Go.
-
-It will eventually live in the Go standard library and won't require
-any changes to your code to use. It will just be automatic.
-
-Status:
-
-* The server support is pretty good. A few things are missing
- but are being worked on.
-* The client work has just started but shares a lot of code and
- is coming along much quicker.
-
-Docs are at https://godoc.org/golang.org/x/net/http2
-
-Demo test server at https://http2.golang.org/
-
-Help & bug reports welcome!
-
-Contributing: https://golang.org/doc/contribute.html
-Bugs: https://golang.org/issue/new?title=x/net/http2:+
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/client_conn_pool.go
deleted file mode 100644
index 772ea5e9..00000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/client_conn_pool.go
+++ /dev/null
@@ -1,225 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Transport code's client connection pooling.
-
-package http2
-
-import (
- "crypto/tls"
- "net/http"
- "sync"
-)
-
-// ClientConnPool manages a pool of HTTP/2 client connections.
-type ClientConnPool interface {
- GetClientConn(req *http.Request, addr string) (*ClientConn, error)
- MarkDead(*ClientConn)
-}
-
-// TODO: use singleflight for dialing and addConnCalls?
-type clientConnPool struct {
- t *Transport
-
- mu sync.Mutex // TODO: maybe switch to RWMutex
- // TODO: add support for sharing conns based on cert names
- // (e.g. share conn for googleapis.com and appspot.com)
- conns map[string][]*ClientConn // key is host:port
- dialing map[string]*dialCall // currently in-flight dials
- keys map[*ClientConn][]string
- addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
-}
-
-func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
- return p.getClientConn(req, addr, dialOnMiss)
-}
-
-const (
- dialOnMiss = true
- noDialOnMiss = false
-)
-
-func (p *clientConnPool) getClientConn(_ *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
- p.mu.Lock()
- for _, cc := range p.conns[addr] {
- if cc.CanTakeNewRequest() {
- p.mu.Unlock()
- return cc, nil
- }
- }
- if !dialOnMiss {
- p.mu.Unlock()
- return nil, ErrNoCachedConn
- }
- call := p.getStartDialLocked(addr)
- p.mu.Unlock()
- <-call.done
- return call.res, call.err
-}
-
-// dialCall is an in-flight Transport dial call to a host.
-type dialCall struct {
- p *clientConnPool
- done chan struct{} // closed when done
- res *ClientConn // valid after done is closed
- err error // valid after done is closed
-}
-
-// requires p.mu is held.
-func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
- if call, ok := p.dialing[addr]; ok {
- // A dial is already in-flight. Don't start another.
- return call
- }
- call := &dialCall{p: p, done: make(chan struct{})}
- if p.dialing == nil {
- p.dialing = make(map[string]*dialCall)
- }
- p.dialing[addr] = call
- go call.dial(addr)
- return call
-}
-
-// run in its own goroutine.
-func (c *dialCall) dial(addr string) {
- c.res, c.err = c.p.t.dialClientConn(addr)
- close(c.done)
-
- c.p.mu.Lock()
- delete(c.p.dialing, addr)
- if c.err == nil {
- c.p.addConnLocked(addr, c.res)
- }
- c.p.mu.Unlock()
-}
-
-// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
-// already exist. It coalesces concurrent calls with the same key.
-// This is used by the http1 Transport code when it creates a new connection. Because
-// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
-// the protocol), it can get into a situation where it has multiple TLS connections.
-// This code decides which ones live or die.
-// The return value indicates whether c was used.
-// c is never closed.
-func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { - p.mu.Lock() - for _, cc := range p.conns[key] { - if cc.CanTakeNewRequest() { - p.mu.Unlock() - return false, nil - } - } - call, dup := p.addConnCalls[key] - if !dup { - if p.addConnCalls == nil { - p.addConnCalls = make(map[string]*addConnCall) - } - call = &addConnCall{ - p: p, - done: make(chan struct{}), - } - p.addConnCalls[key] = call - go call.run(t, key, c) - } - p.mu.Unlock() - - <-call.done - if call.err != nil { - return false, call.err - } - return !dup, nil -} - -type addConnCall struct { - p *clientConnPool - done chan struct{} // closed when done - err error -} - -func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { - cc, err := t.NewClientConn(tc) - - p := c.p - p.mu.Lock() - if err != nil { - c.err = err - } else { - p.addConnLocked(key, cc) - } - delete(p.addConnCalls, key) - p.mu.Unlock() - close(c.done) -} - -func (p *clientConnPool) addConn(key string, cc *ClientConn) { - p.mu.Lock() - p.addConnLocked(key, cc) - p.mu.Unlock() -} - -// p.mu must be held -func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { - for _, v := range p.conns[key] { - if v == cc { - return - } - } - if p.conns == nil { - p.conns = make(map[string][]*ClientConn) - } - if p.keys == nil { - p.keys = make(map[*ClientConn][]string) - } - p.conns[key] = append(p.conns[key], cc) - p.keys[cc] = append(p.keys[cc], key) -} - -func (p *clientConnPool) MarkDead(cc *ClientConn) { - p.mu.Lock() - defer p.mu.Unlock() - for _, key := range p.keys[cc] { - vv, ok := p.conns[key] - if !ok { - continue - } - newList := filterOutClientConn(vv, cc) - if len(newList) > 0 { - p.conns[key] = newList - } else { - delete(p.conns, key) - } - } - delete(p.keys, cc) -} - -func (p *clientConnPool) closeIdleConnections() { - p.mu.Lock() - defer p.mu.Unlock() - // TODO: don't close a cc if it was just added to the pool - // milliseconds ago and has never been used. There's currently - // a small race window with the HTTP/1 Transport's integration - // where it can add an idle conn just before using it, and - // somebody else can concurrently call CloseIdleConns and - // break some caller's RoundTrip. - for _, vv := range p.conns { - for _, cc := range vv { - cc.closeIfIdle() - } - } -} - -func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { - out := in[:0] - for _, v := range in { - if v != exclude { - out = append(out, v) - } - } - // If we filtered it out, zero out the last item to prevent - // the GC from seeing it. - if len(in) != len(out) { - in[len(in)-1] = nil - } - return out -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/configure_transport.go deleted file mode 100644 index daa17f5d..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/configure_transport.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
-
-// +build go1.6
-
-package http2
-
-import (
- "crypto/tls"
- "fmt"
- "net/http"
-)
-
-func configureTransport(t1 *http.Transport) (*Transport, error) {
- connPool := new(clientConnPool)
- t2 := &Transport{
- ConnPool: noDialClientConnPool{connPool},
- t1: t1,
- }
- connPool.t = t2
- if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
- return nil, err
- }
- if t1.TLSClientConfig == nil {
- t1.TLSClientConfig = new(tls.Config)
- }
- if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
- t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
- }
- if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
- t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
- }
- upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
- addr := authorityAddr(authority)
- if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
- go c.Close()
- return erringRoundTripper{err}
- } else if !used {
- // Turns out we don't need this c.
- // For example, two goroutines made requests to the same host
- // at the same time, both kicking off TCP dials. (since protocol
- // was unknown)
- go c.Close()
- }
- return t2
- }
- if m := t1.TLSNextProto; len(m) == 0 {
- t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
- "h2": upgradeFn,
- }
- } else {
- m["h2"] = upgradeFn
- }
- return t2, nil
-}
-
-// registerHTTPSProtocol calls Transport.RegisterProtocol but
-// converting panics into errors.
-func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
- defer func() {
- if e := recover(); e != nil {
- err = fmt.Errorf("%v", e)
- }
- }()
- t.RegisterProtocol("https", rt)
- return nil
-}
-
-// noDialClientConnPool is an implementation of http2.ClientConnPool
-// which never dials. We let the HTTP/1.1 client dial and use its TLS
-// connection instead.
-type noDialClientConnPool struct{ *clientConnPool }
-
-func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
- return p.getClientConn(req, addr, noDialOnMiss)
-}
-
-// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
-// if there's already a cached connection to the host.
-type noDialH2RoundTripper struct{ t *Transport }
-
-func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
- res, err := rt.t.RoundTrip(req)
- if err == ErrNoCachedConn {
- return nil, http.ErrSkipAltProtocol
- }
- return res, err
-}
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/errors.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/errors.go
deleted file mode 100644
index 71a4e290..00000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/errors.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
- "errors"
- "fmt"
-)
-
-// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
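[Editor's note: an illustrative sketch, not part of the diff. The unexported configureTransport above backs the package's exported http2.ConfigureTransport helper (defined in transport.go, which is not shown in this hunk); a typical caller upgrades an existing net/http Transport in place, as sketched here. The URL is the demo server mentioned in the README above.]

package main

import (
    "fmt"
    "net/http"

    "golang.org/x/net/http2"
)

func main() {
    tr := &http.Transport{}
    if err := http2.ConfigureTransport(tr); err != nil { // registers the "h2" ALPN upgrade
        panic(err)
    }
    client := &http.Client{Transport: tr}
    resp, err := client.Get("https://http2.golang.org/")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    fmt.Println(resp.Proto) // "HTTP/2.0" when the server negotiates h2
}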
-type ErrCode uint32 - -const ( - ErrCodeNo ErrCode = 0x0 - ErrCodeProtocol ErrCode = 0x1 - ErrCodeInternal ErrCode = 0x2 - ErrCodeFlowControl ErrCode = 0x3 - ErrCodeSettingsTimeout ErrCode = 0x4 - ErrCodeStreamClosed ErrCode = 0x5 - ErrCodeFrameSize ErrCode = 0x6 - ErrCodeRefusedStream ErrCode = 0x7 - ErrCodeCancel ErrCode = 0x8 - ErrCodeCompression ErrCode = 0x9 - ErrCodeConnect ErrCode = 0xa - ErrCodeEnhanceYourCalm ErrCode = 0xb - ErrCodeInadequateSecurity ErrCode = 0xc - ErrCodeHTTP11Required ErrCode = 0xd -) - -var errCodeName = map[ErrCode]string{ - ErrCodeNo: "NO_ERROR", - ErrCodeProtocol: "PROTOCOL_ERROR", - ErrCodeInternal: "INTERNAL_ERROR", - ErrCodeFlowControl: "FLOW_CONTROL_ERROR", - ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT", - ErrCodeStreamClosed: "STREAM_CLOSED", - ErrCodeFrameSize: "FRAME_SIZE_ERROR", - ErrCodeRefusedStream: "REFUSED_STREAM", - ErrCodeCancel: "CANCEL", - ErrCodeCompression: "COMPRESSION_ERROR", - ErrCodeConnect: "CONNECT_ERROR", - ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", - ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", - ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", -} - -func (e ErrCode) String() string { - if s, ok := errCodeName[e]; ok { - return s - } - return fmt.Sprintf("unknown error code 0x%x", uint32(e)) -} - -// ConnectionError is an error that results in the termination of the -// entire connection. -type ConnectionError ErrCode - -func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) } - -// StreamError is an error that only affects one stream within an -// HTTP/2 connection. -type StreamError struct { - StreamID uint32 - Code ErrCode -} - -func (e StreamError) Error() string { - return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) -} - -// 6.9.1 The Flow Control Window -// "If a sender receives a WINDOW_UPDATE that causes a flow control -// window to exceed this maximum it MUST terminate either the stream -// or the connection, as appropriate. For streams, [...]; for the -// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code." -type goAwayFlowError struct{} - -func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } - -// connErrorReason wraps a ConnectionError with an informative error about why it occurs. - -// Errors of this type are only returned by the frame parser functions -// and converted into ConnectionError(ErrCodeProtocol). 
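[Editor's note: illustrative only. All of the error types above implement either fmt.Stringer or the error interface, so they can be printed directly; the expected output is shown in comments. The StreamError literal assumes the field set defined in this vintage of the package.]

package main

import (
    "fmt"

    "golang.org/x/net/http2"
)

func main() {
    fmt.Println(http2.ErrCodeFlowControl)                                  // FLOW_CONTROL_ERROR
    fmt.Println(http2.ErrCode(0xff))                                       // unknown error code 0xff
    fmt.Println(http2.ConnectionError(http2.ErrCodeProtocol))              // connection error: PROTOCOL_ERROR
    fmt.Println(http2.StreamError{StreamID: 3, Code: http2.ErrCodeCancel}) // stream error: stream ID 3; CANCEL
}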
-type connError struct { - Code ErrCode - Reason string -} - -func (e connError) Error() string { - return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) -} - -type pseudoHeaderError string - -func (e pseudoHeaderError) Error() string { - return fmt.Sprintf("invalid pseudo-header %q", string(e)) -} - -type duplicatePseudoHeaderError string - -func (e duplicatePseudoHeaderError) Error() string { - return fmt.Sprintf("duplicate pseudo-header %q", string(e)) -} - -type headerFieldNameError string - -func (e headerFieldNameError) Error() string { - return fmt.Sprintf("invalid header field name %q", string(e)) -} - -type headerFieldValueError string - -func (e headerFieldValueError) Error() string { - return fmt.Sprintf("invalid header field value %q", string(e)) -} - -var ( - errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") - errPseudoAfterRegular = errors.New("pseudo header field after regular") -) diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/fixed_buffer.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/fixed_buffer.go deleted file mode 100644 index 47da0f0b..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/fixed_buffer.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" -) - -// fixedBuffer is an io.ReadWriter backed by a fixed size buffer. -// It never allocates, but moves old data as new data is written. -type fixedBuffer struct { - buf []byte - r, w int -} - -var ( - errReadEmpty = errors.New("read from empty fixedBuffer") - errWriteFull = errors.New("write on full fixedBuffer") -) - -// Read copies bytes from the buffer into p. -// It is an error to read when no data is available. -func (b *fixedBuffer) Read(p []byte) (n int, err error) { - if b.r == b.w { - return 0, errReadEmpty - } - n = copy(p, b.buf[b.r:b.w]) - b.r += n - if b.r == b.w { - b.r = 0 - b.w = 0 - } - return n, nil -} - -// Len returns the number of bytes of the unread portion of the buffer. -func (b *fixedBuffer) Len() int { - return b.w - b.r -} - -// Write copies bytes from p into the buffer. -// It is an error to write more data than the buffer can hold. -func (b *fixedBuffer) Write(p []byte) (n int, err error) { - // Slide existing data to beginning. - if b.r > 0 && len(p) > len(b.buf)-b.w { - copy(b.buf, b.buf[b.r:b.w]) - b.w -= b.r - b.r = 0 - } - - // Write new data. - n = copy(b.buf[b.w:], p) - b.w += n - if n < len(p) { - err = errWriteFull - } - return n, err -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/flow.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/flow.go deleted file mode 100644 index 957de254..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/flow.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Flow control - -package http2 - -// flow is the flow control window's size. -type flow struct { - // n is the number of DATA bytes we're allowed to send. - // A flow is kept both on a conn and a per-stream. - n int32 - - // conn points to the shared connection-level flow that is - // shared by all streams on that conn. 
It is nil for the flow - // that's on the conn directly. - conn *flow -} - -func (f *flow) setConnFlow(cf *flow) { f.conn = cf } - -func (f *flow) available() int32 { - n := f.n - if f.conn != nil && f.conn.n < n { - n = f.conn.n - } - return n -} - -func (f *flow) take(n int32) { - if n > f.available() { - panic("internal error: took too much") - } - f.n -= n - if f.conn != nil { - f.conn.n -= n - } -} - -// add adds n bytes (positive or negative) to the flow control window. -// It returns false if the sum would exceed 2^31-1. -func (f *flow) add(n int32) bool { - remain := (1<<31 - 1) - f.n - if n > remain { - return false - } - f.n += n - return true -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/frame.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/frame.go deleted file mode 100644 index 6943f933..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/frame.go +++ /dev/null @@ -1,1496 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "log" - "strings" - "sync" - - "golang.org/x/net/http2/hpack" -) - -const frameHeaderLen = 9 - -var padZeros = make([]byte, 255) // zeros for padding - -// A FrameType is a registered frame type as defined in -// http://http2.github.io/http2-spec/#rfc.section.11.2 -type FrameType uint8 - -const ( - FrameData FrameType = 0x0 - FrameHeaders FrameType = 0x1 - FramePriority FrameType = 0x2 - FrameRSTStream FrameType = 0x3 - FrameSettings FrameType = 0x4 - FramePushPromise FrameType = 0x5 - FramePing FrameType = 0x6 - FrameGoAway FrameType = 0x7 - FrameWindowUpdate FrameType = 0x8 - FrameContinuation FrameType = 0x9 -) - -var frameName = map[FrameType]string{ - FrameData: "DATA", - FrameHeaders: "HEADERS", - FramePriority: "PRIORITY", - FrameRSTStream: "RST_STREAM", - FrameSettings: "SETTINGS", - FramePushPromise: "PUSH_PROMISE", - FramePing: "PING", - FrameGoAway: "GOAWAY", - FrameWindowUpdate: "WINDOW_UPDATE", - FrameContinuation: "CONTINUATION", -} - -func (t FrameType) String() string { - if s, ok := frameName[t]; ok { - return s - } - return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) -} - -// Flags is a bitmask of HTTP/2 flags. -// The meaning of flags varies depending on the frame type. -type Flags uint8 - -// Has reports whether f contains all (0 or more) flags in v. -func (f Flags) Has(v Flags) bool { - return (f & v) == v -} - -// Frame-specific FrameHeader flag bits. 
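[Editor's note: the flow type above is unexported, so it cannot be exercised from outside the package. This standalone sketch re-implements just enough of it (omitting the take bounds check and add) to illustrate the two-level accounting: a stream can never send more than the smaller of its own window and the shared connection window.]

package main

import "fmt"

type flow struct {
    n    int32
    conn *flow // shared connection-level window; nil on the conn itself
}

func (f *flow) available() int32 {
    n := f.n
    if f.conn != nil && f.conn.n < n {
        n = f.conn.n
    }
    return n
}

func (f *flow) take(n int32) {
    f.n -= n // simplified: the real take panics if n > available
    if f.conn != nil {
        f.conn.n -= n
    }
}

func main() {
    conn := &flow{n: 100}
    stream := &flow{n: 65535, conn: conn}
    fmt.Println(stream.available()) // 100: the connection window is the bottleneck
    stream.take(40)
    fmt.Println(stream.available()) // 60
}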
-const ( - // Data Frame - FlagDataEndStream Flags = 0x1 - FlagDataPadded Flags = 0x8 - - // Headers Frame - FlagHeadersEndStream Flags = 0x1 - FlagHeadersEndHeaders Flags = 0x4 - FlagHeadersPadded Flags = 0x8 - FlagHeadersPriority Flags = 0x20 - - // Settings Frame - FlagSettingsAck Flags = 0x1 - - // Ping Frame - FlagPingAck Flags = 0x1 - - // Continuation Frame - FlagContinuationEndHeaders Flags = 0x4 - - FlagPushPromiseEndHeaders Flags = 0x4 - FlagPushPromisePadded Flags = 0x8 -) - -var flagName = map[FrameType]map[Flags]string{ - FrameData: { - FlagDataEndStream: "END_STREAM", - FlagDataPadded: "PADDED", - }, - FrameHeaders: { - FlagHeadersEndStream: "END_STREAM", - FlagHeadersEndHeaders: "END_HEADERS", - FlagHeadersPadded: "PADDED", - FlagHeadersPriority: "PRIORITY", - }, - FrameSettings: { - FlagSettingsAck: "ACK", - }, - FramePing: { - FlagPingAck: "ACK", - }, - FrameContinuation: { - FlagContinuationEndHeaders: "END_HEADERS", - }, - FramePushPromise: { - FlagPushPromiseEndHeaders: "END_HEADERS", - FlagPushPromisePadded: "PADDED", - }, -} - -// a frameParser parses a frame given its FrameHeader and payload -// bytes. The length of payload will always equal fh.Length (which -// might be 0). -type frameParser func(fh FrameHeader, payload []byte) (Frame, error) - -var frameParsers = map[FrameType]frameParser{ - FrameData: parseDataFrame, - FrameHeaders: parseHeadersFrame, - FramePriority: parsePriorityFrame, - FrameRSTStream: parseRSTStreamFrame, - FrameSettings: parseSettingsFrame, - FramePushPromise: parsePushPromise, - FramePing: parsePingFrame, - FrameGoAway: parseGoAwayFrame, - FrameWindowUpdate: parseWindowUpdateFrame, - FrameContinuation: parseContinuationFrame, -} - -func typeFrameParser(t FrameType) frameParser { - if f := frameParsers[t]; f != nil { - return f - } - return parseUnknownFrame -} - -// A FrameHeader is the 9 byte header of all HTTP/2 frames. -// -// See http://http2.github.io/http2-spec/#FrameHeader -type FrameHeader struct { - valid bool // caller can access []byte fields in the Frame - - // Type is the 1 byte frame type. There are ten standard frame - // types, but extension frame types may be written by WriteRawFrame - // and will be returned by ReadFrame (as UnknownFrame). - Type FrameType - - // Flags are the 1 byte of 8 potential bit flags per frame. - // They are specific to the frame type. - Flags Flags - - // Length is the length of the frame, not including the 9 byte header. - // The maximum size is one byte less than 16MB (uint24), but only - // frames up to 16KB are allowed without peer agreement. - Length uint32 - - // StreamID is which stream this frame is for. Certain frames - // are not stream-specific, in which case this field is 0. - StreamID uint32 -} - -// Header returns h. It exists so FrameHeaders can be embedded in other -// specific frame types and implement the Frame interface. 
-func (h FrameHeader) Header() FrameHeader { return h }
-
-func (h FrameHeader) String() string {
- var buf bytes.Buffer
- buf.WriteString("[FrameHeader ")
- h.writeDebug(&buf)
- buf.WriteByte(']')
- return buf.String()
-}
-
-func (h FrameHeader) writeDebug(buf *bytes.Buffer) {
- buf.WriteString(h.Type.String())
- if h.Flags != 0 {
- buf.WriteString(" flags=")
- set := 0
- for i := uint8(0); i < 8; i++ {
- if h.Flags&(1<<i) == 0 {
- continue
- }
- set++
- if set > 1 {
- buf.WriteByte('|')
- }
- name := flagName[h.Type][Flags(1<<i)]
- if name != "" {
- buf.WriteString(name)
- } else {
- fmt.Fprintf(buf, "0x%x", 1<<i)
- }
- }
- }
- if h.StreamID != 0 {
- fmt.Fprintf(buf, " stream=%d", h.StreamID)
- }
- fmt.Fprintf(buf, " len=%d", h.Length)
-}
-
-func (h *FrameHeader) checkValid() {
- if !h.valid {
- panic("typeFrameParser used frame after reset")
- }
-}
-
-func (h *FrameHeader) invalidate() { h.valid = false }
-
-// frame header bytes.
-// Used only by ReadFrameHeader.
-var fhBytes = sync.Pool{
- New: func() interface{} {
- buf := make([]byte, frameHeaderLen)
- return &buf
- },
-}
-
-// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
-// Most users should use Framer.ReadFrame instead.
-func ReadFrameHeader(r io.Reader) (FrameHeader, error) {
- bufp := fhBytes.Get().(*[]byte)
- defer fhBytes.Put(bufp)
- return readFrameHeader(*bufp, r)
-}
-
-func readFrameHeader(buf []byte, r io.Reader) (FrameHeader, error) {
- _, err := io.ReadFull(r, buf[:frameHeaderLen])
- if err != nil {
- return FrameHeader{}, err
- }
- return FrameHeader{
- Length: (uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])),
- Type: FrameType(buf[3]),
- Flags: Flags(buf[4]),
- StreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1),
- valid: true,
- }, nil
-}
-
-// A Frame is the base interface implemented by all frame types.
-// Callers will generally type-assert the specific frame type:
-// *HeadersFrame, *PingFrame, *DataFrame, etc.
-//
-// Frames are only valid until the next call to Framer.ReadFrame.
-type Frame interface {
- Header() FrameHeader
-
- // invalidate is called by Framer.ReadFrame to mark this
- // frame's buffers as being invalid, since the subsequent
- // frame will reuse them.
- invalidate()
-}
-
-// A Framer reads and writes Frames.
-type Framer struct {
- r io.Reader
- lastFrame Frame
- errDetail error
-
- // lastHeaderStream is non-zero if the last frame was an
- // unfinished HEADERS/CONTINUATION.
- lastHeaderStream uint32
-
- maxReadSize uint32
- headerBuf [frameHeaderLen]byte
-
- // TODO: let getReadBuf be configurable, and use a less memory-hungry
- // allocator in server.go to minimize memory pinned for many idle conns.
- // Will probably also need to make frame invalidation have a hook too.
- getReadBuf func(size uint32) []byte
- readBuf []byte // cache for default getReadBuf
-
- maxWriteSize uint32 // zero means unlimited; TODO: implement
-
- w io.Writer
- wbuf []byte
-
- // AllowIllegalWrites permits the Framer's Write methods to
- // write frames that do not conform to the HTTP/2 spec. This
- // permits using the Framer to test other HTTP/2
- // implementations' conformance to the spec.
- // If false, the Write methods will prefer to return an error
- // rather than comply.
- AllowIllegalWrites bool
-
- // AllowIllegalReads permits the Framer's ReadFrame method
- // to return non-compliant frames or frame orders.
- // This is for testing and permits using the Framer to test
- // other HTTP/2 implementations' conformance to the spec.
- // It is not compatible with ReadMetaHeaders.
- AllowIllegalReads bool
-
- // ReadMetaHeaders if non-nil causes ReadFrame to merge
- // HEADERS and CONTINUATION frames together and return
- // MetaHeadersFrame instead.
- ReadMetaHeaders *hpack.Decoder
-
- // MaxHeaderListSize is the http2 MAX_HEADER_LIST_SIZE.
- // It's used only if ReadMetaHeaders is set; 0 means a sane default
- // (currently 16MB).
- // If the limit is hit, MetaHeadersFrame.Truncated is set true.
- MaxHeaderListSize uint32
-
- logReads bool
-
- debugFramer *Framer // only use for logging written writes
- debugFramerBuf *bytes.Buffer
-}
-
-func (fr *Framer) maxHeaderListSize() uint32 {
- if fr.MaxHeaderListSize == 0 {
- return 16 << 20 // sane default, per docs
- }
- return fr.MaxHeaderListSize
-}
-
-func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
- // Write the FrameHeader.
- f.wbuf = append(f.wbuf[:0],
- 0, // 3 bytes of length, filled in in endWrite
- 0,
- 0,
- byte(ftype),
- byte(flags),
- byte(streamID>>24),
- byte(streamID>>16),
- byte(streamID>>8),
- byte(streamID))
-}
-
-func (f *Framer) endWrite() error {
- // Now that we know the final size, fill in the FrameHeader in
- // the space previously reserved for it. Abuse append.
- length := len(f.wbuf) - frameHeaderLen
- if length >= (1 << 24) {
- return ErrFrameTooLarge
- }
- _ = append(f.wbuf[:0],
- byte(length>>16),
- byte(length>>8),
- byte(length))
- if logFrameWrites {
- f.logWrite()
- }
-
- n, err := f.w.Write(f.wbuf)
- if err == nil && n != len(f.wbuf) {
- err = io.ErrShortWrite
- }
- return err
-}
-
-func (f *Framer) logWrite() {
- if f.debugFramer == nil {
- f.debugFramerBuf = new(bytes.Buffer)
- f.debugFramer = NewFramer(nil, f.debugFramerBuf)
- f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below
- // Let us read anything, even if we accidentally wrote it
- // in the wrong order:
- f.debugFramer.AllowIllegalReads = true
- }
- f.debugFramerBuf.Write(f.wbuf)
- fr, err := f.debugFramer.ReadFrame()
- if err != nil {
- log.Printf("http2: Framer %p: failed to decode just-written frame", f)
- return
- }
- log.Printf("http2: Framer %p: wrote %v", f, summarizeFrame(fr))
-}
-
-func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) }
-func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) }
-func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) }
-func (f *Framer) writeUint32(v uint32) {
- f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
-}
-
-const (
- minMaxFrameSize = 1 << 14
- maxFrameSize = 1<<24 - 1
-)
-
-// NewFramer returns a Framer that writes frames to w and reads them from r.
-func NewFramer(w io.Writer, r io.Reader) *Framer {
- fr := &Framer{
- w: w,
- r: r,
- logReads: logFrameReads,
- }
- fr.getReadBuf = func(size uint32) []byte {
- if cap(fr.readBuf) >= int(size) {
- return fr.readBuf[:size]
- }
- fr.readBuf = make([]byte, size)
- return fr.readBuf
- }
- fr.SetMaxReadFrameSize(maxFrameSize)
- return fr
-}
-
-// SetMaxReadFrameSize sets the maximum size of a frame
-// that will be read by a subsequent call to ReadFrame.
-// It is the caller's responsibility to advertise this
-// limit with a SETTINGS frame.
-func (fr *Framer) SetMaxReadFrameSize(v uint32) {
- if v > maxFrameSize {
- v = maxFrameSize
- }
- fr.maxReadSize = v
-}
-
-// ErrorDetail returns a more detailed error of the last error
-// returned by Framer.ReadFrame. For instance, if ReadFrame
-// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail
-// will say exactly what was invalid. ErrorDetail is not guaranteed
-// to return a non-nil value and like the rest of the http2 package,
-// its return value is not protected by an API compatibility promise.
-// ErrorDetail is reset after the next call to ReadFrame.
-func (fr *Framer) ErrorDetail() error {
- return fr.errDetail
-}
-
-// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer
-// sends a frame that is larger than declared with SetMaxReadFrameSize.
-var ErrFrameTooLarge = errors.New("http2: frame too large")
-
-// terminalReadFrameError reports whether err is an unrecoverable
-// error from ReadFrame and no other frames should be read.
-func terminalReadFrameError(err error) bool {
- if _, ok := err.(StreamError); ok {
- return false
- }
- return err != nil
-}
-
-// ReadFrame reads a single frame. The returned Frame is only valid
-// until the next call to ReadFrame.
-//
-// If the frame is larger than previously set with SetMaxReadFrameSize, the
-// returned error is ErrFrameTooLarge. Other errors may be of type
-// ConnectionError, StreamError, or anything else from the underlying
-// reader.
-func (fr *Framer) ReadFrame() (Frame, error) {
- fr.errDetail = nil
- if fr.lastFrame != nil {
- fr.lastFrame.invalidate()
- }
- fh, err := readFrameHeader(fr.headerBuf[:], fr.r)
- if err != nil {
- return nil, err
- }
- if fh.Length > fr.maxReadSize {
- return nil, ErrFrameTooLarge
- }
- payload := fr.getReadBuf(fh.Length)
- if _, err := io.ReadFull(fr.r, payload); err != nil {
- return nil, err
- }
- f, err := typeFrameParser(fh.Type)(fh, payload)
- if err != nil {
- if ce, ok := err.(connError); ok {
- return nil, fr.connError(ce.Code, ce.Reason)
- }
- return nil, err
- }
- if err := fr.checkFrameOrder(f); err != nil {
- return nil, err
- }
- if fr.logReads {
- log.Printf("http2: Framer %p: read %v", fr, summarizeFrame(f))
- }
- if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil {
- return fr.readMetaFrame(f.(*HeadersFrame))
- }
- return f, nil
-}
-
-// connError returns ConnectionError(code) but first
-// stashes away a public reason so the caller can optionally relay it
-// to the peer before hanging up on them. This might help others debug
-// their implementations.
-func (fr *Framer) connError(code ErrCode, reason string) error {
- fr.errDetail = errors.New(reason)
- return ConnectionError(code)
-}
-
-// checkFrameOrder reports an error if f is an invalid frame to return
-// next from ReadFrame. Mostly it checks whether HEADERS and
-// CONTINUATION frames are contiguous.
-func (fr *Framer) checkFrameOrder(f Frame) error {
- last := fr.lastFrame
- fr.lastFrame = f
- if fr.AllowIllegalReads {
- return nil
- }
-
- fh := f.Header()
- if fr.lastHeaderStream != 0 {
- if fh.Type != FrameContinuation {
- return fr.connError(ErrCodeProtocol,
- fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
- fh.Type, fh.StreamID,
- last.Header().Type, fr.lastHeaderStream))
- }
- if fh.StreamID != fr.lastHeaderStream {
- return fr.connError(ErrCodeProtocol,
- fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d",
- fh.StreamID, fr.lastHeaderStream))
- }
- } else if fh.Type == FrameContinuation {
- return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID))
- }
-
- switch fh.Type {
- case FrameHeaders, FrameContinuation:
- if fh.Flags.Has(FlagHeadersEndHeaders) {
- fr.lastHeaderStream = 0
- } else {
- fr.lastHeaderStream = fh.StreamID
- }
- }
-
- return nil
-}
-
-// A DataFrame conveys arbitrary, variable-length sequences of octets
-// associated with a stream.
-// See http://http2.github.io/http2-spec/#rfc.section.6.1
-type DataFrame struct {
- FrameHeader
- data []byte
-}
-
-func (f *DataFrame) StreamEnded() bool {
- return f.FrameHeader.Flags.Has(FlagDataEndStream)
-}
-
-// Data returns the frame's data octets, not including any padding
-// size byte or padding suffix bytes.
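[Editor's note: a hypothetical consumption sketch, not part of the diff. It writes one DATA frame into a buffer with WriteData (defined just below) and then drains it with the ReadFrame loop pattern documented above; bytes.Buffer returns io.EOF once empty.]

package main

import (
    "bytes"
    "io"
    "log"

    "golang.org/x/net/http2"
)

func main() {
    var buf bytes.Buffer
    fr := http2.NewFramer(&buf, &buf) // write into and read from the same buffer
    if err := fr.WriteData(1, true, []byte("hello")); err != nil {
        log.Fatal(err)
    }

    for {
        f, err := fr.ReadFrame()
        if err == io.EOF {
            return // buffer drained
        }
        if err != nil {
            log.Fatalf("ReadFrame: %v", err) // e.g. ErrFrameTooLarge or a ConnectionError
        }
        switch f := f.(type) {
        case *http2.DataFrame:
            log.Printf("DATA stream=%d len=%d end=%v", f.Header().StreamID, len(f.Data()), f.StreamEnded())
        default:
            log.Printf("%v", f.Header())
        }
    }
}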
-// The caller must not retain the returned memory past the next -// call to ReadFrame. -func (f *DataFrame) Data() []byte { - f.checkValid() - return f.data -} - -func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { - if fh.StreamID == 0 { - // DATA frames MUST be associated with a stream. If a - // DATA frame is received whose stream identifier - // field is 0x0, the recipient MUST respond with a - // connection error (Section 5.4.1) of type - // PROTOCOL_ERROR. - return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} - } - f := &DataFrame{ - FrameHeader: fh, - } - var padSize byte - if fh.Flags.Has(FlagDataPadded) { - var err error - payload, padSize, err = readByte(payload) - if err != nil { - return nil, err - } - } - if int(padSize) > len(payload) { - // If the length of the padding is greater than the - // length of the frame payload, the recipient MUST - // treat this as a connection error. - // Filed: https://github.com/http2/http2-spec/issues/610 - return nil, connError{ErrCodeProtocol, "pad size larger than data payload"} - } - f.data = payload[:len(payload)-int(padSize)] - return f, nil -} - -var errStreamID = errors.New("invalid streamid") - -func validStreamID(streamID uint32) bool { - return streamID != 0 && streamID&(1<<31) == 0 -} - -// WriteData writes a DATA frame. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { - // TODO: ignoring padding for now. will add when somebody cares. - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - var flags Flags - if endStream { - flags |= FlagDataEndStream - } - f.startWrite(FrameData, flags, streamID) - f.wbuf = append(f.wbuf, data...) - return f.endWrite() -} - -// A SettingsFrame conveys configuration parameters that affect how -// endpoints communicate, such as preferences and constraints on peer -// behavior. -// -// See http://http2.github.io/http2-spec/#SETTINGS -type SettingsFrame struct { - FrameHeader - p []byte -} - -func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) { - if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { - // When this (ACK 0x1) bit is set, the payload of the - // SETTINGS frame MUST be empty. Receipt of a - // SETTINGS frame with the ACK flag set and a length - // field value other than 0 MUST be treated as a - // connection error (Section 5.4.1) of type - // FRAME_SIZE_ERROR. - return nil, ConnectionError(ErrCodeFrameSize) - } - if fh.StreamID != 0 { - // SETTINGS frames always apply to a connection, - // never a single stream. The stream identifier for a - // SETTINGS frame MUST be zero (0x0). If an endpoint - // receives a SETTINGS frame whose stream identifier - // field is anything other than 0x0, the endpoint MUST - // respond with a connection error (Section 5.4.1) of - // type PROTOCOL_ERROR. - return nil, ConnectionError(ErrCodeProtocol) - } - if len(p)%6 != 0 { - // Expecting even number of 6 byte settings. - return nil, ConnectionError(ErrCodeFrameSize) - } - f := &SettingsFrame{FrameHeader: fh, p: p} - if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 { - // Values above the maximum flow control window size of 2^31 - 1 MUST - // be treated as a connection error (Section 5.4.1) of type - // FLOW_CONTROL_ERROR. 
- return nil, ConnectionError(ErrCodeFlowControl)
- }
- return f, nil
-}
-
-func (f *SettingsFrame) IsAck() bool {
- return f.FrameHeader.Flags.Has(FlagSettingsAck)
-}
-
-func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) {
- f.checkValid()
- buf := f.p
- for len(buf) > 0 {
- settingID := SettingID(binary.BigEndian.Uint16(buf[:2]))
- if settingID == s {
- return binary.BigEndian.Uint32(buf[2:6]), true
- }
- buf = buf[6:]
- }
- return 0, false
-}
-
-// ForeachSetting runs fn for each setting.
-// It stops and returns the first error.
-func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error {
- f.checkValid()
- buf := f.p
- for len(buf) > 0 {
- if err := fn(Setting{
- SettingID(binary.BigEndian.Uint16(buf[:2])),
- binary.BigEndian.Uint32(buf[2:6]),
- }); err != nil {
- return err
- }
- buf = buf[6:]
- }
- return nil
-}
-
-// WriteSettings writes a SETTINGS frame with zero or more settings
-// specified and the ACK bit not set.
-//
-// It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility to not call other Write methods concurrently.
-func (f *Framer) WriteSettings(settings ...Setting) error {
- f.startWrite(FrameSettings, 0, 0)
- for _, s := range settings {
- f.writeUint16(uint16(s.ID))
- f.writeUint32(s.Val)
- }
- return f.endWrite()
-}
-
-// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set.
-//
-// It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility to not call other Write methods concurrently.
-func (f *Framer) WriteSettingsAck() error {
- f.startWrite(FrameSettings, FlagSettingsAck, 0)
- return f.endWrite()
-}
-
-// A PingFrame is a mechanism for measuring a minimal round trip time
-// from the sender, as well as determining whether an idle connection
-// is still functional.
-// See http://http2.github.io/http2-spec/#rfc.section.6.7
-type PingFrame struct {
- FrameHeader
- Data [8]byte
-}
-
-func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }
-
-func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) {
- if len(payload) != 8 {
- return nil, ConnectionError(ErrCodeFrameSize)
- }
- if fh.StreamID != 0 {
- return nil, ConnectionError(ErrCodeProtocol)
- }
- f := &PingFrame{FrameHeader: fh}
- copy(f.Data[:], payload)
- return f, nil
-}
-
-func (f *Framer) WritePing(ack bool, data [8]byte) error {
- var flags Flags
- if ack {
- flags = FlagPingAck
- }
- f.startWrite(FramePing, flags, 0)
- f.writeBytes(data[:])
- return f.endWrite()
-}
-
-// A GoAwayFrame informs the remote peer to stop creating streams on this connection.
-// See http://http2.github.io/http2-spec/#rfc.section.6.8
-type GoAwayFrame struct {
- FrameHeader
- LastStreamID uint32
- ErrCode ErrCode
- debugData []byte
-}
-
-// DebugData returns any debug data in the GOAWAY frame. Its contents
-// are not defined.
-// The caller must not retain the returned memory past the next
-// call to ReadFrame.
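[Editor's note: an illustrative round trip through WriteSettings, Value, and ForeachSetting above; not part of the diff. Setting, SettingID, and the Setting* constants are defined elsewhere in this package (http2.go, not shown in this hunk).]

package main

import (
    "bytes"
    "fmt"

    "golang.org/x/net/http2"
)

func main() {
    var buf bytes.Buffer
    fr := http2.NewFramer(&buf, &buf)

    // Write a SETTINGS frame carrying two parameters, then read it back.
    fr.WriteSettings(
        http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 14},
        http2.Setting{ID: http2.SettingInitialWindowSize, Val: 65535},
    )

    f, err := fr.ReadFrame()
    if err != nil {
        panic(err)
    }
    sf := f.(*http2.SettingsFrame)
    sf.ForeachSetting(func(s http2.Setting) error {
        fmt.Println(s) // e.g. [MAX_FRAME_SIZE = 16384]
        return nil
    })
    if v, ok := sf.Value(http2.SettingInitialWindowSize); ok {
        fmt.Println("initial window:", v) // 65535
    }
}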
-func (f *GoAwayFrame) DebugData() []byte {
- f.checkValid()
- return f.debugData
-}
-
-func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) {
- if fh.StreamID != 0 {
- return nil, ConnectionError(ErrCodeProtocol)
- }
- if len(p) < 8 {
- return nil, ConnectionError(ErrCodeFrameSize)
- }
- return &GoAwayFrame{
- FrameHeader: fh,
- LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1),
- ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])),
- debugData: p[8:],
- }, nil
-}
-
-func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error {
- f.startWrite(FrameGoAway, 0, 0)
- f.writeUint32(maxStreamID & (1<<31 - 1))
- f.writeUint32(uint32(code))
- f.writeBytes(debugData)
- return f.endWrite()
-}
-
-// An UnknownFrame is the frame type returned when the frame type is unknown
-// or no specific frame type parser exists.
-type UnknownFrame struct {
- FrameHeader
- p []byte
-}
-
-// Payload returns the frame's payload (after the header). It is not
-// valid to call this method after a subsequent call to
-// Framer.ReadFrame, nor is it valid to retain the returned slice.
-// The memory is owned by the Framer and is invalidated when the next
-// frame is read.
-func (f *UnknownFrame) Payload() []byte {
- f.checkValid()
- return f.p
-}
-
-func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) {
- return &UnknownFrame{fh, p}, nil
-}
-
-// A WindowUpdateFrame is used to implement flow control.
-// See http://http2.github.io/http2-spec/#rfc.section.6.9
-type WindowUpdateFrame struct {
- FrameHeader
- Increment uint32 // never read with high bit set
-}
-
-func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
- if len(p) != 4 {
- return nil, ConnectionError(ErrCodeFrameSize)
- }
- inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit
- if inc == 0 {
- // A receiver MUST treat the receipt of a
- // WINDOW_UPDATE frame with a flow control window
- // increment of 0 as a stream error (Section 5.4.2) of
- // type PROTOCOL_ERROR; errors on the connection flow
- // control window MUST be treated as a connection
- // error (Section 5.4.1).
- if fh.StreamID == 0 {
- return nil, ConnectionError(ErrCodeProtocol)
- }
- return nil, StreamError{fh.StreamID, ErrCodeProtocol}
- }
- return &WindowUpdateFrame{
- FrameHeader: fh,
- Increment: inc,
- }, nil
-}
-
-// WriteWindowUpdate writes a WINDOW_UPDATE frame.
-// The increment value must be between 1 and 2,147,483,647, inclusive.
-// If the Stream ID is zero, the window update applies to the
-// connection as a whole.
-func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error {
- // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets."
- if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites {
- return errors.New("illegal window increment value")
- }
- f.startWrite(FrameWindowUpdate, 0, streamID)
- f.writeUint32(incr)
- return f.endWrite()
-}
-
-// A HeadersFrame is used to open a stream and additionally carries a
-// header block fragment.
-type HeadersFrame struct {
- FrameHeader
-
- // Priority is set if FlagHeadersPriority is set in the FrameHeader.
- Priority PriorityParam - - headerFragBuf []byte // not owned -} - -func (f *HeadersFrame) HeaderBlockFragment() []byte { - f.checkValid() - return f.headerFragBuf -} - -func (f *HeadersFrame) HeadersEnded() bool { - return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders) -} - -func (f *HeadersFrame) StreamEnded() bool { - return f.FrameHeader.Flags.Has(FlagHeadersEndStream) -} - -func (f *HeadersFrame) HasPriority() bool { - return f.FrameHeader.Flags.Has(FlagHeadersPriority) -} - -func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) { - hf := &HeadersFrame{ - FrameHeader: fh, - } - if fh.StreamID == 0 { - // HEADERS frames MUST be associated with a stream. If a HEADERS frame - // is received whose stream identifier field is 0x0, the recipient MUST - // respond with a connection error (Section 5.4.1) of type - // PROTOCOL_ERROR. - return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"} - } - var padLength uint8 - if fh.Flags.Has(FlagHeadersPadded) { - if p, padLength, err = readByte(p); err != nil { - return - } - } - if fh.Flags.Has(FlagHeadersPriority) { - var v uint32 - p, v, err = readUint32(p) - if err != nil { - return nil, err - } - hf.Priority.StreamDep = v & 0x7fffffff - hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set - p, hf.Priority.Weight, err = readByte(p) - if err != nil { - return nil, err - } - } - if len(p)-int(padLength) <= 0 { - return nil, StreamError{fh.StreamID, ErrCodeProtocol} - } - hf.headerFragBuf = p[:len(p)-int(padLength)] - return hf, nil -} - -// HeadersFrameParam are the parameters for writing a HEADERS frame. -type HeadersFrameParam struct { - // StreamID is the required Stream ID to initiate. - StreamID uint32 - // BlockFragment is part (or all) of a Header Block. - BlockFragment []byte - - // EndStream indicates that the header block is the last that - // the endpoint will send for the identified stream. Setting - // this flag causes the stream to enter one of "half closed" - // states. - EndStream bool - - // EndHeaders indicates that this frame contains an entire - // header block and is not followed by any - // CONTINUATION frames. - EndHeaders bool - - // PadLength is the optional number of bytes of zeros to add - // to this frame. - PadLength uint8 - - // Priority, if non-zero, includes stream priority information - // in the HEADER frame. - Priority PriorityParam -} - -// WriteHeaders writes a single HEADERS frame. -// -// This is a low-level header writing method. Encoding headers and -// splitting them into any necessary CONTINUATION frames is handled -// elsewhere. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. 
-func (f *Framer) WriteHeaders(p HeadersFrameParam) error {
- if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
- return errStreamID
- }
- var flags Flags
- if p.PadLength != 0 {
- flags |= FlagHeadersPadded
- }
- if p.EndStream {
- flags |= FlagHeadersEndStream
- }
- if p.EndHeaders {
- flags |= FlagHeadersEndHeaders
- }
- if !p.Priority.IsZero() {
- flags |= FlagHeadersPriority
- }
- f.startWrite(FrameHeaders, flags, p.StreamID)
- if p.PadLength != 0 {
- f.writeByte(p.PadLength)
- }
- if !p.Priority.IsZero() {
- v := p.Priority.StreamDep
- if !validStreamID(v) && !f.AllowIllegalWrites {
- return errors.New("invalid dependent stream id")
- }
- if p.Priority.Exclusive {
- v |= 1 << 31
- }
- f.writeUint32(v)
- f.writeByte(p.Priority.Weight)
- }
- f.wbuf = append(f.wbuf, p.BlockFragment...)
- f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
- return f.endWrite()
-}
-
-// A PriorityFrame specifies the sender-advised priority of a stream.
-// See http://http2.github.io/http2-spec/#rfc.section.6.3
-type PriorityFrame struct {
- FrameHeader
- PriorityParam
-}
-
-// PriorityParam are the stream prioritization parameters.
-type PriorityParam struct {
- // StreamDep is a 31-bit stream identifier for the
- // stream that this stream depends on. Zero means no
- // dependency.
- StreamDep uint32
-
- // Exclusive is whether the dependency is exclusive.
- Exclusive bool
-
- // Weight is the stream's zero-indexed weight. It should be
- // set together with StreamDep, or neither should be set. Per
- // the spec, "Add one to the value to obtain a weight between
- // 1 and 256."
- Weight uint8
-}
-
-func (p PriorityParam) IsZero() bool {
- return p == PriorityParam{}
-}
-
-func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) {
- if fh.StreamID == 0 {
- return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
- }
- if len(payload) != 5 {
- return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))}
- }
- v := binary.BigEndian.Uint32(payload[:4])
- streamID := v & 0x7fffffff // mask off high bit
- return &PriorityFrame{
- FrameHeader: fh,
- PriorityParam: PriorityParam{
- Weight: payload[4],
- StreamDep: streamID,
- Exclusive: streamID != v, // was high bit set?
- },
- }, nil
-}
-
-// WritePriority writes a PRIORITY frame.
-//
-// It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility to not call other Write methods concurrently.
-func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error {
- if !validStreamID(streamID) && !f.AllowIllegalWrites {
- return errStreamID
- }
- f.startWrite(FramePriority, 0, streamID)
- v := p.StreamDep
- if p.Exclusive {
- v |= 1 << 31
- }
- f.writeUint32(v)
- f.writeByte(p.Weight)
- return f.endWrite()
-}
-
-// A RSTStreamFrame allows for abnormal termination of a stream.
-// See http://http2.github.io/http2-spec/#rfc.section.6.4
-type RSTStreamFrame struct {
- FrameHeader
- ErrCode ErrCode
-}
-
-func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) {
- if len(p) != 4 {
- return nil, ConnectionError(ErrCodeFrameSize)
- }
- if fh.StreamID == 0 {
- return nil, ConnectionError(ErrCodeProtocol)
- }
- return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil
-}
-
-// WriteRSTStream writes a RST_STREAM frame.
-//
-// It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility to not call other Write methods concurrently.
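[Editor's note: a sketch, not part of the diff, combining hpack encoding with the low-level WriteHeaders call documented above. The golang.org/x/net/http2/hpack package is vendored alongside this one; the header field values are placeholders.]

package main

import (
    "bytes"

    "golang.org/x/net/http2"
    "golang.org/x/net/http2/hpack"
)

func main() {
    // Encode a minimal request header block with HPACK.
    var hbuf bytes.Buffer
    enc := hpack.NewEncoder(&hbuf)
    enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
    enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"})
    enc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"})
    enc.WriteField(hpack.HeaderField{Name: ":authority", Value: "example.com"})

    // Write it as a single HEADERS frame on stream 1.
    var out bytes.Buffer
    fr := http2.NewFramer(&out, nil)
    if err := fr.WriteHeaders(http2.HeadersFrameParam{
        StreamID:      1,
        BlockFragment: hbuf.Bytes(),
        EndStream:     true,
        EndHeaders:    true, // no CONTINUATION frames follow
    }); err != nil {
        panic(err)
    }
    // out now holds one complete HEADERS frame.
}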
-func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - f.startWrite(FrameRSTStream, 0, streamID) - f.writeUint32(uint32(code)) - return f.endWrite() -} - -// A ContinuationFrame is used to continue a sequence of header block fragments. -// See http://http2.github.io/http2-spec/#rfc.section.6.10 -type ContinuationFrame struct { - FrameHeader - headerFragBuf []byte -} - -func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) { - if fh.StreamID == 0 { - return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} - } - return &ContinuationFrame{fh, p}, nil -} - -func (f *ContinuationFrame) HeaderBlockFragment() []byte { - f.checkValid() - return f.headerFragBuf -} - -func (f *ContinuationFrame) HeadersEnded() bool { - return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders) -} - -// WriteContinuation writes a CONTINUATION frame. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - var flags Flags - if endHeaders { - flags |= FlagContinuationEndHeaders - } - f.startWrite(FrameContinuation, flags, streamID) - f.wbuf = append(f.wbuf, headerBlockFragment...) - return f.endWrite() -} - -// A PushPromiseFrame is used to initiate a server stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.6 -type PushPromiseFrame struct { - FrameHeader - PromiseID uint32 - headerFragBuf []byte // not owned -} - -func (f *PushPromiseFrame) HeaderBlockFragment() []byte { - f.checkValid() - return f.headerFragBuf -} - -func (f *PushPromiseFrame) HeadersEnded() bool { - return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) -} - -func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) { - pp := &PushPromiseFrame{ - FrameHeader: fh, - } - if pp.StreamID == 0 { - // PUSH_PROMISE frames MUST be associated with an existing, - // peer-initiated stream. The stream identifier of a - // PUSH_PROMISE frame indicates the stream it is associated - // with. If the stream identifier field specifies the value - // 0x0, a recipient MUST respond with a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. - return nil, ConnectionError(ErrCodeProtocol) - } - // The PUSH_PROMISE frame includes optional padding. - // Padding fields and flags are identical to those defined for DATA frames - var padLength uint8 - if fh.Flags.Has(FlagPushPromisePadded) { - if p, padLength, err = readByte(p); err != nil { - return - } - } - - p, pp.PromiseID, err = readUint32(p) - if err != nil { - return - } - pp.PromiseID = pp.PromiseID & (1<<31 - 1) - - if int(padLength) > len(p) { - // like the DATA frame, error out if padding is longer than the body. - return nil, ConnectionError(ErrCodeProtocol) - } - pp.headerFragBuf = p[:len(p)-int(padLength)] - return pp, nil -} - -// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame. -type PushPromiseParam struct { - // StreamID is the required Stream ID to initiate. - StreamID uint32 - - // PromiseID is the required Stream ID which this - // Push Promises - PromiseID uint32 - - // BlockFragment is part (or all) of a Header Block. 
- BlockFragment []byte - - // EndHeaders indicates that this frame contains an entire - // header block and is not followed by any - // CONTINUATION frames. - EndHeaders bool - - // PadLength is the optional number of bytes of zeros to add - // to this frame. - PadLength uint8 -} - -// WritePushPromise writes a single PushPromise Frame. -// -// As with Header Frames, This is the low level call for writing -// individual frames. Continuation frames are handled elsewhere. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WritePushPromise(p PushPromiseParam) error { - if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { - return errStreamID - } - var flags Flags - if p.PadLength != 0 { - flags |= FlagPushPromisePadded - } - if p.EndHeaders { - flags |= FlagPushPromiseEndHeaders - } - f.startWrite(FramePushPromise, flags, p.StreamID) - if p.PadLength != 0 { - f.writeByte(p.PadLength) - } - if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites { - return errStreamID - } - f.writeUint32(p.PromiseID) - f.wbuf = append(f.wbuf, p.BlockFragment...) - f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) - return f.endWrite() -} - -// WriteRawFrame writes a raw frame. This can be used to write -// extension frames unknown to this package. -func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error { - f.startWrite(t, flags, streamID) - f.writeBytes(payload) - return f.endWrite() -} - -func readByte(p []byte) (remain []byte, b byte, err error) { - if len(p) == 0 { - return nil, 0, io.ErrUnexpectedEOF - } - return p[1:], p[0], nil -} - -func readUint32(p []byte) (remain []byte, v uint32, err error) { - if len(p) < 4 { - return nil, 0, io.ErrUnexpectedEOF - } - return p[4:], binary.BigEndian.Uint32(p[:4]), nil -} - -type streamEnder interface { - StreamEnded() bool -} - -type headersEnder interface { - HeadersEnded() bool -} - -type headersOrContinuation interface { - headersEnder - HeaderBlockFragment() []byte -} - -// A MetaHeadersFrame is the representation of one HEADERS frame and -// zero or more contiguous CONTINUATION frames and the decoding of -// their HPACK-encoded contents. -// -// This type of frame does not appear on the wire and is only returned -// by the Framer when Framer.ReadMetaHeaders is set. -type MetaHeadersFrame struct { - *HeadersFrame - - // Fields are the fields contained in the HEADERS and - // CONTINUATION frames. The underlying slice is owned by the - // Framer and must not be retained after the next call to - // ReadFrame. - // - // Fields are guaranteed to be in the correct http2 order and - // not have unknown pseudo header fields or invalid header - // field names or values. Required pseudo header fields may be - // missing, however. Use the MetaHeadersFrame.Pseudo accessor - // method access pseudo headers. - Fields []hpack.HeaderField - - // Truncated is whether the max header list size limit was hit - // and Fields is incomplete. The hpack decoder state is still - // valid, however. - Truncated bool -} - -// PseudoValue returns the given pseudo header field's value. -// The provided pseudo field should not contain the leading colon. -func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string { - for _, hf := range mh.Fields { - if !hf.IsPseudo() { - return "" - } - if hf.Name[1:] == pseudo { - return hf.Value - } - } - return "" -} - -// RegularFields returns the regular (non-pseudo) header fields of mh. 
-// The caller does not own the returned slice. -func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField { - for i, hf := range mh.Fields { - if !hf.IsPseudo() { - return mh.Fields[i:] - } - } - return nil -} - -// PseudoFields returns the pseudo header fields of mh. -// The caller does not own the returned slice. -func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField { - for i, hf := range mh.Fields { - if !hf.IsPseudo() { - return mh.Fields[:i] - } - } - return mh.Fields -} - -func (mh *MetaHeadersFrame) checkPseudos() error { - var isRequest, isResponse bool - pf := mh.PseudoFields() - for i, hf := range pf { - switch hf.Name { - case ":method", ":path", ":scheme", ":authority": - isRequest = true - case ":status": - isResponse = true - default: - return pseudoHeaderError(hf.Name) - } - // Check for duplicates. - // This would be a bad algorithm, but N is 4. - // And this doesn't allocate. - for _, hf2 := range pf[:i] { - if hf.Name == hf2.Name { - return duplicatePseudoHeaderError(hf.Name) - } - } - } - if isRequest && isResponse { - return errMixPseudoHeaderTypes - } - return nil -} - -func (fr *Framer) maxHeaderStringLen() int { - v := fr.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) - } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 -} - -// readMetaFrame returns 0 or more CONTINUATION frames from fr and -// merge them into into the provided hf and returns a MetaHeadersFrame -// with the decoded hpack values. -func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { - if fr.AllowIllegalReads { - return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") - } - mh := &MetaHeadersFrame{ - HeadersFrame: hf, - } - var remainSize = fr.maxHeaderListSize() - var sawRegular bool - - var invalid error // pseudo header field errors - hdec := fr.ReadMetaHeaders - hdec.SetEmitEnabled(true) - hdec.SetMaxStringLength(fr.maxHeaderStringLen()) - hdec.SetEmitFunc(func(hf hpack.HeaderField) { - if !validHeaderFieldValue(hf.Value) { - invalid = headerFieldValueError(hf.Value) - } - isPseudo := strings.HasPrefix(hf.Name, ":") - if isPseudo { - if sawRegular { - invalid = errPseudoAfterRegular - } - } else { - sawRegular = true - if !validHeaderFieldName(hf.Name) { - invalid = headerFieldNameError(hf.Name) - } - } - - if invalid != nil { - hdec.SetEmitEnabled(false) - return - } - - size := hf.Size() - if size > remainSize { - hdec.SetEmitEnabled(false) - mh.Truncated = true - return - } - remainSize -= size - - mh.Fields = append(mh.Fields, hf) - }) - // Lose reference to MetaHeadersFrame: - defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {}) - - var hc headersOrContinuation = hf - for { - frag := hc.HeaderBlockFragment() - if _, err := hdec.Write(frag); err != nil { - return nil, ConnectionError(ErrCodeCompression) - } - - if hc.HeadersEnded() { - break - } - if f, err := fr.ReadFrame(); err != nil { - return nil, err - } else { - hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder - } - } - - mh.HeadersFrame.headerFragBuf = nil - mh.HeadersFrame.invalidate() - - if err := hdec.Close(); err != nil { - return nil, ConnectionError(ErrCodeCompression) - } - if invalid != nil { - fr.errDetail = invalid - return nil, StreamError{mh.StreamID, ErrCodeProtocol} - } - if err := mh.checkPseudos(); err != nil { - fr.errDetail = err - return nil, StreamError{mh.StreamID, ErrCodeProtocol} - } - return mh, nil -} - -func summarizeFrame(f Frame) string { - var buf 
bytes.Buffer - f.Header().writeDebug(&buf) - switch f := f.(type) { - case *SettingsFrame: - n := 0 - f.ForeachSetting(func(s Setting) error { - n++ - if n == 1 { - buf.WriteString(", settings:") - } - fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val) - return nil - }) - if n > 0 { - buf.Truncate(buf.Len() - 1) // remove trailing comma - } - case *DataFrame: - data := f.Data() - const max = 256 - if len(data) > max { - data = data[:max] - } - fmt.Fprintf(&buf, " data=%q", data) - if len(f.Data()) > max { - fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max) - } - case *WindowUpdateFrame: - if f.StreamID == 0 { - buf.WriteString(" (conn)") - } - fmt.Fprintf(&buf, " incr=%v", f.Increment) - case *PingFrame: - fmt.Fprintf(&buf, " ping=%q", f.Data[:]) - case *GoAwayFrame: - fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q", - f.LastStreamID, f.ErrCode, f.debugData) - case *RSTStreamFrame: - fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode) - } - return buf.String() -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/go15.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/go15.go deleted file mode 100644 index f0a56241..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/go15.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.5 - -package http2 - -import "net/http" - -func requestCancel(req *http.Request) <-chan struct{} { return req.Cancel } diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/gotrack.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/gotrack.go deleted file mode 100644 index 9933c9f8..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/gotrack.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Defensive debug-only utility to track that functions run on the -// goroutine that they're supposed to. 
-
-package http2
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"os"
-	"runtime"
-	"strconv"
-	"sync"
-)
-
-var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
-
-type goroutineLock uint64
-
-func newGoroutineLock() goroutineLock {
-	if !DebugGoroutines {
-		return 0
-	}
-	return goroutineLock(curGoroutineID())
-}
-
-func (g goroutineLock) check() {
-	if !DebugGoroutines {
-		return
-	}
-	if curGoroutineID() != uint64(g) {
-		panic("running on the wrong goroutine")
-	}
-}
-
-func (g goroutineLock) checkNotOn() {
-	if !DebugGoroutines {
-		return
-	}
-	if curGoroutineID() == uint64(g) {
-		panic("running on the wrong goroutine")
-	}
-}
-
-var goroutineSpace = []byte("goroutine ")
-
-func curGoroutineID() uint64 {
-	bp := littleBuf.Get().(*[]byte)
-	defer littleBuf.Put(bp)
-	b := *bp
-	b = b[:runtime.Stack(b, false)]
-	// Parse the 4707 out of "goroutine 4707 ["
-	b = bytes.TrimPrefix(b, goroutineSpace)
-	i := bytes.IndexByte(b, ' ')
-	if i < 0 {
-		panic(fmt.Sprintf("No space found in %q", b))
-	}
-	b = b[:i]
-	n, err := parseUintBytes(b, 10, 64)
-	if err != nil {
-		panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
-	}
-	return n
-}
-
-var littleBuf = sync.Pool{
-	New: func() interface{} {
-		buf := make([]byte, 64)
-		return &buf
-	},
-}
-
-// parseUintBytes is like strconv.ParseUint, but using a []byte.
-func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
-	var cutoff, maxVal uint64
-
-	if bitSize == 0 {
-		bitSize = int(strconv.IntSize)
-	}
-
-	s0 := s
-	switch {
-	case len(s) < 1:
-		err = strconv.ErrSyntax
-		goto Error
-
-	case 2 <= base && base <= 36:
-		// valid base; nothing to do
-
-	case base == 0:
-		// Look for octal, hex prefix.
-		switch {
-		case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
-			base = 16
-			s = s[2:]
-			if len(s) < 1 {
-				err = strconv.ErrSyntax
-				goto Error
-			}
-		case s[0] == '0':
-			base = 8
-		default:
-			base = 10
-		}
-
-	default:
-		err = errors.New("invalid base " + strconv.Itoa(base))
-		goto Error
-	}
-
-	n = 0
-	cutoff = cutoff64(base)
-	maxVal = 1<<uint(bitSize) - 1
-
-	for i := 0; i < len(s); i++ {
-		var v byte
-		d := s[i]
-		switch {
-		case '0' <= d && d <= '9':
-			v = d - '0'
-		case 'a' <= d && d <= 'z':
-			v = d - 'a' + 10
-		case 'A' <= d && d <= 'Z':
-			v = d - 'A' + 10
-		default:
-			n = 0
-			err = strconv.ErrSyntax
-			goto Error
-		}
-		if int(v) >= base {
-			n = 0
-			err = strconv.ErrSyntax
-			goto Error
-		}
-
-		if n >= cutoff {
-			// n*base overflows
-			n = 1<<64 - 1
-			err = strconv.ErrRange
-			goto Error
-		}
-		n *= uint64(base)
-
-		n1 := n + uint64(v)
-		if n1 < n || n1 > maxVal {
-			// n+v overflows
-			n = 1<<64 - 1
-			err = strconv.ErrRange
-			goto Error
-		}
-		n = n1
-	}
-
-	return n, nil
-
-Error:
-	return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
-}
-
-// Return the first number n such that n*base >= 1<<64.
-func cutoff64(base int) uint64 {
-	if base < 2 {
-		return 0
-	}
-	return (1<<64-1)/uint64(base) + 1
-}
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/headermap.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/headermap.go
deleted file mode 100644
index c2805f6a..00000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/headermap.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
- -package http2 - -import ( - "net/http" - "strings" -) - -var ( - commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case - commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case -) - -func init() { - for _, v := range []string{ - "accept", - "accept-charset", - "accept-encoding", - "accept-language", - "accept-ranges", - "age", - "access-control-allow-origin", - "allow", - "authorization", - "cache-control", - "content-disposition", - "content-encoding", - "content-language", - "content-length", - "content-location", - "content-range", - "content-type", - "cookie", - "date", - "etag", - "expect", - "expires", - "from", - "host", - "if-match", - "if-modified-since", - "if-none-match", - "if-unmodified-since", - "last-modified", - "link", - "location", - "max-forwards", - "proxy-authenticate", - "proxy-authorization", - "range", - "referer", - "refresh", - "retry-after", - "server", - "set-cookie", - "strict-transport-security", - "trailer", - "transfer-encoding", - "user-agent", - "vary", - "via", - "www-authenticate", - } { - chk := http.CanonicalHeaderKey(v) - commonLowerHeader[chk] = v - commonCanonHeader[v] = chk - } -} - -func lowerHeader(v string) string { - if s, ok := commonLowerHeader[v]; ok { - return s - } - return strings.ToLower(v) -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/encode.go deleted file mode 100644 index f9bb0339..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/encode.go +++ /dev/null @@ -1,251 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package hpack - -import ( - "io" -) - -const ( - uint32Max = ^uint32(0) - initialHeaderTableSize = 4096 -) - -type Encoder struct { - dynTab dynamicTable - // minSize is the minimum table size set by - // SetMaxDynamicTableSize after the previous Header Table Size - // Update. - minSize uint32 - // maxSizeLimit is the maximum table size this encoder - // supports. This will protect the encoder from too large - // size. - maxSizeLimit uint32 - // tableSizeUpdate indicates whether "Header Table Size - // Update" is required. - tableSizeUpdate bool - w io.Writer - buf []byte -} - -// NewEncoder returns a new Encoder which performs HPACK encoding. An -// encoded data is written to w. -func NewEncoder(w io.Writer) *Encoder { - e := &Encoder{ - minSize: uint32Max, - maxSizeLimit: initialHeaderTableSize, - tableSizeUpdate: false, - w: w, - } - e.dynTab.setMaxSize(initialHeaderTableSize) - return e -} - -// WriteField encodes f into a single Write to e's underlying Writer. -// This function may also produce bytes for "Header Table Size Update" -// if necessary. If produced, it is done before encoding f. 
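The WriteField method documented above (and defined just below) is the encoder's entire public write path. A small, hypothetical round trip through the exported hpack API — assuming the upstream package, not this vendored copy — looks like:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)

	// Each WriteField performs exactly one Write to buf.
	for _, f := range []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: "user-agent", Value: "demo/1.0"},
		{Name: "authorization", Value: "secret", Sensitive: true}, // never indexed
	} {
		if err := enc.WriteField(f); err != nil {
			log.Fatal(err)
		}
	}

	// Decode the block back; the emit callback sees fields in order.
	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
		fmt.Println(f)
	})
	if _, err := dec.Write(buf.Bytes()); err != nil {
		log.Fatal(err)
	}
	if err := dec.Close(); err != nil {
		log.Fatal(err)
	}
}
```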
-func (e *Encoder) WriteField(f HeaderField) error {
-	e.buf = e.buf[:0]
-
-	if e.tableSizeUpdate {
-		e.tableSizeUpdate = false
-		if e.minSize < e.dynTab.maxSize {
-			e.buf = appendTableSize(e.buf, e.minSize)
-		}
-		e.minSize = uint32Max
-		e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
-	}
-
-	idx, nameValueMatch := e.searchTable(f)
-	if nameValueMatch {
-		e.buf = appendIndexed(e.buf, idx)
-	} else {
-		indexing := e.shouldIndex(f)
-		if indexing {
-			e.dynTab.add(f)
-		}
-
-		if idx == 0 {
-			e.buf = appendNewName(e.buf, f, indexing)
-		} else {
-			e.buf = appendIndexedName(e.buf, f, idx, indexing)
-		}
-	}
-	n, err := e.w.Write(e.buf)
-	if err == nil && n != len(e.buf) {
-		err = io.ErrShortWrite
-	}
-	return err
-}
-
-// searchTable searches f in both static and dynamic header tables.
-// The static header table is searched first. Only when there is no
-// exact match for both name and value, the dynamic header table is
-// then searched. If there is no match, i is 0. If both name and value
-// match, i is the matched index and nameValueMatch becomes true. If
-// only name matches, i points to that index and nameValueMatch
-// becomes false.
-func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
-	for idx, hf := range staticTable {
-		if !constantTimeStringCompare(hf.Name, f.Name) {
-			continue
-		}
-		if i == 0 {
-			i = uint64(idx + 1)
-		}
-		if f.Sensitive {
-			continue
-		}
-		if !constantTimeStringCompare(hf.Value, f.Value) {
-			continue
-		}
-		i = uint64(idx + 1)
-		nameValueMatch = true
-		return
-	}
-
-	j, nameValueMatch := e.dynTab.search(f)
-	if nameValueMatch || (i == 0 && j != 0) {
-		i = j + uint64(len(staticTable))
-	}
-	return
-}
-
-// SetMaxDynamicTableSize changes the dynamic header table size to v.
-// The actual size is bounded by the value passed to
-// SetMaxDynamicTableSizeLimit.
-func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
-	if v > e.maxSizeLimit {
-		v = e.maxSizeLimit
-	}
-	if v < e.minSize {
-		e.minSize = v
-	}
-	e.tableSizeUpdate = true
-	e.dynTab.setMaxSize(v)
-}
-
-// SetMaxDynamicTableSizeLimit changes the maximum value that can be
-// specified in SetMaxDynamicTableSize to v. By default, it is set to
-// 4096, which is the same as the default dynamic header table size
-// described in the HPACK specification. If the current maximum
-// dynamic header table size is strictly greater than v, "Header Table
-// Size Update" will be done in the next WriteField call and the
-// maximum dynamic header table size is truncated to v.
-func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
-	e.maxSizeLimit = v
-	if e.dynTab.maxSize > v {
-		e.tableSizeUpdate = true
-		e.dynTab.setMaxSize(v)
-	}
-}
-
-// shouldIndex reports whether f should be indexed.
-func (e *Encoder) shouldIndex(f HeaderField) bool {
-	return !f.Sensitive && f.Size() <= e.dynTab.maxSize
-}
-
-// appendIndexed appends index i, as encoded in "Indexed Header Field"
-// representation, to dst and returns the extended buffer.
-func appendIndexed(dst []byte, i uint64) []byte {
-	first := len(dst)
-	dst = appendVarInt(dst, 7, i)
-	dst[first] |= 0x80
-	return dst
-}
-
-// appendNewName appends f, as encoded in one of "Literal Header field
-// - New Name" representation variants, to dst and returns the
-// extended buffer.
-//
-// If f.Sensitive is true, "Never Indexed" representation is used. If
-// f.Sensitive is false and indexing is true, "Incremental Indexing"
-// representation is used.
-func appendNewName(dst []byte, f HeaderField, indexing bool) []byte { - dst = append(dst, encodeTypeByte(indexing, f.Sensitive)) - dst = appendHpackString(dst, f.Name) - return appendHpackString(dst, f.Value) -} - -// appendIndexedName appends f and index i referring indexed name -// entry, as encoded in one of "Literal Header field - Indexed Name" -// representation variants, to dst and returns the extended buffer. -// -// If f.Sensitive is true, "Never Indexed" representation is used. If -// f.Sensitive is false and indexing is true, "Incremental Indexing" -// representation is used. -func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte { - first := len(dst) - var n byte - if indexing { - n = 6 - } else { - n = 4 - } - dst = appendVarInt(dst, n, i) - dst[first] |= encodeTypeByte(indexing, f.Sensitive) - return appendHpackString(dst, f.Value) -} - -// appendTableSize appends v, as encoded in "Header Table Size Update" -// representation, to dst and returns the extended buffer. -func appendTableSize(dst []byte, v uint32) []byte { - first := len(dst) - dst = appendVarInt(dst, 5, uint64(v)) - dst[first] |= 0x20 - return dst -} - -// appendVarInt appends i, as encoded in variable integer form using n -// bit prefix, to dst and returns the extended buffer. -// -// See -// http://http2.github.io/http2-spec/compression.html#integer.representation -func appendVarInt(dst []byte, n byte, i uint64) []byte { - k := uint64((1 << n) - 1) - if i < k { - return append(dst, byte(i)) - } - dst = append(dst, byte(k)) - i -= k - for ; i >= 128; i >>= 7 { - dst = append(dst, byte(0x80|(i&0x7f))) - } - return append(dst, byte(i)) -} - -// appendHpackString appends s, as encoded in "String Literal" -// representation, to dst and returns the the extended buffer. -// -// s will be encoded in Huffman codes only when it produces strictly -// shorter byte string. -func appendHpackString(dst []byte, s string) []byte { - huffmanLength := HuffmanEncodeLength(s) - if huffmanLength < uint64(len(s)) { - first := len(dst) - dst = appendVarInt(dst, 7, huffmanLength) - dst = AppendHuffmanString(dst, s) - dst[first] |= 0x80 - } else { - dst = appendVarInt(dst, 7, uint64(len(s))) - dst = append(dst, s...) - } - return dst -} - -// encodeTypeByte returns type byte. If sensitive is true, type byte -// for "Never Indexed" representation is returned. If sensitive is -// false and indexing is true, type byte for "Incremental Indexing" -// representation is returned. Otherwise, type byte for "Without -// Indexing" is returned. -func encodeTypeByte(indexing, sensitive bool) byte { - if sensitive { - return 0x10 - } - if indexing { - return 0x40 - } - return 0 -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/hpack.go deleted file mode 100644 index dcf257af..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/hpack.go +++ /dev/null @@ -1,542 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package hpack implements HPACK, a compression format for -// efficiently representing HTTP header fields in the context of HTTP/2. 
-// -// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09 -package hpack - -import ( - "bytes" - "errors" - "fmt" -) - -// A DecodingError is something the spec defines as a decoding error. -type DecodingError struct { - Err error -} - -func (de DecodingError) Error() string { - return fmt.Sprintf("decoding error: %v", de.Err) -} - -// An InvalidIndexError is returned when an encoder references a table -// entry before the static table or after the end of the dynamic table. -type InvalidIndexError int - -func (e InvalidIndexError) Error() string { - return fmt.Sprintf("invalid indexed representation index %d", int(e)) -} - -// A HeaderField is a name-value pair. Both the name and value are -// treated as opaque sequences of octets. -type HeaderField struct { - Name, Value string - - // Sensitive means that this header field should never be - // indexed. - Sensitive bool -} - -// IsPseudo reports whether the header field is an http2 pseudo header. -// That is, it reports whether it starts with a colon. -// It is not otherwise guaranteed to be a valid psuedo header field, -// though. -func (hf HeaderField) IsPseudo() bool { - return len(hf.Name) != 0 && hf.Name[0] == ':' -} - -func (hf HeaderField) String() string { - var suffix string - if hf.Sensitive { - suffix = " (sensitive)" - } - return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) -} - -// Size returns the size of an entry per RFC 7540 section 5.2. -func (hf HeaderField) Size() uint32 { - // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 - // "The size of the dynamic table is the sum of the size of - // its entries. The size of an entry is the sum of its name's - // length in octets (as defined in Section 5.2), its value's - // length in octets (see Section 5.2), plus 32. The size of - // an entry is calculated using the length of the name and - // value without any Huffman encoding applied." - - // This can overflow if somebody makes a large HeaderField - // Name and/or Value by hand, but we don't care, because that - // won't happen on the wire because the encoding doesn't allow - // it. - return uint32(len(hf.Name) + len(hf.Value) + 32) -} - -// A Decoder is the decoding context for incremental processing of -// header blocks. -type Decoder struct { - dynTab dynamicTable - emit func(f HeaderField) - - emitEnabled bool // whether calls to emit are enabled - maxStrLen int // 0 means unlimited - - // buf is the unparsed buffer. It's only written to - // saveBuf if it was truncated in the middle of a header - // block. Because it's usually not owned, we can only - // process it under Write. - buf []byte // not owned; only valid during Write - - // saveBuf is previous data passed to Write which we weren't able - // to fully parse before. Unlike buf, we own this data. - saveBuf bytes.Buffer -} - -// NewDecoder returns a new decoder with the provided maximum dynamic -// table size. The emitFunc will be called for each valid field -// parsed, in the same goroutine as calls to Write, before Write returns. -func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder { - d := &Decoder{ - emit: emitFunc, - emitEnabled: true, - } - d.dynTab.allowedMaxSize = maxDynamicTableSize - d.dynTab.setMaxSize(maxDynamicTableSize) - return d -} - -// ErrStringLength is returned by Decoder.Write when the max string length -// (as configured by Decoder.SetMaxStringLength) would be violated. 
-var ErrStringLength = errors.New("hpack: string too long") - -// SetMaxStringLength sets the maximum size of a HeaderField name or -// value string. If a string exceeds this length (even after any -// decompression), Write will return ErrStringLength. -// A value of 0 means unlimited and is the default from NewDecoder. -func (d *Decoder) SetMaxStringLength(n int) { - d.maxStrLen = n -} - -// SetEmitFunc changes the callback used when new header fields -// are decoded. -// It must be non-nil. It does not affect EmitEnabled. -func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) { - d.emit = emitFunc -} - -// SetEmitEnabled controls whether the emitFunc provided to NewDecoder -// should be called. The default is true. -// -// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE -// while still decoding and keeping in-sync with decoder state, but -// without doing unnecessary decompression or generating unnecessary -// garbage for header fields past the limit. -func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v } - -// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder -// are currently enabled. The default is true. -func (d *Decoder) EmitEnabled() bool { return d.emitEnabled } - -// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their -// underlying buffers for garbage reasons. - -func (d *Decoder) SetMaxDynamicTableSize(v uint32) { - d.dynTab.setMaxSize(v) -} - -// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded -// stream (via dynamic table size updates) may set the maximum size -// to. -func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { - d.dynTab.allowedMaxSize = v -} - -type dynamicTable struct { - // ents is the FIFO described at - // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 - // The newest (low index) is append at the end, and items are - // evicted from the front. - ents []HeaderField - size uint32 - maxSize uint32 // current maxSize - allowedMaxSize uint32 // maxSize may go up to this, inclusive -} - -func (dt *dynamicTable) setMaxSize(v uint32) { - dt.maxSize = v - dt.evict() -} - -// TODO: change dynamicTable to be a struct with a slice and a size int field, -// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1: -// -// -// Then make add increment the size. maybe the max size should move from Decoder to -// dynamicTable and add should return an ok bool if there was enough space. -// -// Later we'll need a remove operation on dynamicTable. - -func (dt *dynamicTable) add(f HeaderField) { - dt.ents = append(dt.ents, f) - dt.size += f.Size() - dt.evict() -} - -// If we're too big, evict old stuff (front of the slice) -func (dt *dynamicTable) evict() { - base := dt.ents // keep base pointer of slice - for dt.size > dt.maxSize { - dt.size -= dt.ents[0].Size() - dt.ents = dt.ents[1:] - } - - // Shift slice contents down if we evicted things. - if len(dt.ents) != len(base) { - copy(base, dt.ents) - dt.ents = base[:len(dt.ents)] - } -} - -// constantTimeStringCompare compares string a and b in a constant -// time manner. -func constantTimeStringCompare(a, b string) bool { - if len(a) != len(b) { - return false - } - - c := byte(0) - - for i := 0; i < len(a); i++ { - c |= a[i] ^ b[i] - } - - return c == 0 -} - -// Search searches f in the table. The return value i is 0 if there is -// no name match. If there is name match or name/value match, i is the -// index of that entry (1-based). 
If both name and value match, -// nameValueMatch becomes true. -func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) { - l := len(dt.ents) - for j := l - 1; j >= 0; j-- { - ent := dt.ents[j] - if !constantTimeStringCompare(ent.Name, f.Name) { - continue - } - if i == 0 { - i = uint64(l - j) - } - if f.Sensitive { - continue - } - if !constantTimeStringCompare(ent.Value, f.Value) { - continue - } - i = uint64(l - j) - nameValueMatch = true - return - } - return -} - -func (d *Decoder) maxTableIndex() int { - return len(d.dynTab.ents) + len(staticTable) -} - -func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { - if i < 1 { - return - } - if i > uint64(d.maxTableIndex()) { - return - } - if i <= uint64(len(staticTable)) { - return staticTable[i-1], true - } - dents := d.dynTab.ents - return dents[len(dents)-(int(i)-len(staticTable))], true -} - -// Decode decodes an entire block. -// -// TODO: remove this method and make it incremental later? This is -// easier for debugging now. -func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { - var hf []HeaderField - saveFunc := d.emit - defer func() { d.emit = saveFunc }() - d.emit = func(f HeaderField) { hf = append(hf, f) } - if _, err := d.Write(p); err != nil { - return nil, err - } - if err := d.Close(); err != nil { - return nil, err - } - return hf, nil -} - -func (d *Decoder) Close() error { - if d.saveBuf.Len() > 0 { - d.saveBuf.Reset() - return DecodingError{errors.New("truncated headers")} - } - return nil -} - -func (d *Decoder) Write(p []byte) (n int, err error) { - if len(p) == 0 { - // Prevent state machine CPU attacks (making us redo - // work up to the point of finding out we don't have - // enough data) - return - } - // Only copy the data if we have to. Optimistically assume - // that p will contain a complete header block. - if d.saveBuf.Len() == 0 { - d.buf = p - } else { - d.saveBuf.Write(p) - d.buf = d.saveBuf.Bytes() - d.saveBuf.Reset() - } - - for len(d.buf) > 0 { - err = d.parseHeaderFieldRepr() - if err == errNeedMore { - // Extra paranoia, making sure saveBuf won't - // get too large. All the varint and string - // reading code earlier should already catch - // overlong things and return ErrStringLength, - // but keep this as a last resort. - const varIntOverhead = 8 // conservative - if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) { - return 0, ErrStringLength - } - d.saveBuf.Write(d.buf) - return len(p), nil - } - if err != nil { - break - } - } - return len(p), err -} - -// errNeedMore is an internal sentinel error value that means the -// buffer is truncated and we need to read more data before we can -// continue parsing. -var errNeedMore = errors.New("need more data") - -type indexType int - -const ( - indexedTrue indexType = iota - indexedFalse - indexedNever -) - -func (v indexType) indexed() bool { return v == indexedTrue } -func (v indexType) sensitive() bool { return v == indexedNever } - -// returns errNeedMore if there isn't enough data available. -// any other error is fatal. -// consumes d.buf iff it returns nil. -// precondition: must be called with len(d.buf) > 0 -func (d *Decoder) parseHeaderFieldRepr() error { - b := d.buf[0] - switch { - case b&128 != 0: - // Indexed representation. - // High bit set? 
-		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
-		return d.parseFieldIndexed()
-	case b&192 == 64:
-		// 6.2.1 Literal Header Field with Incremental Indexing
-		// 0b01xxxxxx: top two bits are 01
-		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
-		return d.parseFieldLiteral(6, indexedTrue)
-	case b&240 == 0:
-		// 6.2.2 Literal Header Field without Indexing
-		// 0b0000xxxx: top four bits are 0000
-		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
-		return d.parseFieldLiteral(4, indexedFalse)
-	case b&240 == 16:
-		// 6.2.3 Literal Header Field never Indexed
-		// 0b0001xxxx: top four bits are 0001
-		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
-		return d.parseFieldLiteral(4, indexedNever)
-	case b&224 == 32:
-		// 6.3 Dynamic Table Size Update
-		// Top three bits are '001'.
-		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
-		return d.parseDynamicTableSizeUpdate()
-	}
-
-	return DecodingError{errors.New("invalid encoding")}
-}
-
-// (same invariants and behavior as parseHeaderFieldRepr)
-func (d *Decoder) parseFieldIndexed() error {
-	buf := d.buf
-	idx, buf, err := readVarInt(7, buf)
-	if err != nil {
-		return err
-	}
-	hf, ok := d.at(idx)
-	if !ok {
-		return DecodingError{InvalidIndexError(idx)}
-	}
-	d.buf = buf
-	return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
-}
-
-// (same invariants and behavior as parseHeaderFieldRepr)
-func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
-	buf := d.buf
-	nameIdx, buf, err := readVarInt(n, buf)
-	if err != nil {
-		return err
-	}
-
-	var hf HeaderField
-	wantStr := d.emitEnabled || it.indexed()
-	if nameIdx > 0 {
-		ihf, ok := d.at(nameIdx)
-		if !ok {
-			return DecodingError{InvalidIndexError(nameIdx)}
-		}
-		hf.Name = ihf.Name
-	} else {
-		hf.Name, buf, err = d.readString(buf, wantStr)
-		if err != nil {
-			return err
-		}
-	}
-	hf.Value, buf, err = d.readString(buf, wantStr)
-	if err != nil {
-		return err
-	}
-	d.buf = buf
-	if it.indexed() {
-		d.dynTab.add(hf)
-	}
-	hf.Sensitive = it.sensitive()
-	return d.callEmit(hf)
-}
-
-func (d *Decoder) callEmit(hf HeaderField) error {
-	if d.maxStrLen != 0 {
-		if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
-			return ErrStringLength
-		}
-	}
-	if d.emitEnabled {
-		d.emit(hf)
-	}
-	return nil
-}
-
-// (same invariants and behavior as parseHeaderFieldRepr)
-func (d *Decoder) parseDynamicTableSizeUpdate() error {
-	buf := d.buf
-	size, buf, err := readVarInt(5, buf)
-	if err != nil {
-		return err
-	}
-	if size > uint64(d.dynTab.allowedMaxSize) {
-		return DecodingError{errors.New("dynamic table size update too large")}
-	}
-	d.dynTab.setMaxSize(uint32(size))
-	d.buf = buf
-	return nil
-}
-
-var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
-
-// readVarInt reads an unsigned variable length integer off the
-// beginning of p. n is the parameter as described in
-// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
-//
-// n must always be between 1 and 8.
-//
-// The returned remain buffer is either a smaller suffix of p, or err != nil.
-// The error is errNeedMore if p doesn't contain a complete integer.
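The prefixed-integer format that readVarInt (below) decodes is easiest to see through RFC 7541's worked example: 1337 with a 5-bit prefix encodes to the three bytes 1f 9a 0a. A self-contained sketch of the encoding direction (encodeVarInt is a hypothetical helper, not part of the package):

```go
package main

import "fmt"

// encodeVarInt encodes i using an n-bit prefix, per RFC 7541 section 5.1.
func encodeVarInt(prefixBits byte, i uint64) []byte {
	k := uint64(1)<<prefixBits - 1 // largest value that fits in the prefix
	if i < k {
		return []byte{byte(i)}
	}
	out := []byte{byte(k)} // prefix saturated; continue in 7-bit groups
	i -= k
	for ; i >= 128; i >>= 7 {
		out = append(out, byte(0x80|i&0x7f)) // high bit marks continuation
	}
	return append(out, byte(i))
}

func main() {
	fmt.Printf("% x\n", encodeVarInt(5, 1337)) // 1f 9a 0a
}
```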
-func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
-	if n < 1 || n > 8 {
-		panic("bad n")
-	}
-	if len(p) == 0 {
-		return 0, p, errNeedMore
-	}
-	i = uint64(p[0])
-	if n < 8 {
-		i &= (1 << uint64(n)) - 1
-	}
-	if i < (1<<uint64(n))-1 {
-		return i, p[1:], nil
-	}
-
-	origP := p
-	p = p[1:]
-	var m uint64
-	for len(p) > 0 {
-		b := p[0]
-		p = p[1:]
-		i += uint64(b&127) << m
-		if b&128 == 0 {
-			return i, p, nil
-		}
-		m += 7
-		if m >= 63 { // TODO: proper overflow check. making this up.
-			return 0, origP, errVarintOverflow
-		}
-	}
-	return 0, origP, errNeedMore
-}
-
-// readString decodes an hpack string from p.
-//
-// wantStr is whether s will be used. If false, decompression and
-// []byte->string garbage are skipped if s will be ignored
-// anyway. This does mean that huffman decoding errors for non-indexed
-// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
-// is returning an error anyway, and because they're not indexed, the error
-// won't affect the decoding state.
-func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
-	if len(p) == 0 {
-		return "", p, errNeedMore
-	}
-	isHuff := p[0]&128 != 0
-	strLen, p, err := readVarInt(7, p)
-	if err != nil {
-		return "", p, err
-	}
-	if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
-		return "", nil, ErrStringLength
-	}
-	if uint64(len(p)) < strLen {
-		return "", p, errNeedMore
-	}
-	if !isHuff {
-		if wantStr {
-			s = string(p[:strLen])
-		}
-		return s, p[strLen:], nil
-	}
-
-	if wantStr {
-		buf := bufPool.Get().(*bytes.Buffer)
-		buf.Reset() // don't trust others
-		defer bufPool.Put(buf)
-		if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
-			buf.Reset()
-			return "", nil, err
-		}
-		s = buf.String()
-		buf.Reset() // be nice to GC
-	}
-	return s, p[strLen:], nil
-}
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/huffman.go
deleted file mode 100644
index eb4b1f05..00000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/huffman.go
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package hpack
-
-import (
-	"bytes"
-	"errors"
-	"io"
-	"sync"
-)
-
-var bufPool = sync.Pool{
-	New: func() interface{} { return new(bytes.Buffer) },
-}
-
-// HuffmanDecode decodes the string in v and writes the expanded
-// result to w, returning the number of bytes written to w and the
-// Write call's return value. At most one Write call is made.
-func HuffmanDecode(w io.Writer, v []byte) (int, error) {
-	buf := bufPool.Get().(*bytes.Buffer)
-	buf.Reset()
-	defer bufPool.Put(buf)
-	if err := huffmanDecode(buf, 0, v); err != nil {
-		return 0, err
-	}
-	return w.Write(buf.Bytes())
-}
-
-// HuffmanDecodeToString decodes the string in v.
-func HuffmanDecodeToString(v []byte) (string, error) {
-	buf := bufPool.Get().(*bytes.Buffer)
-	buf.Reset()
-	defer bufPool.Put(buf)
-	if err := huffmanDecode(buf, 0, v); err != nil {
-		return "", err
-	}
-	return buf.String(), nil
-}
-
-// ErrInvalidHuffman is returned for errors found decoding
-// Huffman-encoded strings.
-var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
-
-// huffmanDecode decodes v to buf.
-// If maxLen is greater than 0, attempts to write more to buf than
-// maxLen bytes will return ErrStringLength.
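The exported Huffman helpers in this deleted file are easy to exercise against RFC 7541 appendix C.4.1, where "www.example.com" compresses from 15 raw bytes to 12 Huffman-coded bytes. A hypothetical check using the upstream hpack package:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	const s = "www.example.com"

	enc := hpack.AppendHuffmanString(nil, s)
	fmt.Printf("raw=%d bytes huffman=%d bytes (% x)\n", len(s), len(enc), enc)

	dec, err := hpack.HuffmanDecodeToString(enc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(dec == s) // true
}
```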
-func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
-	n := rootHuffmanNode
-	cur, nbits := uint(0), uint8(0)
-	for _, b := range v {
-		cur = cur<<8 | uint(b)
-		nbits += 8
-		for nbits >= 8 {
-			idx := byte(cur >> (nbits - 8))
-			n = n.children[idx]
-			if n == nil {
-				return ErrInvalidHuffman
-			}
-			if n.children == nil {
-				if maxLen != 0 && buf.Len() == maxLen {
-					return ErrStringLength
-				}
-				buf.WriteByte(n.sym)
-				nbits -= n.codeLen
-				n = rootHuffmanNode
-			} else {
-				nbits -= 8
-			}
-		}
-	}
-	for nbits > 0 {
-		n = n.children[byte(cur<<(8-nbits))]
-		if n.children != nil || n.codeLen > nbits {
-			break
-		}
-		buf.WriteByte(n.sym)
-		nbits -= n.codeLen
-		n = rootHuffmanNode
-	}
-	return nil
-}
-
-type node struct {
-	// children is non-nil for internal nodes
-	children []*node
-
-	// The following are only valid if children is nil:
-	codeLen uint8 // number of bits that led to the output of sym
-	sym     byte  // output symbol
-}
-
-func newInternalNode() *node {
-	return &node{children: make([]*node, 256)}
-}
-
-var rootHuffmanNode = newInternalNode()
-
-func init() {
-	if len(huffmanCodes) != 256 {
-		panic("unexpected size")
-	}
-	for i, code := range huffmanCodes {
-		addDecoderNode(byte(i), code, huffmanCodeLen[i])
-	}
-}
-
-func addDecoderNode(sym byte, code uint32, codeLen uint8) {
-	cur := rootHuffmanNode
-	for codeLen > 8 {
-		codeLen -= 8
-		i := uint8(code >> codeLen)
-		if cur.children[i] == nil {
-			cur.children[i] = newInternalNode()
-		}
-		cur = cur.children[i]
-	}
-	shift := 8 - codeLen
-	start, end := int(uint8(code<<shift)), int(1<<shift)
-	for i := start; i < start+end; i++ {
-		cur.children[i] = &node{sym: sym, codeLen: codeLen}
-	}
-}
-
-// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
-// and returns the extended buffer.
-func AppendHuffmanString(dst []byte, s string) []byte {
-	rembits := uint8(8)
-
-	for i := 0; i < len(s); i++ {
-		if rembits == 8 {
-			dst = append(dst, 0)
-		}
-		dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
-	}
-
-	if rembits < 8 {
-		// special EOS symbol
-		code := uint32(0x3fffffff)
-		nbits := uint8(30)
-
-		t := uint8(code >> (nbits - rembits))
-		dst[len(dst)-1] |= t
-	}
-
-	return dst
-}
-
-// HuffmanEncodeLength returns the number of bytes required to encode
-// s in Huffman codes. The result is rounded up to a byte boundary.
-func HuffmanEncodeLength(s string) uint64 {
-	n := uint64(0)
-	for i := 0; i < len(s); i++ {
-		n += uint64(huffmanCodeLen[s[i]])
-	}
-	return (n + 7) / 8
-}
-
-// appendByteToHuffmanCode appends Huffman code for c to dst and
-// returns the extended buffer and the remaining bits in the last
-// element. The appending is not byte aligned and the remaining bits
-// in the last element of dst is given in rembits.
-func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
-	code := huffmanCodes[c]
-	nbits := huffmanCodeLen[c]
-
-	for {
-		if rembits > nbits {
-			t := uint8(code << (rembits - nbits))
-			dst[len(dst)-1] |= t
-			rembits -= nbits
-			break
-		}
-
-		t := uint8(code >> (nbits - rembits))
-		dst[len(dst)-1] |= t
-
-		nbits -= rembits
-		rembits = 8
-
-		if nbits == 0 {
-			break
-		}
-
-		dst = append(dst, 0)
-	}
-
-	return dst, rembits
-}
diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/tables.go
deleted file mode 100644
index b9283a02..00000000
--- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/hpack/tables.go
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
- -package hpack - -func pair(name, value string) HeaderField { - return HeaderField{Name: name, Value: value} -} - -// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B -var staticTable = [...]HeaderField{ - pair(":authority", ""), // index 1 (1-based) - pair(":method", "GET"), - pair(":method", "POST"), - pair(":path", "/"), - pair(":path", "/index.html"), - pair(":scheme", "http"), - pair(":scheme", "https"), - pair(":status", "200"), - pair(":status", "204"), - pair(":status", "206"), - pair(":status", "304"), - pair(":status", "400"), - pair(":status", "404"), - pair(":status", "500"), - pair("accept-charset", ""), - pair("accept-encoding", "gzip, deflate"), - pair("accept-language", ""), - pair("accept-ranges", ""), - pair("accept", ""), - pair("access-control-allow-origin", ""), - pair("age", ""), - pair("allow", ""), - pair("authorization", ""), - pair("cache-control", ""), - pair("content-disposition", ""), - pair("content-encoding", ""), - pair("content-language", ""), - pair("content-length", ""), - pair("content-location", ""), - pair("content-range", ""), - pair("content-type", ""), - pair("cookie", ""), - pair("date", ""), - pair("etag", ""), - pair("expect", ""), - pair("expires", ""), - pair("from", ""), - pair("host", ""), - pair("if-match", ""), - pair("if-modified-since", ""), - pair("if-none-match", ""), - pair("if-range", ""), - pair("if-unmodified-since", ""), - pair("last-modified", ""), - pair("link", ""), - pair("location", ""), - pair("max-forwards", ""), - pair("proxy-authenticate", ""), - pair("proxy-authorization", ""), - pair("range", ""), - pair("referer", ""), - pair("refresh", ""), - pair("retry-after", ""), - pair("server", ""), - pair("set-cookie", ""), - pair("strict-transport-security", ""), - pair("transfer-encoding", ""), - pair("user-agent", ""), - pair("vary", ""), - pair("via", ""), - pair("www-authenticate", ""), -} - -var huffmanCodes = [256]uint32{ - 0x1ff8, - 0x7fffd8, - 0xfffffe2, - 0xfffffe3, - 0xfffffe4, - 0xfffffe5, - 0xfffffe6, - 0xfffffe7, - 0xfffffe8, - 0xffffea, - 0x3ffffffc, - 0xfffffe9, - 0xfffffea, - 0x3ffffffd, - 0xfffffeb, - 0xfffffec, - 0xfffffed, - 0xfffffee, - 0xfffffef, - 0xffffff0, - 0xffffff1, - 0xffffff2, - 0x3ffffffe, - 0xffffff3, - 0xffffff4, - 0xffffff5, - 0xffffff6, - 0xffffff7, - 0xffffff8, - 0xffffff9, - 0xffffffa, - 0xffffffb, - 0x14, - 0x3f8, - 0x3f9, - 0xffa, - 0x1ff9, - 0x15, - 0xf8, - 0x7fa, - 0x3fa, - 0x3fb, - 0xf9, - 0x7fb, - 0xfa, - 0x16, - 0x17, - 0x18, - 0x0, - 0x1, - 0x2, - 0x19, - 0x1a, - 0x1b, - 0x1c, - 0x1d, - 0x1e, - 0x1f, - 0x5c, - 0xfb, - 0x7ffc, - 0x20, - 0xffb, - 0x3fc, - 0x1ffa, - 0x21, - 0x5d, - 0x5e, - 0x5f, - 0x60, - 0x61, - 0x62, - 0x63, - 0x64, - 0x65, - 0x66, - 0x67, - 0x68, - 0x69, - 0x6a, - 0x6b, - 0x6c, - 0x6d, - 0x6e, - 0x6f, - 0x70, - 0x71, - 0x72, - 0xfc, - 0x73, - 0xfd, - 0x1ffb, - 0x7fff0, - 0x1ffc, - 0x3ffc, - 0x22, - 0x7ffd, - 0x3, - 0x23, - 0x4, - 0x24, - 0x5, - 0x25, - 0x26, - 0x27, - 0x6, - 0x74, - 0x75, - 0x28, - 0x29, - 0x2a, - 0x7, - 0x2b, - 0x76, - 0x2c, - 0x8, - 0x9, - 0x2d, - 0x77, - 0x78, - 0x79, - 0x7a, - 0x7b, - 0x7ffe, - 0x7fc, - 0x3ffd, - 0x1ffd, - 0xffffffc, - 0xfffe6, - 0x3fffd2, - 0xfffe7, - 0xfffe8, - 0x3fffd3, - 0x3fffd4, - 0x3fffd5, - 0x7fffd9, - 0x3fffd6, - 0x7fffda, - 0x7fffdb, - 0x7fffdc, - 0x7fffdd, - 0x7fffde, - 0xffffeb, - 0x7fffdf, - 0xffffec, - 0xffffed, - 0x3fffd7, - 0x7fffe0, - 0xffffee, - 0x7fffe1, - 0x7fffe2, - 0x7fffe3, - 0x7fffe4, - 0x1fffdc, - 0x3fffd8, - 0x7fffe5, - 0x3fffd9, - 0x7fffe6, - 0x7fffe7, - 0xffffef, - 
0x3fffda, - 0x1fffdd, - 0xfffe9, - 0x3fffdb, - 0x3fffdc, - 0x7fffe8, - 0x7fffe9, - 0x1fffde, - 0x7fffea, - 0x3fffdd, - 0x3fffde, - 0xfffff0, - 0x1fffdf, - 0x3fffdf, - 0x7fffeb, - 0x7fffec, - 0x1fffe0, - 0x1fffe1, - 0x3fffe0, - 0x1fffe2, - 0x7fffed, - 0x3fffe1, - 0x7fffee, - 0x7fffef, - 0xfffea, - 0x3fffe2, - 0x3fffe3, - 0x3fffe4, - 0x7ffff0, - 0x3fffe5, - 0x3fffe6, - 0x7ffff1, - 0x3ffffe0, - 0x3ffffe1, - 0xfffeb, - 0x7fff1, - 0x3fffe7, - 0x7ffff2, - 0x3fffe8, - 0x1ffffec, - 0x3ffffe2, - 0x3ffffe3, - 0x3ffffe4, - 0x7ffffde, - 0x7ffffdf, - 0x3ffffe5, - 0xfffff1, - 0x1ffffed, - 0x7fff2, - 0x1fffe3, - 0x3ffffe6, - 0x7ffffe0, - 0x7ffffe1, - 0x3ffffe7, - 0x7ffffe2, - 0xfffff2, - 0x1fffe4, - 0x1fffe5, - 0x3ffffe8, - 0x3ffffe9, - 0xffffffd, - 0x7ffffe3, - 0x7ffffe4, - 0x7ffffe5, - 0xfffec, - 0xfffff3, - 0xfffed, - 0x1fffe6, - 0x3fffe9, - 0x1fffe7, - 0x1fffe8, - 0x7ffff3, - 0x3fffea, - 0x3fffeb, - 0x1ffffee, - 0x1ffffef, - 0xfffff4, - 0xfffff5, - 0x3ffffea, - 0x7ffff4, - 0x3ffffeb, - 0x7ffffe6, - 0x3ffffec, - 0x3ffffed, - 0x7ffffe7, - 0x7ffffe8, - 0x7ffffe9, - 0x7ffffea, - 0x7ffffeb, - 0xffffffe, - 0x7ffffec, - 0x7ffffed, - 0x7ffffee, - 0x7ffffef, - 0x7fffff0, - 0x3ffffee, -} - -var huffmanCodeLen = [256]uint8{ - 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, - 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, - 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, - 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, - 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, - 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, - 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, - 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, - 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, - 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, - 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, - 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, - 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, - 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, - 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/http2.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/http2.go deleted file mode 100644 index 0529b63e..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/http2.go +++ /dev/null @@ -1,463 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package http2 implements the HTTP/2 protocol. -// -// This package is low-level and intended to be used directly by very -// few people. Most users will use it indirectly through the automatic -// use by the net/http package (from Go 1.6 and later). -// For use in earlier Go versions see ConfigureServer. (Transport support -// requires Go 1.6 or later) -// -// See https://http2.github.io/ for more information on HTTP/2. -// -// See https://http2.golang.org/ for a test server running this code. 
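The package comment above mentions ConfigureServer for servers built with Go versions before 1.6. A minimal, hypothetical use, assuming the upstream golang.org/x/net/http2 package (the certificate paths are placeholders):

```go
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}

	// Opt the server in to HTTP/2; from Go 1.6 net/http does this automatically.
	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
		log.Fatal(err)
	}

	// h2 is negotiated via TLS ALPN, so serve over TLS.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
```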
-package http2 - -import ( - "bufio" - "crypto/tls" - "errors" - "fmt" - "io" - "net/http" - "os" - "sort" - "strconv" - "strings" - "sync" -) - -var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool -) - -func init() { - e := os.Getenv("GODEBUG") - if strings.Contains(e, "http2debug=1") { - VerboseLogs = true - } - if strings.Contains(e, "http2debug=2") { - VerboseLogs = true - logFrameWrites = true - logFrameReads = true - } -} - -const ( - // ClientPreface is the string that must be sent by new - // connections from clients. - ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" - - // SETTINGS_MAX_FRAME_SIZE default - // http://http2.github.io/http2-spec/#rfc.section.6.5.2 - initialMaxFrameSize = 16384 - - // NextProtoTLS is the NPN/ALPN protocol negotiated during - // HTTP/2's TLS setup. - NextProtoTLS = "h2" - - // http://http2.github.io/http2-spec/#SettingValues - initialHeaderTableSize = 4096 - - initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size - - defaultMaxReadFrameSize = 1 << 20 -) - -var ( - clientPreface = []byte(ClientPreface) -) - -type streamState int - -const ( - stateIdle streamState = iota - stateOpen - stateHalfClosedLocal - stateHalfClosedRemote - stateResvLocal - stateResvRemote - stateClosed -) - -var stateName = [...]string{ - stateIdle: "Idle", - stateOpen: "Open", - stateHalfClosedLocal: "HalfClosedLocal", - stateHalfClosedRemote: "HalfClosedRemote", - stateResvLocal: "ResvLocal", - stateResvRemote: "ResvRemote", - stateClosed: "Closed", -} - -func (st streamState) String() string { - return stateName[st] -} - -// Setting is a setting parameter: which setting it is, and its value. -type Setting struct { - // ID is which setting is being set. - // See http://http2.github.io/http2-spec/#SettingValues - ID SettingID - - // Val is the value. - Val uint32 -} - -func (s Setting) String() string { - return fmt.Sprintf("[%v = %d]", s.ID, s.Val) -} - -// Valid reports whether the setting is valid. -func (s Setting) Valid() error { - // Limits and error codes from 6.5.2 Defined SETTINGS Parameters - switch s.ID { - case SettingEnablePush: - if s.Val != 1 && s.Val != 0 { - return ConnectionError(ErrCodeProtocol) - } - case SettingInitialWindowSize: - if s.Val > 1<<31-1 { - return ConnectionError(ErrCodeFlowControl) - } - case SettingMaxFrameSize: - if s.Val < 16384 || s.Val > 1<<24-1 { - return ConnectionError(ErrCodeProtocol) - } - } - return nil -} - -// A SettingID is an HTTP/2 setting as defined in -// http://http2.github.io/http2-spec/#iana-settings -type SettingID uint16 - -const ( - SettingHeaderTableSize SettingID = 0x1 - SettingEnablePush SettingID = 0x2 - SettingMaxConcurrentStreams SettingID = 0x3 - SettingInitialWindowSize SettingID = 0x4 - SettingMaxFrameSize SettingID = 0x5 - SettingMaxHeaderListSize SettingID = 0x6 -) - -var settingName = map[SettingID]string{ - SettingHeaderTableSize: "HEADER_TABLE_SIZE", - SettingEnablePush: "ENABLE_PUSH", - SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", - SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", - SettingMaxFrameSize: "MAX_FRAME_SIZE", - SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", -} - -func (s SettingID) String() string { - if v, ok := settingName[s]; ok { - return v - } - return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) -} - -var ( - errInvalidHeaderFieldName = errors.New("http2: invalid header field name") - errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") -) - -// validHeaderFieldName reports whether v is a valid header field name (key). 
-// RFC 7230 says:
-//   header-field   = field-name ":" OWS field-value OWS
-//   field-name     = token
-//   tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
-//           "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
-// Further, http2 says:
-//   "Just as in HTTP/1.x, header field names are strings of ASCII
-//   characters that are compared in a case-insensitive
-//   fashion. However, header field names MUST be converted to
-//   lowercase prior to their encoding in HTTP/2. "
-func validHeaderFieldName(v string) bool {
-	if len(v) == 0 {
-		return false
-	}
-	for _, r := range v {
-		if int(r) >= len(isTokenTable) || ('A' <= r && r <= 'Z') {
-			return false
-		}
-		if !isTokenTable[byte(r)] {
-			return false
-		}
-	}
-	return true
-}
-
-// validHeaderFieldValue reports whether v is a valid header field value.
-//
-// RFC 7230 says:
-//   field-value    = *( field-content / obs-fold )
-//   obs-fold       = N/A to http2, and deprecated
-//   field-content  = field-vchar [ 1*( SP / HTAB ) field-vchar ]
-//   field-vchar    = VCHAR / obs-text
-//   obs-text       = %x80-FF
-//   VCHAR          = "any visible [USASCII] character"
-//
-// http2 further says: "Similarly, HTTP/2 allows header field values
-// that are not valid. While most of the values that can be encoded
-// will not alter header field parsing, carriage return (CR, ASCII
-// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII
-// 0x0) might be exploited by an attacker if they are translated
-// verbatim. Any request or response that contains a character not
-// permitted in a header field value MUST be treated as malformed
-// (Section 8.1.2.6). Valid characters are defined by the
-// field-content ABNF rule in Section 3.2 of [RFC7230]."
-//
-// This function does not (yet?) properly handle the rejection of
-// strings that begin or end with SP or HTAB.
-func validHeaderFieldValue(v string) bool {
-	for i := 0; i < len(v); i++ {
-		if b := v[i]; b < ' ' && b != '\t' || b == 0x7f {
-			return false
-		}
-	}
-	return true
-}
-
-var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)
-
-func init() {
-	for i := 100; i <= 999; i++ {
-		if v := http.StatusText(i); v != "" {
-			httpCodeStringCommon[i] = strconv.Itoa(i)
-		}
-	}
-}
-
-func httpCodeString(code int) string {
-	if s, ok := httpCodeStringCommon[code]; ok {
-		return s
-	}
-	return strconv.Itoa(code)
-}
-
-// from pkg io
-type stringWriter interface {
-	WriteString(s string) (n int, err error)
-}
-
-// A gate lets two goroutines coordinate their activities.
-type gate chan struct{}
-
-func (g gate) Done() { g <- struct{}{} }
-func (g gate) Wait() { <-g }
-
-// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
-type closeWaiter chan struct{}
-
-// Init makes a closeWaiter usable.
-// It exists so that a closeWaiter value can be placed inside a
-// larger struct and have the Mutex and Cond's memory in the same
-// allocation.
-func (cw *closeWaiter) Init() {
-	*cw = make(chan struct{})
-}
-
-// Close marks the closeWaiter as closed and unblocks any waiters.
-func (cw closeWaiter) Close() {
-	close(cw)
-}
-
-// Wait waits for the closeWaiter to become closed.
-func (cw closeWaiter) Wait() {
-	<-cw
-}
-
-// bufferedWriter is a buffered writer that writes to w.
-// Its buffered writer is lazily allocated as needed, to minimize
-// idle memory usage with many connections.
-type bufferedWriter struct { - w io.Writer // immutable - bw *bufio.Writer // non-nil when data is buffered -} - -func newBufferedWriter(w io.Writer) *bufferedWriter { - return &bufferedWriter{w: w} -} - -var bufWriterPool = sync.Pool{ - New: func() interface{} { - // TODO: pick something better? this is a bit under - // (3 x typical 1500 byte MTU) at least. - return bufio.NewWriterSize(nil, 4<<10) - }, -} - -func (w *bufferedWriter) Write(p []byte) (n int, err error) { - if w.bw == nil { - bw := bufWriterPool.Get().(*bufio.Writer) - bw.Reset(w.w) - w.bw = bw - } - return w.bw.Write(p) -} - -func (w *bufferedWriter) Flush() error { - bw := w.bw - if bw == nil { - return nil - } - err := bw.Flush() - bw.Reset(nil) - bufWriterPool.Put(bw) - w.bw = nil - return err -} - -func mustUint31(v int32) uint32 { - if v < 0 || v > 2147483647 { - panic("out of range") - } - return uint32(v) -} - -// bodyAllowedForStatus reports whether a given response status code -// permits a body. See RFC2616, section 4.4. -func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -type httpError struct { - msg string - timeout bool -} - -func (e *httpError) Error() string { return e.msg } -func (e *httpError) Timeout() bool { return e.timeout } -func (e *httpError) Temporary() bool { return true } - -var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} - -var isTokenTable = [127]bool{ - '!': true, - '#': true, - '$': true, - '%': true, - '&': true, - '\'': true, - '*': true, - '+': true, - '-': true, - '.': true, - '0': true, - '1': true, - '2': true, - '3': true, - '4': true, - '5': true, - '6': true, - '7': true, - '8': true, - '9': true, - 'A': true, - 'B': true, - 'C': true, - 'D': true, - 'E': true, - 'F': true, - 'G': true, - 'H': true, - 'I': true, - 'J': true, - 'K': true, - 'L': true, - 'M': true, - 'N': true, - 'O': true, - 'P': true, - 'Q': true, - 'R': true, - 'S': true, - 'T': true, - 'U': true, - 'W': true, - 'V': true, - 'X': true, - 'Y': true, - 'Z': true, - '^': true, - '_': true, - '`': true, - 'a': true, - 'b': true, - 'c': true, - 'd': true, - 'e': true, - 'f': true, - 'g': true, - 'h': true, - 'i': true, - 'j': true, - 'k': true, - 'l': true, - 'm': true, - 'n': true, - 'o': true, - 'p': true, - 'q': true, - 'r': true, - 's': true, - 't': true, - 'u': true, - 'v': true, - 'w': true, - 'x': true, - 'y': true, - 'z': true, - '|': true, - '~': true, -} - -type connectionStater interface { - ConnectionState() tls.ConnectionState -} - -var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} - -type sorter struct { - v []string // owned by sorter -} - -func (s *sorter) Len() int { return len(s.v) } -func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } -func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } - -// Keys returns the sorted keys of h. -// -// The returned slice is only valid until s used again or returned to -// its pool. -func (s *sorter) Keys(h http.Header) []string { - keys := s.v[:0] - for k := range h { - keys = append(keys, k) - } - s.v = keys - sort.Sort(s) - return keys -} - -func (s *sorter) SortStrings(ss []string) { - // Our sorter works on s.v, which sorter owners, so - // stash it away while we sort the user's buffer. 
- save := s.v - s.v = ss - sort.Sort(s) - s.v = save -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go15.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go15.go deleted file mode 100644 index d0fa5c89..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go15.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.5 - -package http2 - -import "net/http" - -func requestCancel(req *http.Request) <-chan struct{} { return nil } diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go16.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go16.go deleted file mode 100644 index db53c5b8..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/not_go16.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.6 - -package http2 - -import "net/http" - -func configureTransport(t1 *http.Transport) (*Transport, error) { - return nil, errTransportVersion -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/pipe.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/pipe.go deleted file mode 100644 index 69446e7a..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/pipe.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" - "io" - "sync" -) - -// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like -// io.Pipe except there are no PipeReader/PipeWriter halves, and the -// underlying buffer is an interface. (io.Pipe is always unbuffered) -type pipe struct { - mu sync.Mutex - c sync.Cond // c.L lazily initialized to &p.mu - b pipeBuffer - err error // read error once empty. non-nil means closed. - breakErr error // immediate read error (caller doesn't see rest of b) - donec chan struct{} // closed on error - readFn func() // optional code to run in Read before error -} - -type pipeBuffer interface { - Len() int - io.Writer - io.Reader -} - -// Read waits until data is available and copies bytes -// from the buffer into p. -func (p *pipe) Read(d []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - for { - if p.breakErr != nil { - return 0, p.breakErr - } - if p.b.Len() > 0 { - return p.b.Read(d) - } - if p.err != nil { - if p.readFn != nil { - p.readFn() // e.g. copy trailers - p.readFn = nil // not sticky like p.err - } - return 0, p.err - } - p.c.Wait() - } -} - -var errClosedPipeWrite = errors.New("write on closed buffer") - -// Write copies bytes from p into the buffer and wakes a reader. -// It is an error to write more data than the buffer can hold. 
-func (p *pipe) Write(d []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - defer p.c.Signal() - if p.err != nil { - return 0, errClosedPipeWrite - } - return p.b.Write(d) -} - -// CloseWithError causes the next Read (waking up a current blocked -// Read if needed) to return the provided err after all data has been -// read. -// -// The error must be non-nil. -func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } - -// BreakWithError causes the next Read (waking up a current blocked -// Read if needed) to return the provided err immediately, without -// waiting for unread data. -func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } - -// closeWithErrorAndCode is like CloseWithError but also sets some code to run -// in the caller's goroutine before returning the error. -func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } - -func (p *pipe) closeWithError(dst *error, err error, fn func()) { - if err == nil { - panic("err must be non-nil") - } - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - defer p.c.Signal() - if *dst != nil { - // Already been done. - return - } - p.readFn = fn - *dst = err - p.closeDoneLocked() -} - -// requires p.mu be held. -func (p *pipe) closeDoneLocked() { - if p.donec == nil { - return - } - // Close if unclosed. This isn't racy since we always - // hold p.mu while closing. - select { - case <-p.donec: - default: - close(p.donec) - } -} - -// Err returns the error (if any) first set by BreakWithError or CloseWithError. -func (p *pipe) Err() error { - p.mu.Lock() - defer p.mu.Unlock() - if p.breakErr != nil { - return p.breakErr - } - return p.err -} - -// Done returns a channel which is closed if and when this pipe is closed -// with CloseWithError. -func (p *pipe) Done() <-chan struct{} { - p.mu.Lock() - defer p.mu.Unlock() - if p.donec == nil { - p.donec = make(chan struct{}) - if p.err != nil || p.breakErr != nil { - // Already hit an error. - p.closeDoneLocked() - } - } - return p.donec -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/server.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/server.go deleted file mode 100644 index 1e6980c3..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/server.go +++ /dev/null @@ -1,2178 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// TODO: replace all <-sc.doneServing with reads from the stream's cw -// instead, and make sure that on close we close all open -// streams. then remove doneServing? - -// TODO: re-audit GOAWAY support. Consider each incoming frame type and -// whether it should be ignored during graceful shutdown. - -// TODO: disconnect idle clients. GFE seems to do 4 minutes. make -// configurable? or maximum number of idle clients and remove the -// oldest? - -// TODO: turn off the serve goroutine when idle, so -// an idle conn only has the readFrames goroutine active. (which could -// also be optimized probably to pin less memory in crypto/tls). This -// would involve tracking when the serve goroutine is active (atomic -// int32 read/CAS probably?) and starting it up when frames arrive, -// and shutting it down when all handlers exit. 
the occasional PING -// packets could use time.AfterFunc to call sc.wakeStartServeLoop() -// (which is a no-op if already running) and then queue the PING write -// as normal. The serve loop would then exit in most cases (if no -// Handlers running) and not be woken up again until the PING packet -// returns. - -// TODO (maybe): add a mechanism for Handlers to going into -// half-closed-local mode (rw.(io.Closer) test?) but not exit their -// handler, and continue to be able to read from the -// Request.Body. This would be a somewhat semantic change from HTTP/1 -// (or at least what we expose in net/http), so I'd probably want to -// add it there too. For now, this package says that returning from -// the Handler ServeHTTP function means you're both done reading and -// done writing, without a way to stop just one or the other. - -package http2 - -import ( - "bufio" - "bytes" - "crypto/tls" - "errors" - "fmt" - "io" - "log" - "net" - "net/http" - "net/textproto" - "net/url" - "os" - "reflect" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/net/http2/hpack" -) - -const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? -) - -var ( - errClientDisconnected = errors.New("client disconnected") - errClosedBody = errors.New("body closed by handler") - errHandlerComplete = errors.New("http2: request body closed due to handler exiting") - errStreamClosed = errors.New("http2: stream closed") -) - -var responseWriterStatePool = sync.Pool{ - New: func() interface{} { - rws := &responseWriterState{} - rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize) - return rws - }, -} - -// Test hooks. -var ( - testHookOnConn func() - testHookGetServerConn func(*serverConn) - testHookOnPanicMu *sync.Mutex // nil except in tests - testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool) -) - -// Server is an HTTP/2 server. -type Server struct { - // MaxHandlers limits the number of http.Handler ServeHTTP goroutines - // which may run at a time over all connections. - // Negative or zero no limit. - // TODO: implement - MaxHandlers int - - // MaxConcurrentStreams optionally specifies the number of - // concurrent streams that each client may have open at a - // time. This is unrelated to the number of http.Handler goroutines - // which may be active globally, which is MaxHandlers. - // If zero, MaxConcurrentStreams defaults to at least 100, per - // the HTTP/2 spec's recommendations. - MaxConcurrentStreams uint32 - - // MaxReadFrameSize optionally specifies the largest frame - // this server is willing to read. A valid value is between - // 16k and 16M, inclusive. If zero or otherwise invalid, a - // default value is used. - MaxReadFrameSize uint32 - - // PermitProhibitedCipherSuites, if true, permits the use of - // cipher suites prohibited by the HTTP/2 spec. - PermitProhibitedCipherSuites bool -} - -func (s *Server) maxReadFrameSize() uint32 { - if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { - return v - } - return defaultMaxReadFrameSize -} - -func (s *Server) maxConcurrentStreams() uint32 { - if v := s.MaxConcurrentStreams; v > 0 { - return v - } - return defaultMaxStreams -} - -// ConfigureServer adds HTTP/2 support to a net/http Server. -// -// The configuration conf may be nil. -// -// ConfigureServer must be called before s begins serving. 
-func ConfigureServer(s *http.Server, conf *Server) error { - if conf == nil { - conf = new(Server) - } - - if s.TLSConfig == nil { - s.TLSConfig = new(tls.Config) - } else if s.TLSConfig.CipherSuites != nil { - // If they already provided a CipherSuite list, return - // an error if it has a bad order or is missing - // ECDHE_RSA_WITH_AES_128_GCM_SHA256. - const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - haveRequired := false - sawBad := false - for i, cs := range s.TLSConfig.CipherSuites { - if cs == requiredCipher { - haveRequired = true - } - if isBadCipher(cs) { - sawBad = true - } else if sawBad { - return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) - } - } - if !haveRequired { - return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256") - } - } - - // Note: not setting MinVersion to tls.VersionTLS12, - // as we don't want to interfere with HTTP/1.1 traffic - // on the user's server. We enforce TLS 1.2 later once - // we accept a connection. Ideally this should be done - // during next-proto selection, but using TLS <1.2 with - // HTTP/2 is still the client's bug. - - s.TLSConfig.PreferServerCipherSuites = true - - haveNPN := false - for _, p := range s.TLSConfig.NextProtos { - if p == NextProtoTLS { - haveNPN = true - break - } - } - if !haveNPN { - s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) - } - // h2-14 is temporary (as of 2015-03-05) while we wait for all browsers - // to switch to "h2". - s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "h2-14") - - if s.TLSNextProto == nil { - s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} - } - protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { - if testHookOnConn != nil { - testHookOnConn() - } - conf.ServeConn(c, &ServeConnOpts{ - Handler: h, - BaseConfig: hs, - }) - } - s.TLSNextProto[NextProtoTLS] = protoHandler - s.TLSNextProto["h2-14"] = protoHandler // temporary; see above. - return nil -} - -// ServeConnOpts are options for the Server.ServeConn method. -type ServeConnOpts struct { - // BaseConfig optionally sets the base configuration - // for values. If nil, defaults are used. - BaseConfig *http.Server - - // Handler specifies which handler to use for processing - // requests. If nil, BaseConfig.Handler is used. If BaseConfig - // or BaseConfig.Handler is nil, http.DefaultServeMux is used. - Handler http.Handler -} - -func (o *ServeConnOpts) baseConfig() *http.Server { - if o != nil && o.BaseConfig != nil { - return o.BaseConfig - } - return new(http.Server) -} - -func (o *ServeConnOpts) handler() http.Handler { - if o != nil { - if o.Handler != nil { - return o.Handler - } - if o.BaseConfig != nil && o.BaseConfig.Handler != nil { - return o.BaseConfig.Handler - } - } - return http.DefaultServeMux -} - -// ServeConn serves HTTP/2 requests on the provided connection and -// blocks until the connection is no longer readable. -// -// ServeConn starts speaking HTTP/2 assuming that c has not had any -// reads or writes. It writes its initial settings frame and expects -// to be able to read the preface and settings frame from the -// client. 
If c has a ConnectionState method like a *tls.Conn, the -// ConnectionState is used to verify the TLS ciphersuite and to set -// the Request.TLS field in Handlers. -// -// ServeConn does not support h2c by itself. Any h2c support must be -// implemented in terms of providing a suitably-behaving net.Conn. -// -// The opts parameter is optional. If nil, default values are used. -func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { - sc := &serverConn{ - srv: s, - hs: opts.baseConfig(), - conn: c, - remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), - handler: opts.handler(), - streams: make(map[uint32]*stream), - readFrameCh: make(chan readFrameResult), - wantWriteFrameCh: make(chan frameWriteMsg, 8), - wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync - bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way - doneServing: make(chan struct{}), - advMaxStreams: s.maxConcurrentStreams(), - writeSched: writeScheduler{ - maxFrameSize: initialMaxFrameSize, - }, - initialWindowSize: initialWindowSize, - headerTableSize: initialHeaderTableSize, - serveG: newGoroutineLock(), - pushEnabled: true, - } - sc.flow.add(initialWindowSize) - sc.inflow.add(initialWindowSize) - sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - - fr := NewFramer(sc.bw, c) - fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) - fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) - sc.framer = fr - - if tc, ok := c.(connectionStater); ok { - sc.tlsState = new(tls.ConnectionState) - *sc.tlsState = tc.ConnectionState() - // 9.2 Use of TLS Features - // An implementation of HTTP/2 over TLS MUST use TLS - // 1.2 or higher with the restrictions on feature set - // and cipher suite described in this section. Due to - // implementation limitations, it might not be - // possible to fail TLS negotiation. An endpoint MUST - // immediately terminate an HTTP/2 connection that - // does not meet the TLS requirements described in - // this section with a connection error (Section - // 5.4.1) of type INADEQUATE_SECURITY. - if sc.tlsState.Version < tls.VersionTLS12 { - sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low") - return - } - - if sc.tlsState.ServerName == "" { - // Client must use SNI, but we don't enforce that anymore, - // since it was causing problems when connecting to bare IP - // addresses during development. - // - // TODO: optionally enforce? Or enforce at the time we receive - // a new request, and verify the the ServerName matches the :authority? - // But that precludes proxy situations, perhaps. - // - // So for now, do nothing here again. - } - - if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { - // "Endpoints MAY choose to generate a connection error - // (Section 5.4.1) of type INADEQUATE_SECURITY if one of - // the prohibited cipher suites are negotiated." - // - // We choose that. In my opinion, the spec is weak - // here. It also says both parties must support at least - // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no - // excuses here. If we really must, we could allow an - // "AllowInsecureWeakCiphers" option on the server later. - // Let's see how it plays out first. 
- sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) - return - } - } - - if hook := testHookGetServerConn; hook != nil { - hook(sc) - } - sc.serve() -} - -// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. -func isBadCipher(cipher uint16) bool { - switch cipher { - case tls.TLS_RSA_WITH_RC4_128_SHA, - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: - // Reject cipher suites from Appendix A. - // "This list includes those cipher suites that do not - // offer an ephemeral key exchange and those that are - // based on the TLS null, stream or block cipher type" - return true - default: - return false - } -} - -func (sc *serverConn) rejectConn(err ErrCode, debug string) { - sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) - // ignoring errors. hanging up anyway. - sc.framer.WriteGoAway(0, err, []byte(debug)) - sc.bw.Flush() - sc.conn.Close() -} - -type serverConn struct { - // Immutable: - srv *Server - hs *http.Server - conn net.Conn - bw *bufferedWriter // writing to conn - handler http.Handler - framer *Framer - doneServing chan struct{} // closed when serverConn.serve ends - readFrameCh chan readFrameResult // written by serverConn.readFrames - wantWriteFrameCh chan frameWriteMsg // from handlers -> serve - wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes - bodyReadCh chan bodyReadMsg // from handlers -> serve - testHookCh chan func(int) // code to run on the serve loop - flow flow // conn-wide (not stream-specific) outbound flow control - inflow flow // conn-wide inbound flow control - tlsState *tls.ConnectionState // shared by all handlers, like net/http - remoteAddrStr string - - // Everything following is owned by the serve loop; use serveG.check(): - serveG goroutineLock // used to verify funcs are on serve() - pushEnabled bool - sawFirstSettings bool // got the initial SETTINGS frame after the preface - needToSendSettingsAck bool - unackedSettings int // how many SETTINGS have we sent without ACKs? 
- clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) - advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client - curOpenStreams uint32 // client's number of open streams - maxStreamID uint32 // max ever seen - streams map[uint32]*stream - initialWindowSize int32 - headerTableSize uint32 - peerMaxHeaderListSize uint32 // zero means unknown (default) - canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case - writingFrame bool // started write goroutine but haven't heard back on wroteFrameCh - needsFrameFlush bool // last frame write wasn't a flush - writeSched writeScheduler - inGoAway bool // we've started to or sent GOAWAY - needToSendGoAway bool // we need to schedule a GOAWAY frame write - goAwayCode ErrCode - shutdownTimerCh <-chan time.Time // nil until used - shutdownTimer *time.Timer // nil until used - freeRequestBodyBuf []byte // if non-nil, a free initialWindowSize buffer for getRequestBodyBuf - - // Owned by the writeFrameAsync goroutine: - headerWriteBuf bytes.Buffer - hpackEncoder *hpack.Encoder -} - -func (sc *serverConn) maxHeaderListSize() uint32 { - n := sc.hs.MaxHeaderBytes - if n <= 0 { - n = http.DefaultMaxHeaderBytes - } - // http2's count is in a slightly different unit and includes 32 bytes per pair. - // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. - const perFieldOverhead = 32 // per http2 spec - const typicalHeaders = 10 // conservative - return uint32(n + typicalHeaders*perFieldOverhead) -} - -// stream represents a stream. This is the minimal metadata needed by -// the serve goroutine. Most of the actual stream state is owned by -// the http.Handler's goroutine in the responseWriter. Because the -// responseWriter's responseWriterState is recycled at the end of a -// handler, this struct intentionally has no pointer to the -// *responseWriter{,State} itself, as the Handler ending nils out the -// responseWriter's state field. -type stream struct { - // immutable: - sc *serverConn - id uint32 - body *pipe // non-nil if expecting DATA frames - cw closeWaiter // closed wait stream transitions to closed state - - // owned by serverConn's serve loop: - bodyBytes int64 // body bytes seen so far - declBodyBytes int64 // or -1 if undeclared - flow flow // limits writing from Handler to client - inflow flow // what the client is allowed to POST/etc to us - parent *stream // or nil - numTrailerValues int64 - weight uint8 - state streamState - sentReset bool // only true once detached from streams map - gotReset bool // only true once detacted from streams map - gotTrailerHeader bool // HEADER frame for trailers was seen - reqBuf []byte - - trailer http.Header // accumulated trailers - reqTrailer http.Header // handler's Request.Trailer -} - -func (sc *serverConn) Framer() *Framer { return sc.framer } -func (sc *serverConn) CloseConn() error { return sc.conn.Close() } -func (sc *serverConn) Flush() error { return sc.bw.Flush() } -func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { - return sc.hpackEncoder, &sc.headerWriteBuf -} - -func (sc *serverConn) state(streamID uint32) (streamState, *stream) { - sc.serveG.check() - // http://http2.github.io/http2-spec/#rfc.section.5.1 - if st, ok := sc.streams[streamID]; ok { - return st.state, st - } - // "The first use of a new stream identifier implicitly closes all - // streams in the "idle" state that might have been initiated by - // that peer with a lower-valued stream identifier. 
For example, if - // a client sends a HEADERS frame on stream 7 without ever sending a - // frame on stream 5, then stream 5 transitions to the "closed" - // state when the first frame for stream 7 is sent or received." - if streamID <= sc.maxStreamID { - return stateClosed, nil - } - return stateIdle, nil -} - -// setConnState calls the net/http ConnState hook for this connection, if configured. -// Note that the net/http package does StateNew and StateClosed for us. -// There is currently no plan for StateHijacked or hijacking HTTP/2 connections. -func (sc *serverConn) setConnState(state http.ConnState) { - if sc.hs.ConnState != nil { - sc.hs.ConnState(sc.conn, state) - } -} - -func (sc *serverConn) vlogf(format string, args ...interface{}) { - if VerboseLogs { - sc.logf(format, args...) - } -} - -func (sc *serverConn) logf(format string, args ...interface{}) { - if lg := sc.hs.ErrorLog; lg != nil { - lg.Printf(format, args...) - } else { - log.Printf(format, args...) - } -} - -// errno returns v's underlying uintptr, else 0. -// -// TODO: remove this helper function once http2 can use build -// tags. See comment in isClosedConnError. -func errno(v error) uintptr { - if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { - return uintptr(rv.Uint()) - } - return 0 -} - -// isClosedConnError reports whether err is an error from use of a closed -// network connection. -func isClosedConnError(err error) bool { - if err == nil { - return false - } - - // TODO: remove this string search and be more like the Windows - // case below. That might involve modifying the standard library - // to return better error types. - str := err.Error() - if strings.Contains(str, "use of closed network connection") { - return true - } - - // TODO(bradfitz): x/tools/cmd/bundle doesn't really support - // build tags, so I can't make an http2_windows.go file with - // Windows-specific stuff. Fix that and move this, once we - // have a way to bundle this into std's net/http somehow. - if runtime.GOOS == "windows" { - if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { - if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { - const WSAECONNABORTED = 10053 - const WSAECONNRESET = 10054 - if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { - return true - } - } - } - } - return false -} - -func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { - if err == nil { - return - } - if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) { - // Boring, expected errors. - sc.vlogf(format, args...) - } else { - sc.logf(format, args...) - } -} - -func (sc *serverConn) canonicalHeader(v string) string { - sc.serveG.check() - cv, ok := commonCanonHeader[v] - if ok { - return cv - } - cv, ok = sc.canonHeader[v] - if ok { - return cv - } - if sc.canonHeader == nil { - sc.canonHeader = make(map[string]string) - } - cv = http.CanonicalHeaderKey(v) - sc.canonHeader[v] = cv - return cv -} - -type readFrameResult struct { - f Frame // valid until readMore is called - err error - - // readMore should be called once the consumer no longer needs or - // retains f. After readMore, f is invalid and more frames can be - // read. - readMore func() -} - -// readFrames is the loop that reads incoming frames. -// It takes care to only read one frame at a time, blocking until the -// consumer is done with the frame. -// It's run on its own goroutine. 
-func (sc *serverConn) readFrames() { - gate := make(gate) - gateDone := gate.Done - for { - f, err := sc.framer.ReadFrame() - select { - case sc.readFrameCh <- readFrameResult{f, err, gateDone}: - case <-sc.doneServing: - return - } - select { - case <-gate: - case <-sc.doneServing: - return - } - if terminalReadFrameError(err) { - return - } - } -} - -// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. -type frameWriteResult struct { - wm frameWriteMsg // what was written (or attempted) - err error // result of the writeFrame call -} - -// writeFrameAsync runs in its own goroutine and writes a single frame -// and then reports when it's done. -// At most one goroutine can be running writeFrameAsync at a time per -// serverConn. -func (sc *serverConn) writeFrameAsync(wm frameWriteMsg) { - err := wm.write.writeFrame(sc) - sc.wroteFrameCh <- frameWriteResult{wm, err} -} - -func (sc *serverConn) closeAllStreamsOnConnClose() { - sc.serveG.check() - for _, st := range sc.streams { - sc.closeStream(st, errClientDisconnected) - } -} - -func (sc *serverConn) stopShutdownTimer() { - sc.serveG.check() - if t := sc.shutdownTimer; t != nil { - t.Stop() - } -} - -func (sc *serverConn) notePanic() { - // Note: this is for serverConn.serve panicking, not http.Handler code. - if testHookOnPanicMu != nil { - testHookOnPanicMu.Lock() - defer testHookOnPanicMu.Unlock() - } - if testHookOnPanic != nil { - if e := recover(); e != nil { - if testHookOnPanic(sc, e) { - panic(e) - } - } - } -} - -func (sc *serverConn) serve() { - sc.serveG.check() - defer sc.notePanic() - defer sc.conn.Close() - defer sc.closeAllStreamsOnConnClose() - defer sc.stopShutdownTimer() - defer close(sc.doneServing) // unblocks handlers trying to send - - if VerboseLogs { - sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) - } - - sc.writeFrame(frameWriteMsg{ - write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - - // TODO: more actual settings, notably - // SettingInitialWindowSize, but then we also - // want to bump up the conn window size the - // same amount here right after the settings - }, - }) - sc.unackedSettings++ - - if err := sc.readPreface(); err != nil { - sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) - return - } - // Now that we've got the preface, get us out of the - // "StateNew" state. We can't go directly to idle, though. - // Active means we read some data and anticipate a request. We'll - // do another Active when we get a HEADERS frame. 
- sc.setConnState(http.StateActive) - sc.setConnState(http.StateIdle) - - go sc.readFrames() // closed by defer sc.conn.Close above - - settingsTimer := time.NewTimer(firstSettingsTimeout) - loopNum := 0 - for { - loopNum++ - select { - case wm := <-sc.wantWriteFrameCh: - sc.writeFrame(wm) - case res := <-sc.wroteFrameCh: - sc.wroteFrame(res) - case res := <-sc.readFrameCh: - if !sc.processFrameFromReader(res) { - return - } - res.readMore() - if settingsTimer.C != nil { - settingsTimer.Stop() - settingsTimer.C = nil - } - case m := <-sc.bodyReadCh: - sc.noteBodyRead(m.st, m.n) - case <-settingsTimer.C: - sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) - return - case <-sc.shutdownTimerCh: - sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) - return - case fn := <-sc.testHookCh: - fn(loopNum) - } - } -} - -// readPreface reads the ClientPreface greeting from the peer -// or returns an error on timeout or an invalid greeting. -func (sc *serverConn) readPreface() error { - errc := make(chan error, 1) - go func() { - // Read the client preface - buf := make([]byte, len(ClientPreface)) - if _, err := io.ReadFull(sc.conn, buf); err != nil { - errc <- err - } else if !bytes.Equal(buf, clientPreface) { - errc <- fmt.Errorf("bogus greeting %q", buf) - } else { - errc <- nil - } - }() - timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? - defer timer.Stop() - select { - case <-timer.C: - return errors.New("timeout waiting for client preface") - case err := <-errc: - if err == nil { - if VerboseLogs { - sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) - } - } - return err - } -} - -var errChanPool = sync.Pool{ - New: func() interface{} { return make(chan error, 1) }, -} - -var writeDataPool = sync.Pool{ - New: func() interface{} { return new(writeData) }, -} - -// writeDataFromHandler writes DATA response frames from a handler on -// the given stream. -func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { - ch := errChanPool.Get().(chan error) - writeArg := writeDataPool.Get().(*writeData) - *writeArg = writeData{stream.id, data, endStream} - err := sc.writeFrameFromHandler(frameWriteMsg{ - write: writeArg, - stream: stream, - done: ch, - }) - if err != nil { - return err - } - var frameWriteDone bool // the frame write is done (successfully or not) - select { - case err = <-ch: - frameWriteDone = true - case <-sc.doneServing: - return errClientDisconnected - case <-stream.cw: - // If both ch and stream.cw were ready (as might - // happen on the final Write after an http.Handler - // ends), prefer the write result. Otherwise this - // might just be us successfully closing the stream. - // The writeFrameAsync and serve goroutines guarantee - // that the ch send will happen before the stream.cw - // close. - select { - case err = <-ch: - frameWriteDone = true - default: - return errStreamClosed - } - } - errChanPool.Put(ch) - if frameWriteDone { - writeDataPool.Put(writeArg) - } - return err -} - -// writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts -// if the connection has gone away. -// -// This must not be run from the serve goroutine itself, else it might -// deadlock writing to sc.wantWriteFrameCh (which is only mildly -// buffered and is read by serve itself). If you're on the serve -// goroutine, call writeFrame instead. 
-func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error { - sc.serveG.checkNotOn() // NOT - select { - case sc.wantWriteFrameCh <- wm: - return nil - case <-sc.doneServing: - // Serve loop is gone. - // Client has closed their connection to the server. - return errClientDisconnected - } -} - -// writeFrame schedules a frame to write and sends it if there's nothing -// already being written. -// -// There is no pushback here (the serve goroutine never blocks). It's -// the http.Handlers that block, waiting for their previous frames to -// make it onto the wire -// -// If you're not on the serve goroutine, use writeFrameFromHandler instead. -func (sc *serverConn) writeFrame(wm frameWriteMsg) { - sc.serveG.check() - sc.writeSched.add(wm) - sc.scheduleFrameWrite() -} - -// startFrameWrite starts a goroutine to write wm (in a separate -// goroutine since that might block on the network), and updates the -// serve goroutine's state about the world, updated from info in wm. -func (sc *serverConn) startFrameWrite(wm frameWriteMsg) { - sc.serveG.check() - if sc.writingFrame { - panic("internal error: can only be writing one frame at a time") - } - - st := wm.stream - if st != nil { - switch st.state { - case stateHalfClosedLocal: - panic("internal error: attempt to send frame on half-closed-local stream") - case stateClosed: - if st.sentReset || st.gotReset { - // Skip this frame. - sc.scheduleFrameWrite() - return - } - panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm)) - } - } - - sc.writingFrame = true - sc.needsFrameFlush = true - go sc.writeFrameAsync(wm) -} - -// errHandlerPanicked is the error given to any callers blocked in a read from -// Request.Body when the main goroutine panics. Since most handlers read in the -// the main ServeHTTP goroutine, this will show up rarely. -var errHandlerPanicked = errors.New("http2: handler panicked") - -// wroteFrame is called on the serve goroutine with the result of -// whatever happened on writeFrameAsync. -func (sc *serverConn) wroteFrame(res frameWriteResult) { - sc.serveG.check() - if !sc.writingFrame { - panic("internal error: expected to be already writing a frame") - } - sc.writingFrame = false - - wm := res.wm - st := wm.stream - - closeStream := endsStream(wm.write) - - if _, ok := wm.write.(handlerPanicRST); ok { - sc.closeStream(st, errHandlerPanicked) - } - - // Reply (if requested) to the blocked ServeHTTP goroutine. - if ch := wm.done; ch != nil { - select { - case ch <- res.err: - default: - panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write)) - } - } - wm.write = nil // prevent use (assume it's tainted after wm.done send) - - if closeStream { - if st == nil { - panic("internal error: expecting non-nil stream") - } - switch st.state { - case stateOpen: - // Here we would go to stateHalfClosedLocal in - // theory, but since our handler is done and - // the net/http package provides no mechanism - // for finishing writing to a ResponseWriter - // while still reading data (see possible TODO - // at top of this file), we go into closed - // state here anyway, after telling the peer - // we're hanging up on them. - st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream - errCancel := StreamError{st.id, ErrCodeCancel} - sc.resetStream(errCancel) - case stateHalfClosedRemote: - sc.closeStream(st, errHandlerComplete) - } - } - - sc.scheduleFrameWrite() -} - -// scheduleFrameWrite tickles the frame writing scheduler. 
-// -// If a frame is already being written, nothing happens. This will be called again -// when the frame is done being written. -// -// If a frame isn't being written we need to send one, the best frame -// to send is selected, preferring first things that aren't -// stream-specific (e.g. ACKing settings), and then finding the -// highest priority stream. -// -// If a frame isn't being written and there's nothing else to send, we -// flush the write buffer. -func (sc *serverConn) scheduleFrameWrite() { - sc.serveG.check() - if sc.writingFrame { - return - } - if sc.needToSendGoAway { - sc.needToSendGoAway = false - sc.startFrameWrite(frameWriteMsg{ - write: &writeGoAway{ - maxStreamID: sc.maxStreamID, - code: sc.goAwayCode, - }, - }) - return - } - if sc.needToSendSettingsAck { - sc.needToSendSettingsAck = false - sc.startFrameWrite(frameWriteMsg{write: writeSettingsAck{}}) - return - } - if !sc.inGoAway { - if wm, ok := sc.writeSched.take(); ok { - sc.startFrameWrite(wm) - return - } - } - if sc.needsFrameFlush { - sc.startFrameWrite(frameWriteMsg{write: flushFrameWriter{}}) - sc.needsFrameFlush = false // after startFrameWrite, since it sets this true - return - } -} - -func (sc *serverConn) goAway(code ErrCode) { - sc.serveG.check() - if sc.inGoAway { - return - } - if code != ErrCodeNo { - sc.shutDownIn(250 * time.Millisecond) - } else { - // TODO: configurable - sc.shutDownIn(1 * time.Second) - } - sc.inGoAway = true - sc.needToSendGoAway = true - sc.goAwayCode = code - sc.scheduleFrameWrite() -} - -func (sc *serverConn) shutDownIn(d time.Duration) { - sc.serveG.check() - sc.shutdownTimer = time.NewTimer(d) - sc.shutdownTimerCh = sc.shutdownTimer.C -} - -func (sc *serverConn) resetStream(se StreamError) { - sc.serveG.check() - sc.writeFrame(frameWriteMsg{write: se}) - if st, ok := sc.streams[se.StreamID]; ok { - st.sentReset = true - sc.closeStream(st, se) - } -} - -// processFrameFromReader processes the serve loop's read from readFrameCh from the -// frame-reading goroutine. -// processFrameFromReader returns whether the connection should be kept open. -func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { - sc.serveG.check() - err := res.err - if err != nil { - if err == ErrFrameTooLarge { - sc.goAway(ErrCodeFrameSize) - return true // goAway will close the loop - } - clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) - if clientGone { - // TODO: could we also get into this state if - // the peer does a half close - // (e.g. CloseWrite) because they're done - // sending frames but they're still wanting - // our open replies? Investigate. - // TODO: add CloseWrite to crypto/tls.Conn first - // so we have a way to test this? I suppose - // just for testing we could have a non-TLS mode. 
- return false - } - } else { - f := res.f - if VerboseLogs { - sc.vlogf("http2: server read frame %v", summarizeFrame(f)) - } - err = sc.processFrame(f) - if err == nil { - return true - } - } - - switch ev := err.(type) { - case StreamError: - sc.resetStream(ev) - return true - case goAwayFlowError: - sc.goAway(ErrCodeFlowControl) - return true - case ConnectionError: - sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) - sc.goAway(ErrCode(ev)) - return true // goAway will handle shutdown - default: - if res.err != nil { - sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) - } else { - sc.logf("http2: server closing client connection: %v", err) - } - return false - } -} - -func (sc *serverConn) processFrame(f Frame) error { - sc.serveG.check() - - // First frame received must be SETTINGS. - if !sc.sawFirstSettings { - if _, ok := f.(*SettingsFrame); !ok { - return ConnectionError(ErrCodeProtocol) - } - sc.sawFirstSettings = true - } - - switch f := f.(type) { - case *SettingsFrame: - return sc.processSettings(f) - case *MetaHeadersFrame: - return sc.processHeaders(f) - case *WindowUpdateFrame: - return sc.processWindowUpdate(f) - case *PingFrame: - return sc.processPing(f) - case *DataFrame: - return sc.processData(f) - case *RSTStreamFrame: - return sc.processResetStream(f) - case *PriorityFrame: - return sc.processPriority(f) - case *PushPromiseFrame: - // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE - // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. - return ConnectionError(ErrCodeProtocol) - default: - sc.vlogf("http2: server ignoring frame: %v", f.Header()) - return nil - } -} - -func (sc *serverConn) processPing(f *PingFrame) error { - sc.serveG.check() - if f.IsAck() { - // 6.7 PING: " An endpoint MUST NOT respond to PING frames - // containing this flag." - return nil - } - if f.StreamID != 0 { - // "PING frames are not associated with any individual - // stream. If a PING frame is received with a stream - // identifier field value other than 0x0, the recipient MUST - // respond with a connection error (Section 5.4.1) of type - // PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) - } - sc.writeFrame(frameWriteMsg{write: writePingAck{f}}) - return nil -} - -func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { - sc.serveG.check() - switch { - case f.StreamID != 0: // stream-level flow control - st := sc.streams[f.StreamID] - if st == nil { - // "WINDOW_UPDATE can be sent by a peer that has sent a - // frame bearing the END_STREAM flag. This means that a - // receiver could receive a WINDOW_UPDATE frame on a "half - // closed (remote)" or "closed" stream. A receiver MUST - // NOT treat this as an error, see Section 5.1." - return nil - } - if !st.flow.add(int32(f.Increment)) { - return StreamError{f.StreamID, ErrCodeFlowControl} - } - default: // connection-level flow control - if !sc.flow.add(int32(f.Increment)) { - return goAwayFlowError{} - } - } - sc.scheduleFrameWrite() - return nil -} - -func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { - sc.serveG.check() - - state, st := sc.state(f.StreamID) - if state == stateIdle { - // 6.4 "RST_STREAM frames MUST NOT be sent for a - // stream in the "idle" state. If a RST_STREAM frame - // identifying an idle stream is received, the - // recipient MUST treat this as a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. 
- return ConnectionError(ErrCodeProtocol) - } - if st != nil { - st.gotReset = true - sc.closeStream(st, StreamError{f.StreamID, f.ErrCode}) - } - return nil -} - -func (sc *serverConn) closeStream(st *stream, err error) { - sc.serveG.check() - if st.state == stateIdle || st.state == stateClosed { - panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) - } - st.state = stateClosed - sc.curOpenStreams-- - if sc.curOpenStreams == 0 { - sc.setConnState(http.StateIdle) - } - delete(sc.streams, st.id) - if p := st.body; p != nil { - p.CloseWithError(err) - } - st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc - sc.writeSched.forgetStream(st.id) - if st.reqBuf != nil { - // Stash this request body buffer (64k) away for reuse - // by a future POST/PUT/etc. - // - // TODO(bradfitz): share on the server? sync.Pool? - // Server requires locks and might hurt contention. - // sync.Pool might work, or might be worse, depending - // on goroutine CPU migrations. (get and put on - // separate CPUs). Maybe a mix of strategies. But - // this is an easy win for now. - sc.freeRequestBodyBuf = st.reqBuf - } -} - -func (sc *serverConn) processSettings(f *SettingsFrame) error { - sc.serveG.check() - if f.IsAck() { - sc.unackedSettings-- - if sc.unackedSettings < 0 { - // Why is the peer ACKing settings we never sent? - // The spec doesn't mention this case, but - // hang up on them anyway. - return ConnectionError(ErrCodeProtocol) - } - return nil - } - if err := f.ForeachSetting(sc.processSetting); err != nil { - return err - } - sc.needToSendSettingsAck = true - sc.scheduleFrameWrite() - return nil -} - -func (sc *serverConn) processSetting(s Setting) error { - sc.serveG.check() - if err := s.Valid(); err != nil { - return err - } - if VerboseLogs { - sc.vlogf("http2: server processing setting %v", s) - } - switch s.ID { - case SettingHeaderTableSize: - sc.headerTableSize = s.Val - sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) - case SettingEnablePush: - sc.pushEnabled = s.Val != 0 - case SettingMaxConcurrentStreams: - sc.clientMaxStreams = s.Val - case SettingInitialWindowSize: - return sc.processSettingInitialWindowSize(s.Val) - case SettingMaxFrameSize: - sc.writeSched.maxFrameSize = s.Val - case SettingMaxHeaderListSize: - sc.peerMaxHeaderListSize = s.Val - default: - // Unknown setting: "An endpoint that receives a SETTINGS - // frame with any unknown or unsupported identifier MUST - // ignore that setting." - if VerboseLogs { - sc.vlogf("http2: server ignoring unknown setting %v", s) - } - } - return nil -} - -func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { - sc.serveG.check() - // Note: val already validated to be within range by - // processSetting's Valid call. - - // "A SETTINGS frame can alter the initial flow control window - // size for all current streams. When the value of - // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST - // adjust the size of all stream flow control windows that it - // maintains by the difference between the new value and the - // old value." - old := sc.initialWindowSize - sc.initialWindowSize = int32(val) - growth := sc.initialWindowSize - old // may be negative - for _, st := range sc.streams { - if !st.flow.add(growth) { - // 6.9.2 Initial Flow Control Window Size - // "An endpoint MUST treat a change to - // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow - // control window to exceed the maximum size as a - // connection error (Section 5.4.1) of type - // FLOW_CONTROL_ERROR." 
- return ConnectionError(ErrCodeFlowControl) - } - } - return nil -} - -func (sc *serverConn) processData(f *DataFrame) error { - sc.serveG.check() - // "If a DATA frame is received whose stream is not in "open" - // or "half closed (local)" state, the recipient MUST respond - // with a stream error (Section 5.4.2) of type STREAM_CLOSED." - id := f.Header().StreamID - st, ok := sc.streams[id] - if !ok || st.state != stateOpen || st.gotTrailerHeader { - // This includes sending a RST_STREAM if the stream is - // in stateHalfClosedLocal (which currently means that - // the http.Handler returned, so it's done reading & - // done writing). Try to stop the client from sending - // more DATA. - return StreamError{id, ErrCodeStreamClosed} - } - if st.body == nil { - panic("internal error: should have a body in this state") - } - data := f.Data() - - // Sender sending more than they'd declared? - if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { - st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) - return StreamError{id, ErrCodeStreamClosed} - } - if len(data) > 0 { - // Check whether the client has flow control quota. - if int(st.inflow.available()) < len(data) { - return StreamError{id, ErrCodeFlowControl} - } - st.inflow.take(int32(len(data))) - wrote, err := st.body.Write(data) - if err != nil { - return StreamError{id, ErrCodeStreamClosed} - } - if wrote != len(data) { - panic("internal error: bad Writer") - } - st.bodyBytes += int64(len(data)) - } - if f.StreamEnded() { - st.endStream() - } - return nil -} - -// endStream closes a Request.Body's pipe. It is called when a DATA -// frame says a request body is over (or after trailers). -func (st *stream) endStream() { - sc := st.sc - sc.serveG.check() - - if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { - st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", - st.declBodyBytes, st.bodyBytes)) - } else { - st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) - st.body.CloseWithError(io.EOF) - } - st.state = stateHalfClosedRemote -} - -// copyTrailersToHandlerRequest is run in the Handler's goroutine in -// its Request.Body.Read just before it gets io.EOF. -func (st *stream) copyTrailersToHandlerRequest() { - for k, vv := range st.trailer { - if _, ok := st.reqTrailer[k]; ok { - // Only copy it over it was pre-declared. - st.reqTrailer[k] = vv - } - } -} - -func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { - sc.serveG.check() - id := f.Header().StreamID - if sc.inGoAway { - // Ignore. - return nil - } - // http://http2.github.io/http2-spec/#rfc.section.5.1.1 - // Streams initiated by a client MUST use odd-numbered stream - // identifiers. [...] An endpoint that receives an unexpected - // stream identifier MUST respond with a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. - if id%2 != 1 { - return ConnectionError(ErrCodeProtocol) - } - // A HEADERS frame can be used to create a new stream or - // send a trailer for an open one. If we already have a stream - // open, let it process its own HEADERS frame (trailers at this - // point, if it's valid). - st := sc.streams[f.Header().StreamID] - if st != nil { - return st.processTrailerHeaders(f) - } - - // [...] The identifier of a newly established stream MUST be - // numerically greater than all streams that the initiating - // endpoint has opened or reserved. [...] 
An endpoint that - // receives an unexpected stream identifier MUST respond with - // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. - if id <= sc.maxStreamID { - return ConnectionError(ErrCodeProtocol) - } - sc.maxStreamID = id - - st = &stream{ - sc: sc, - id: id, - state: stateOpen, - } - if f.StreamEnded() { - st.state = stateHalfClosedRemote - } - st.cw.Init() - - st.flow.conn = &sc.flow // link to conn-level counter - st.flow.add(sc.initialWindowSize) - st.inflow.conn = &sc.inflow // link to conn-level counter - st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings - - sc.streams[id] = st - if f.HasPriority() { - adjustStreamPriority(sc.streams, st.id, f.Priority) - } - sc.curOpenStreams++ - if sc.curOpenStreams == 1 { - sc.setConnState(http.StateActive) - } - if sc.curOpenStreams > sc.advMaxStreams { - // "Endpoints MUST NOT exceed the limit set by their - // peer. An endpoint that receives a HEADERS frame - // that causes their advertised concurrent stream - // limit to be exceeded MUST treat this as a stream - // error (Section 5.4.2) of type PROTOCOL_ERROR or - // REFUSED_STREAM." - if sc.unackedSettings == 0 { - // They should know better. - return StreamError{st.id, ErrCodeProtocol} - } - // Assume it's a network race, where they just haven't - // received our last SETTINGS update. But actually - // this can't happen yet, because we don't yet provide - // a way for users to adjust server parameters at - // runtime. - return StreamError{st.id, ErrCodeRefusedStream} - } - - rw, req, err := sc.newWriterAndRequest(st, f) - if err != nil { - return err - } - st.reqTrailer = req.Trailer - if st.reqTrailer != nil { - st.trailer = make(http.Header) - } - st.body = req.Body.(*requestBody).pipe // may be nil - st.declBodyBytes = req.ContentLength - - handler := sc.handler.ServeHTTP - if f.Truncated { - // Their header list was too long. Send a 431 error. - handler = handleHeaderListTooLong - } - - go sc.runHandler(rw, req, handler) - return nil -} - -func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { - sc := st.sc - sc.serveG.check() - if st.gotTrailerHeader { - return ConnectionError(ErrCodeProtocol) - } - st.gotTrailerHeader = true - if !f.StreamEnded() { - return StreamError{st.id, ErrCodeProtocol} - } - - if len(f.PseudoFields()) > 0 { - return StreamError{st.id, ErrCodeProtocol} - } - if st.trailer != nil { - for _, hf := range f.RegularFields() { - key := sc.canonicalHeader(hf.Name) - st.trailer[key] = append(st.trailer[key], hf.Value) - } - } - st.endStream() - return nil -} - -func (sc *serverConn) processPriority(f *PriorityFrame) error { - adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam) - return nil -} - -func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority PriorityParam) { - st, ok := streams[streamID] - if !ok { - // TODO: not quite correct (this streamID might - // already exist in the dep tree, but be closed), but - // close enough for now. - return - } - st.weight = priority.Weight - parent := streams[priority.StreamDep] // might be nil - if parent == st { - // if client tries to set this stream to be the parent of itself - // ignore and keep going - return - } - - // section 5.3.3: If a stream is made dependent on one of its - // own dependencies, the formerly dependent stream is first - // moved to be dependent on the reprioritized stream's previous - // parent. The moved dependency retains its weight. 
- for piter := parent; piter != nil; piter = piter.parent { - if piter == st { - parent.parent = st.parent - break - } - } - st.parent = parent - if priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) { - for _, openStream := range streams { - if openStream != st && openStream.parent == st.parent { - openStream.parent = st - } - } - } -} - -func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { - sc.serveG.check() - - method := f.PseudoValue("method") - path := f.PseudoValue("path") - scheme := f.PseudoValue("scheme") - authority := f.PseudoValue("authority") - - isConnect := method == "CONNECT" - if isConnect { - if path != "" || scheme != "" || authority == "" { - return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} - } - } else if method == "" || path == "" || - (scheme != "https" && scheme != "http") { - // See 8.1.2.6 Malformed Requests and Responses: - // - // Malformed requests or responses that are detected - // MUST be treated as a stream error (Section 5.4.2) - // of type PROTOCOL_ERROR." - // - // 8.1.2.3 Request Pseudo-Header Fields - // "All HTTP/2 requests MUST include exactly one valid - // value for the :method, :scheme, and :path - // pseudo-header fields" - return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} - } - - bodyOpen := !f.StreamEnded() - if method == "HEAD" && bodyOpen { - // HEAD requests can't have bodies - return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} - } - var tlsState *tls.ConnectionState // nil if not scheme https - - if scheme == "https" { - tlsState = sc.tlsState - } - - header := make(http.Header) - for _, hf := range f.RegularFields() { - header.Add(sc.canonicalHeader(hf.Name), hf.Value) - } - - if authority == "" { - authority = header.Get("Host") - } - needsContinue := header.Get("Expect") == "100-continue" - if needsContinue { - header.Del("Expect") - } - // Merge Cookie headers into one "; "-delimited value. - if cookies := header["Cookie"]; len(cookies) > 1 { - header.Set("Cookie", strings.Join(cookies, "; ")) - } - - // Setup Trailers - var trailer http.Header - for _, v := range header["Trailer"] { - for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(strings.TrimSpace(key)) - switch key { - case "Transfer-Encoding", "Trailer", "Content-Length": - // Bogus. (copy of http1 rules) - // Ignore. 
- default: - if trailer == nil { - trailer = make(http.Header) - } - trailer[key] = nil - } - } - } - delete(header, "Trailer") - - body := &requestBody{ - conn: sc, - stream: st, - needsContinue: needsContinue, - } - var url_ *url.URL - var requestURI string - if isConnect { - url_ = &url.URL{Host: authority} - requestURI = authority // mimic HTTP/1 server behavior - } else { - var err error - url_, err = url.ParseRequestURI(path) - if err != nil { - return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} - } - requestURI = path - } - req := &http.Request{ - Method: method, - URL: url_, - RemoteAddr: sc.remoteAddrStr, - Header: header, - RequestURI: requestURI, - Proto: "HTTP/2.0", - ProtoMajor: 2, - ProtoMinor: 0, - TLS: tlsState, - Host: authority, - Body: body, - Trailer: trailer, - } - if bodyOpen { - st.reqBuf = sc.getRequestBodyBuf() - body.pipe = &pipe{ - b: &fixedBuffer{buf: st.reqBuf}, - } - - if vv, ok := header["Content-Length"]; ok { - req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) - } else { - req.ContentLength = -1 - } - } - - rws := responseWriterStatePool.Get().(*responseWriterState) - bwSave := rws.bw - *rws = responseWriterState{} // zero all the fields - rws.conn = sc - rws.bw = bwSave - rws.bw.Reset(chunkWriter{rws}) - rws.stream = st - rws.req = req - rws.body = body - - rw := &responseWriter{rws: rws} - return rw, req, nil -} - -func (sc *serverConn) getRequestBodyBuf() []byte { - sc.serveG.check() - if buf := sc.freeRequestBodyBuf; buf != nil { - sc.freeRequestBodyBuf = nil - return buf - } - return make([]byte, initialWindowSize) -} - -// Run on its own goroutine. -func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { - didPanic := true - defer func() { - if didPanic { - e := recover() - // Same as net/http: - const size = 64 << 10 - buf := make([]byte, size) - buf = buf[:runtime.Stack(buf, false)] - sc.writeFrameFromHandler(frameWriteMsg{ - write: handlerPanicRST{rw.rws.stream.id}, - stream: rw.rws.stream, - }) - sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) - return - } - rw.handlerDone() - }() - handler(rw, req) - didPanic = false -} - -func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { - // 10.5.1 Limits on Header Block Size: - // .. "A server that receives a larger header block than it is - // willing to handle can send an HTTP 431 (Request Header Fields Too - // Large) status code" - const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ - w.WriteHeader(statusRequestHeaderFieldsTooLarge) - io.WriteString(w, "
<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>
") -} - -// called from handler goroutines. -// h may be nil. -func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error { - sc.serveG.checkNotOn() // NOT on - var errc chan error - if headerData.h != nil { - // If there's a header map (which we don't own), so we have to block on - // waiting for this frame to be written, so an http.Flush mid-handler - // writes out the correct value of keys, before a handler later potentially - // mutates it. - errc = errChanPool.Get().(chan error) - } - if err := sc.writeFrameFromHandler(frameWriteMsg{ - write: headerData, - stream: st, - done: errc, - }); err != nil { - return err - } - if errc != nil { - select { - case err := <-errc: - errChanPool.Put(errc) - return err - case <-sc.doneServing: - return errClientDisconnected - case <-st.cw: - return errStreamClosed - } - } - return nil -} - -// called from handler goroutines. -func (sc *serverConn) write100ContinueHeaders(st *stream) { - sc.writeFrameFromHandler(frameWriteMsg{ - write: write100ContinueHeadersFrame{st.id}, - stream: st, - }) -} - -// A bodyReadMsg tells the server loop that the http.Handler read n -// bytes of the DATA from the client on the given stream. -type bodyReadMsg struct { - st *stream - n int -} - -// called from handler goroutines. -// Notes that the handler for the given stream ID read n bytes of its body -// and schedules flow control tokens to be sent. -func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) { - sc.serveG.checkNotOn() // NOT on - select { - case sc.bodyReadCh <- bodyReadMsg{st, n}: - case <-sc.doneServing: - } -} - -func (sc *serverConn) noteBodyRead(st *stream, n int) { - sc.serveG.check() - sc.sendWindowUpdate(nil, n) // conn-level - if st.state != stateHalfClosedRemote && st.state != stateClosed { - // Don't send this WINDOW_UPDATE if the stream is closed - // remotely. - sc.sendWindowUpdate(st, n) - } -} - -// st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate(st *stream, n int) { - sc.serveG.check() - // "The legal range for the increment to the flow control - // window is 1 to 2^31-1 (2,147,483,647) octets." - // A Go Read call on 64-bit machines could in theory read - // a larger Read than this. Very unlikely, but we handle it here - // rather than elsewhere for now. 
- const maxUint31 = 1<<31 - 1 - for n >= maxUint31 { - sc.sendWindowUpdate32(st, maxUint31) - n -= maxUint31 - } - sc.sendWindowUpdate32(st, int32(n)) -} - -// st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { - sc.serveG.check() - if n == 0 { - return - } - if n < 0 { - panic("negative update") - } - var streamID uint32 - if st != nil { - streamID = st.id - } - sc.writeFrame(frameWriteMsg{ - write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, - stream: st, - }) - var ok bool - if st == nil { - ok = sc.inflow.add(n) - } else { - ok = st.inflow.add(n) - } - if !ok { - panic("internal error; sent too many window updates without decrements?") - } -} - -type requestBody struct { - stream *stream - conn *serverConn - closed bool - pipe *pipe // non-nil if we have a HTTP entity message body - needsContinue bool // need to send a 100-continue -} - -func (b *requestBody) Close() error { - if b.pipe != nil { - b.pipe.CloseWithError(errClosedBody) - } - b.closed = true - return nil -} - -func (b *requestBody) Read(p []byte) (n int, err error) { - if b.needsContinue { - b.needsContinue = false - b.conn.write100ContinueHeaders(b.stream) - } - if b.pipe == nil { - return 0, io.EOF - } - n, err = b.pipe.Read(p) - if n > 0 { - b.conn.noteBodyReadFromHandler(b.stream, n) - } - return -} - -// responseWriter is the http.ResponseWriter implementation. It's -// intentionally small (1 pointer wide) to minimize garbage. The -// responseWriterState pointer inside is zeroed at the end of a -// request (in handlerDone) and calls on the responseWriter thereafter -// simply crash (caller's mistake), but the much larger responseWriterState -// and buffers are reused between multiple requests. -type responseWriter struct { - rws *responseWriterState -} - -// Optional http.ResponseWriter interfaces implemented. -var ( - _ http.CloseNotifier = (*responseWriter)(nil) - _ http.Flusher = (*responseWriter)(nil) - _ stringWriter = (*responseWriter)(nil) -) - -type responseWriterState struct { - // immutable within a request: - stream *stream - req *http.Request - body *requestBody // to close at end of request, if DATA frames didn't - conn *serverConn - - // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc - bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} - - // mutated by http.Handler goroutine: - handlerHeader http.Header // nil until called - snapHeader http.Header // snapshot of handlerHeader at WriteHeader time - trailers []string // set in writeChunk - status int // status code passed to WriteHeader - wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. - sentHeader bool // have we sent the header frame? - handlerDone bool // handler has finished - - sentContentLen int64 // non-zero if handler set a Content-Length header - wroteBytes int64 - - closeNotifierMu sync.Mutex // guards closeNotifierCh - closeNotifierCh chan bool // nil until first used -} - -type chunkWriter struct{ rws *responseWriterState } - -func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } - -func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } - -// declareTrailer is called for each Trailer header when the -// response header is written. It notes that a header will need to be -// written in the trailers at the end of the response. 
-func (rws *responseWriterState) declareTrailer(k string) { - k = http.CanonicalHeaderKey(k) - switch k { - case "Transfer-Encoding", "Content-Length", "Trailer": - // Forbidden by RFC 2616 14.40. - return - } - if !strSliceContains(rws.trailers, k) { - rws.trailers = append(rws.trailers, k) - } -} - -// writeChunk writes chunks from the bufio.Writer. But because -// bufio.Writer may bypass its chunking, sometimes p may be -// arbitrarily large. -// -// writeChunk is also responsible (on the first chunk) for sending the -// HEADER response. -func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { - if !rws.wroteHeader { - rws.writeHeader(200) - } - - isHeadResp := rws.req.Method == "HEAD" - if !rws.sentHeader { - rws.sentHeader = true - var ctype, clen string - if clen = rws.snapHeader.Get("Content-Length"); clen != "" { - rws.snapHeader.Del("Content-Length") - clen64, err := strconv.ParseInt(clen, 10, 64) - if err == nil && clen64 >= 0 { - rws.sentContentLen = clen64 - } else { - clen = "" - } - } - if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { - clen = strconv.Itoa(len(p)) - } - _, hasContentType := rws.snapHeader["Content-Type"] - if !hasContentType && bodyAllowedForStatus(rws.status) { - ctype = http.DetectContentType(p) - } - var date string - if _, ok := rws.snapHeader["Date"]; !ok { - // TODO(bradfitz): be faster here, like net/http? measure. - date = time.Now().UTC().Format(http.TimeFormat) - } - - for _, v := range rws.snapHeader["Trailer"] { - foreachHeaderElement(v, rws.declareTrailer) - } - - endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp - err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ - streamID: rws.stream.id, - httpResCode: rws.status, - h: rws.snapHeader, - endStream: endStream, - contentType: ctype, - contentLength: clen, - date: date, - }) - if err != nil { - return 0, err - } - if endStream { - return 0, nil - } - } - if isHeadResp { - return len(p), nil - } - if len(p) == 0 && !rws.handlerDone { - return 0, nil - } - - if rws.handlerDone { - rws.promoteUndeclaredTrailers() - } - - endStream := rws.handlerDone && !rws.hasTrailers() - if len(p) > 0 || endStream { - // only send a 0 byte DATA frame if we're ending the stream. - if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { - return 0, err - } - } - - if rws.handlerDone && rws.hasTrailers() { - err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ - streamID: rws.stream.id, - h: rws.handlerHeader, - trailers: rws.trailers, - endStream: true, - }) - return len(p), err - } - return len(p), nil -} - -// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys -// that, if present, signals that the map entry is actually for -// the response trailers, and not the response headers. The prefix -// is stripped after the ServeHTTP call finishes and the values are -// sent in the trailers. -// -// This mechanism is intended only for trailers that are not known -// prior to the headers being written. If the set of trailers is fixed -// or known before the header is written, the normal Go trailers mechanism -// is preferred: -// https://golang.org/pkg/net/http/#ResponseWriter -// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers -const TrailerPrefix = "Trailer:" - -// promoteUndeclaredTrailers permits http.Handlers to set trailers -// after the header has already been flushed. 
Because the Go -// ResponseWriter interface has no way to set Trailers (only the -// Header), and because we didn't want to expand the ResponseWriter -// interface, and because nobody used trailers, and because RFC 2616 -// says you SHOULD (but not must) predeclare any trailers in the -// header, the official ResponseWriter rules said trailers in Go must -// be predeclared, and then we reuse the same ResponseWriter.Header() -// map to mean both Headers and Trailers. When it's time to write the -// Trailers, we pick out the fields of Headers that were declared as -// trailers. That worked for a while, until we found the first major -// user of Trailers in the wild: gRPC (using them only over http2), -// and gRPC libraries permit setting trailers mid-stream without -// predeclarnig them. So: change of plans. We still permit the old -// way, but we also permit this hack: if a Header() key begins with -// "Trailer:", the suffix of that key is a Trailer. Because ':' is an -// invalid token byte anyway, there is no ambiguity. (And it's already -// filtered out) It's mildly hacky, but not terrible. -// -// This method runs after the Handler is done and promotes any Header -// fields to be trailers. -func (rws *responseWriterState) promoteUndeclaredTrailers() { - for k, vv := range rws.handlerHeader { - if !strings.HasPrefix(k, TrailerPrefix) { - continue - } - trailerKey := strings.TrimPrefix(k, TrailerPrefix) - rws.declareTrailer(trailerKey) - rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv - } - - if len(rws.trailers) > 1 { - sorter := sorterPool.Get().(*sorter) - sorter.SortStrings(rws.trailers) - sorterPool.Put(sorter) - } -} - -func (w *responseWriter) Flush() { - rws := w.rws - if rws == nil { - panic("Header called after Handler finished") - } - if rws.bw.Buffered() > 0 { - if err := rws.bw.Flush(); err != nil { - // Ignore the error. The frame writer already knows. - return - } - } else { - // The bufio.Writer won't call chunkWriter.Write - // (writeChunk with zero bytes, so we have to do it - // ourselves to force the HTTP response header and/or - // final DATA frame (with END_STREAM) to be sent. 
- rws.writeChunk(nil) - } -} - -func (w *responseWriter) CloseNotify() <-chan bool { - rws := w.rws - if rws == nil { - panic("CloseNotify called after Handler finished") - } - rws.closeNotifierMu.Lock() - ch := rws.closeNotifierCh - if ch == nil { - ch = make(chan bool, 1) - rws.closeNotifierCh = ch - go func() { - rws.stream.cw.Wait() // wait for close - ch <- true - }() - } - rws.closeNotifierMu.Unlock() - return ch -} - -func (w *responseWriter) Header() http.Header { - rws := w.rws - if rws == nil { - panic("Header called after Handler finished") - } - if rws.handlerHeader == nil { - rws.handlerHeader = make(http.Header) - } - return rws.handlerHeader -} - -func (w *responseWriter) WriteHeader(code int) { - rws := w.rws - if rws == nil { - panic("WriteHeader called after Handler finished") - } - rws.writeHeader(code) -} - -func (rws *responseWriterState) writeHeader(code int) { - if !rws.wroteHeader { - rws.wroteHeader = true - rws.status = code - if len(rws.handlerHeader) > 0 { - rws.snapHeader = cloneHeader(rws.handlerHeader) - } - } -} - -func cloneHeader(h http.Header) http.Header { - h2 := make(http.Header, len(h)) - for k, vv := range h { - vv2 := make([]string, len(vv)) - copy(vv2, vv) - h2[k] = vv2 - } - return h2 -} - -// The Life Of A Write is like this: -// -// * Handler calls w.Write or w.WriteString -> -// * -> rws.bw (*bufio.Writer) -> -// * (Handler migth call Flush) -// * -> chunkWriter{rws} -// * -> responseWriterState.writeChunk(p []byte) -// * -> responseWriterState.writeChunk (most of the magic; see comment there) -func (w *responseWriter) Write(p []byte) (n int, err error) { - return w.write(len(p), p, "") -} - -func (w *responseWriter) WriteString(s string) (n int, err error) { - return w.write(len(s), nil, s) -} - -// either dataB or dataS is non-zero. -func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { - rws := w.rws - if rws == nil { - panic("Write called after Handler finished") - } - if !rws.wroteHeader { - w.WriteHeader(200) - } - if !bodyAllowedForStatus(rws.status) { - return 0, http.ErrBodyNotAllowed - } - rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set - if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { - // TODO: send a RST_STREAM - return 0, errors.New("http2: handler wrote more than declared Content-Length") - } - - if dataB != nil { - return rws.bw.Write(dataB) - } else { - return rws.bw.WriteString(dataS) - } -} - -func (w *responseWriter) handlerDone() { - rws := w.rws - rws.handlerDone = true - w.Flush() - w.rws = nil - responseWriterStatePool.Put(rws) -} - -// foreachHeaderElement splits v according to the "#rule" construction -// in RFC 2616 section 2.1 and calls fn for each non-empty element. -func foreachHeaderElement(v string, fn func(string)) { - v = textproto.TrimString(v) - if v == "" { - return - } - if !strings.Contains(v, ",") { - fn(v) - return - } - for _, f := range strings.Split(v, ",") { - if f = textproto.TrimString(f); f != "" { - fn(f) - } - } -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/transport.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/transport.go deleted file mode 100644 index 7d558a4b..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/transport.go +++ /dev/null @@ -1,1666 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Transport code. - -package http2 - -import ( - "bufio" - "bytes" - "compress/gzip" - "crypto/tls" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "sort" - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/net/http2/hpack" -) - -const ( - // transportDefaultConnFlow is how many connection-level flow control - // tokens we give the server at start-up, past the default 64k. - transportDefaultConnFlow = 1 << 30 - - // transportDefaultStreamFlow is how many stream-level flow - // control tokens we announce to the peer, and how many bytes - // we buffer per stream. - transportDefaultStreamFlow = 4 << 20 - - // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send - // a stream-level WINDOW_UPDATE for at a time. - transportDefaultStreamMinRefresh = 4 << 10 - - defaultUserAgent = "Go-http-client/2.0" -) - -// Transport is an HTTP/2 Transport. -// -// A Transport internally caches connections to servers. It is safe -// for concurrent use by multiple goroutines. -type Transport struct { - // DialTLS specifies an optional dial function for creating - // TLS connections for requests. - // - // If DialTLS is nil, tls.Dial is used. - // - // If the returned net.Conn has a ConnectionState method like tls.Conn, - // it will be used to set http.Response.TLS. - DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) - - // TLSClientConfig specifies the TLS configuration to use with - // tls.Client. If nil, the default configuration is used. - TLSClientConfig *tls.Config - - // ConnPool optionally specifies an alternate connection pool to use. - // If nil, the default is used. - ConnPool ClientConnPool - - // DisableCompression, if true, prevents the Transport from - // requesting compression with an "Accept-Encoding: gzip" - // request header when the Request contains no existing - // Accept-Encoding value. If the Transport requests gzip on - // its own and gets a gzipped response, it's transparently - // decoded in the Response.Body. However, if the user - // explicitly requested gzip it is not automatically - // uncompressed. - DisableCompression bool - - // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to - // send in the initial settings frame. It is how many bytes - // of response headers are allow. Unlike the http2 spec, zero here - // means to use a default limit (currently 10MB). If you actually - // want to advertise an ulimited value to the peer, Transport - // interprets the highest possible value here (0xffffffff or 1<<32-1) - // to mean no limit. - MaxHeaderListSize uint32 - - // t1, if non-nil, is the standard library Transport using - // this transport. Its settings are used (but not its - // RoundTrip method, etc). - t1 *http.Transport - - connPoolOnce sync.Once - connPoolOrDef ClientConnPool // non-nil version of ConnPool -} - -func (t *Transport) maxHeaderListSize() uint32 { - if t.MaxHeaderListSize == 0 { - return 10 << 20 - } - if t.MaxHeaderListSize == 0xffffffff { - return 0 - } - return t.MaxHeaderListSize -} - -func (t *Transport) disableCompression() bool { - return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) -} - -var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6") - -// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. 
-// It requires Go 1.6 or later and returns an error if the net/http package is too old -// or if t1 has already been HTTP/2-enabled. -func ConfigureTransport(t1 *http.Transport) error { - _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go - return err -} - -func (t *Transport) connPool() ClientConnPool { - t.connPoolOnce.Do(t.initConnPool) - return t.connPoolOrDef -} - -func (t *Transport) initConnPool() { - if t.ConnPool != nil { - t.connPoolOrDef = t.ConnPool - } else { - t.connPoolOrDef = &clientConnPool{t: t} - } -} - -// ClientConn is the state of a single HTTP/2 client connection to an -// HTTP/2 server. -type ClientConn struct { - t *Transport - tconn net.Conn // usually *tls.Conn, except specialized impls - tlsState *tls.ConnectionState // nil only for specialized impls - - // readLoop goroutine fields: - readerDone chan struct{} // closed on error - readerErr error // set before readerDone is closed - - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow flow // our conn-level flow control quota (cs.flow is per stream) - inflow flow // peer's conn-level flow control - closed bool - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - streams map[uint32]*clientStream // client-initiated - nextStreamID uint32 - bw *bufio.Writer - br *bufio.Reader - fr *Framer - // Settings from peer: - maxFrameSize uint32 - maxConcurrentStreams uint32 - initialWindowSize uint32 - hbuf bytes.Buffer // HPACK encoder writes into this - henc *hpack.Encoder - freeBuf [][]byte - - wmu sync.Mutex // held while writing; acquire AFTER mu if holding both - werr error // first write error that has occurred -} - -// clientStream is the state for a single HTTP/2 stream. One of these -// is created for each Transport.RoundTrip call. -type clientStream struct { - cc *ClientConn - req *http.Request - ID uint32 - resc chan resAndError - bufPipe pipe // buffered pipe with the flow-controlled response payload - requestedGzip bool - - flow flow // guarded by cc.mu - inflow flow // guarded by cc.mu - bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read - readErr error // sticky read error; owned by transportResponseBody.Read - stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu - - peerReset chan struct{} // closed on peer reset - resetErr error // populated before peerReset is closed - - done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu - - // owned by clientConnReadLoop: - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) - - trailer http.Header // accumulated trailers - resTrailer *http.Header // client's Response.Trailer -} - -// awaitRequestCancel runs in its own goroutine and waits for the user -// to either cancel a RoundTrip request (using the provided -// Request.Cancel channel), or for the request to be done (any way it -// might be removed from the cc.streams map: peer reset, successful -// completion, TCP connection breakage, etc) -func (cs *clientStream) awaitRequestCancel(cancel <-chan struct{}) { - if cancel == nil { - return - } - select { - case <-cancel: - cs.bufPipe.CloseWithError(errRequestCanceled) - cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - case <-cs.done: - } -} - -// checkReset reports any error sent in a RST_STREAM frame by the -// server. 
-func (cs *clientStream) checkReset() error { - select { - case <-cs.peerReset: - return cs.resetErr - default: - return nil - } -} - -func (cs *clientStream) abortRequestBodyWrite(err error) { - if err == nil { - panic("nil error") - } - cc := cs.cc - cc.mu.Lock() - cs.stopReqBody = err - cc.cond.Broadcast() - cc.mu.Unlock() -} - -type stickyErrWriter struct { - w io.Writer - err *error -} - -func (sew stickyErrWriter) Write(p []byte) (n int, err error) { - if *sew.err != nil { - return 0, *sew.err - } - n, err = sew.w.Write(p) - *sew.err = err - return -} - -var ErrNoCachedConn = errors.New("http2: no cached connection was available") - -// RoundTripOpt are options for the Transport.RoundTripOpt method. -type RoundTripOpt struct { - // OnlyCachedConn controls whether RoundTripOpt may - // create a new TCP connection. If set true and - // no cached connection is available, RoundTripOpt - // will return ErrNoCachedConn. - OnlyCachedConn bool -} - -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - return t.RoundTripOpt(req, RoundTripOpt{}) -} - -// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) -// and returns a host:port. The port 443 is added if needed. -func authorityAddr(authority string) (addr string) { - if _, _, err := net.SplitHostPort(authority); err == nil { - return authority - } - return net.JoinHostPort(authority, "443") -} - -// RoundTripOpt is like RoundTrip, but takes options. -func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if req.URL.Scheme != "https" { - return nil, errors.New("http2: unsupported scheme") - } - - addr := authorityAddr(req.URL.Host) - for { - cc, err := t.connPool().GetClientConn(req, addr) - if err != nil { - t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) - return nil, err - } - res, err := cc.RoundTrip(req) - if shouldRetryRequest(req, err) { - continue - } - if err != nil { - t.vlogf("RoundTrip failure: %v", err) - return nil, err - } - return res, nil - } -} - -// CloseIdleConnections closes any connections which were previously -// connected from previous requests but are now sitting idle. -// It does not interrupt any connections currently in use. -func (t *Transport) CloseIdleConnections() { - if cp, ok := t.connPool().(*clientConnPool); ok { - cp.closeIdleConnections() - } -} - -var ( - errClientConnClosed = errors.New("http2: client conn is closed") - errClientConnUnusable = errors.New("http2: client conn not usable") -) - -func shouldRetryRequest(req *http.Request, err error) bool { - // TODO: retry GET requests (no bodies) more aggressively, if shutdown - // before response. - return err == errClientConnUnusable -} - -func (t *Transport) dialClientConn(addr string) (*ClientConn, error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) - if err != nil { - return nil, err - } - return t.NewClientConn(tconn) -} - -func (t *Transport) newTLSConfig(host string) *tls.Config { - cfg := new(tls.Config) - if t.TLSClientConfig != nil { - *cfg = *t.TLSClientConfig - } - if !strSliceContains(cfg.NextProtos, NextProtoTLS) { - cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) 
- } - if cfg.ServerName == "" { - cfg.ServerName = host - } - return cfg -} - -func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { - if t.DialTLS != nil { - return t.DialTLS - } - return t.dialTLSDefault -} - -func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { - cn, err := tls.Dial(network, addr, cfg) - if err != nil { - return nil, err - } - if err := cn.Handshake(); err != nil { - return nil, err - } - if !cfg.InsecureSkipVerify { - if err := cn.VerifyHostname(cfg.ServerName); err != nil { - return nil, err - } - } - state := cn.ConnectionState() - if p := state.NegotiatedProtocol; p != NextProtoTLS { - return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) - } - if !state.NegotiatedProtocolIsMutual { - return nil, errors.New("http2: could not negotiate protocol mutually") - } - return cn, nil -} - -// disableKeepAlives reports whether connections should be closed as -// soon as possible after handling the first request. -func (t *Transport) disableKeepAlives() bool { - return t.t1 != nil && t.t1.DisableKeepAlives -} - -func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - if VerboseLogs { - t.vlogf("http2: Transport creating client conn to %v", c.RemoteAddr()) - } - if _, err := c.Write(clientPreface); err != nil { - t.vlogf("client preface write error: %v", err) - return nil, err - } - - cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. - streams: make(map[uint32]*clientStream), - } - cc.cond = sync.NewCond(&cc.mu) - cc.flow.add(int32(initialWindowSize)) - - // TODO: adjust this writer size to account for frame size + - // MTU + crypto/tls record padding. - cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) - cc.br = bufio.NewReader(c) - cc.fr = NewFramer(cc.bw, cc.br) - cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) - cc.fr.MaxHeaderListSize = t.maxHeaderListSize() - - // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on - // henc in response to SETTINGS frames? - cc.henc = hpack.NewEncoder(&cc.hbuf) - - if cs, ok := c.(connectionStater); ok { - state := cs.ConnectionState() - cc.tlsState = &state - } - - initialSettings := []Setting{ - {ID: SettingEnablePush, Val: 0}, - {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - } - if max := t.maxHeaderListSize(); max != 0 { - initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) - } - cc.fr.WriteSettings(initialSettings...) - cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.add(transportDefaultConnFlow + initialWindowSize) - cc.bw.Flush() - if cc.werr != nil { - return nil, cc.werr - } - - // Read the obligatory SETTINGS frame - f, err := cc.fr.ReadFrame() - if err != nil { - return nil, err - } - sf, ok := f.(*SettingsFrame) - if !ok { - return nil, fmt.Errorf("expected settings frame, got: %T", f) - } - cc.fr.WriteSettingsAck() - cc.bw.Flush() - - sf.ForeachSetting(func(s Setting) error { - switch s.ID { - case SettingMaxFrameSize: - cc.maxFrameSize = s.Val - case SettingMaxConcurrentStreams: - cc.maxConcurrentStreams = s.Val - case SettingInitialWindowSize: - cc.initialWindowSize = s.Val - default: - // TODO(bradfitz): handle more; at least SETTINGS_HEADER_TABLE_SIZE? 
- t.vlogf("Unhandled Setting: %v", s) - } - return nil - }) - - go cc.readLoop() - return cc, nil -} - -func (cc *ClientConn) setGoAway(f *GoAwayFrame) { - cc.mu.Lock() - defer cc.mu.Unlock() - cc.goAway = f -} - -func (cc *ClientConn) CanTakeNewRequest() bool { - cc.mu.Lock() - defer cc.mu.Unlock() - return cc.canTakeNewRequestLocked() -} - -func (cc *ClientConn) canTakeNewRequestLocked() bool { - return cc.goAway == nil && !cc.closed && - int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) && - cc.nextStreamID < 2147483647 -} - -func (cc *ClientConn) closeIfIdle() { - cc.mu.Lock() - if len(cc.streams) > 0 { - cc.mu.Unlock() - return - } - cc.closed = true - // TODO: do clients send GOAWAY too? maybe? Just Close: - cc.mu.Unlock() - - cc.tconn.Close() -} - -const maxAllocFrameSize = 512 << 10 - -// frameBuffer returns a scratch buffer suitable for writing DATA frames. -// They're capped at the min of the peer's max frame size or 512KB -// (kinda arbitrarily), but definitely capped so we don't allocate 4GB -// bufers. -func (cc *ClientConn) frameScratchBuffer() []byte { - cc.mu.Lock() - size := cc.maxFrameSize - if size > maxAllocFrameSize { - size = maxAllocFrameSize - } - for i, buf := range cc.freeBuf { - if len(buf) >= int(size) { - cc.freeBuf[i] = nil - cc.mu.Unlock() - return buf[:size] - } - } - cc.mu.Unlock() - return make([]byte, size) -} - -func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { - cc.mu.Lock() - defer cc.mu.Unlock() - const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. - if len(cc.freeBuf) < maxBufs { - cc.freeBuf = append(cc.freeBuf, buf) - return - } - for i, old := range cc.freeBuf { - if old == nil { - cc.freeBuf[i] = buf - return - } - } - // forget about it. -} - -// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not -// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. -var errRequestCanceled = errors.New("net/http: request canceled") - -func commaSeparatedTrailers(req *http.Request) (string, error) { - keys := make([]string, 0, len(req.Trailer)) - for k := range req.Trailer { - k = http.CanonicalHeaderKey(k) - switch k { - case "Transfer-Encoding", "Trailer", "Content-Length": - return "", &badStringError{"invalid Trailer key", k} - } - keys = append(keys, k) - } - if len(keys) > 0 { - sort.Strings(keys) - // TODO: could do better allocation-wise here, but trailers are rare, - // so being lazy for now. - return strings.Join(keys, ","), nil - } - return "", nil -} - -func (cc *ClientConn) responseHeaderTimeout() time.Duration { - if cc.t.t1 != nil { - return cc.t.t1.ResponseHeaderTimeout - } - // No way to do this (yet?) with just an http2.Transport. Probably - // no need. Request.Cancel this is the new way. We only need to support - // this for compatibility with the old http.Transport fields when - // we're doing transparent http2. - return 0 -} - -// checkConnHeaders checks whether req has any invalid connection-level headers. -// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. -// Certain headers are special-cased as okay but not transmitted later. 
-func checkConnHeaders(req *http.Request) error { - if v := req.Header.Get("Upgrade"); v != "" { - return errors.New("http2: invalid Upgrade request header") - } - if v := req.Header.Get("Transfer-Encoding"); (v != "" && v != "chunked") || len(req.Header["Transfer-Encoding"]) > 1 { - return errors.New("http2: invalid Transfer-Encoding request header") - } - if v := req.Header.Get("Connection"); (v != "" && v != "close" && v != "keep-alive") || len(req.Header["Connection"]) > 1 { - return errors.New("http2: invalid Connection request header") - } - return nil -} - -func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { - if err := checkConnHeaders(req); err != nil { - return nil, err - } - - trailers, err := commaSeparatedTrailers(req) - if err != nil { - return nil, err - } - hasTrailers := trailers != "" - - var body io.Reader = req.Body - contentLen := req.ContentLength - if req.Body != nil && contentLen == 0 { - // Test to see if it's actually zero or just unset. - var buf [1]byte - n, rerr := io.ReadFull(body, buf[:]) - if rerr != nil && rerr != io.EOF { - contentLen = -1 - body = errorReader{rerr} - } else if n == 1 { - // Oh, guess there is data in this Body Reader after all. - // The ContentLength field just wasn't set. - // Stich the Body back together again, re-attaching our - // consumed byte. - contentLen = -1 - body = io.MultiReader(bytes.NewReader(buf[:]), body) - } else { - // Body is actually empty. - body = nil - } - } - - cc.mu.Lock() - if cc.closed || !cc.canTakeNewRequestLocked() { - cc.mu.Unlock() - return nil, errClientConnUnusable - } - - cs := cc.newStream() - cs.req = req - hasBody := body != nil - - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - req.Method != "HEAD" { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. 
See https://golang.org/issue/8923 - cs.requestedGzip = true - } - - // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is - // sent by writeRequestBody below, along with any Trailers, - // again in form HEADERS{1}, CONTINUATION{0,}) - hdrs := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen) - cc.wmu.Lock() - endStream := !hasBody && !hasTrailers - werr := cc.writeHeaders(cs.ID, endStream, hdrs) - cc.wmu.Unlock() - cc.mu.Unlock() - - if werr != nil { - if hasBody { - req.Body.Close() // per RoundTripper contract - } - cc.forgetStreamID(cs.ID) - // Don't bother sending a RST_STREAM (our write already failed; - // no need to keep writing) - return nil, werr - } - - var respHeaderTimer <-chan time.Time - var bodyCopyErrc chan error // result of body copy - if hasBody { - bodyCopyErrc = make(chan error, 1) - go func() { - bodyCopyErrc <- cs.writeRequestBody(body, req.Body) - }() - } else { - if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) - defer timer.Stop() - respHeaderTimer = timer.C - } - } - - readLoopResCh := cs.resc - requestCanceledCh := requestCancel(req) - bodyWritten := false - - for { - select { - case re := <-readLoopResCh: - res := re.res - if re.err != nil || res.StatusCode > 299 { - // On error or status code 3xx, 4xx, 5xx, etc abort any - // ongoing write, assuming that the server doesn't care - // about our request body. If the server replied with 1xx or - // 2xx, however, then assume the server DOES potentially - // want our body (e.g. full-duplex streaming: - // golang.org/issue/13444). If it turns out the server - // doesn't, they'll RST_STREAM us soon enough. This is a - // heuristic to avoid adding knobs to Transport. Hopefully - // we can keep it. - cs.abortRequestBodyWrite(errStopReqBodyWrite) - } - if re.err != nil { - cc.forgetStreamID(cs.ID) - return nil, re.err - } - res.Request = req - res.TLS = cc.tlsState - return res, nil - case <-respHeaderTimer: - cc.forgetStreamID(cs.ID) - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - return nil, errTimeout - case <-requestCanceledCh: - cc.forgetStreamID(cs.ID) - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - return nil, errRequestCanceled - case <-cs.peerReset: - // processResetStream already removed the - // stream from the streams map; no need for - // forgetStreamID. 
- return nil, cs.resetErr - case err := <-bodyCopyErrc: - if err != nil { - return nil, err - } - bodyWritten = true - if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) - defer timer.Stop() - respHeaderTimer = timer.C - } - } - } -} - -// requires cc.wmu be held -func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error { - first := true // first frame written (HEADERS is first, then CONTINUATION) - frameSize := int(cc.maxFrameSize) - for len(hdrs) > 0 && cc.werr == nil { - chunk := hdrs - if len(chunk) > frameSize { - chunk = chunk[:frameSize] - } - hdrs = hdrs[len(chunk):] - endHeaders := len(hdrs) == 0 - if first { - cc.fr.WriteHeaders(HeadersFrameParam{ - StreamID: streamID, - BlockFragment: chunk, - EndStream: endStream, - EndHeaders: endHeaders, - }) - first = false - } else { - cc.fr.WriteContinuation(streamID, endHeaders, chunk) - } - } - // TODO(bradfitz): this Flush could potentially block (as - // could the WriteHeaders call(s) above), which means they - // wouldn't respond to Request.Cancel being readable. That's - // rare, but this should probably be in a goroutine. - cc.bw.Flush() - return cc.werr -} - -// internal error values; they don't escape to callers -var ( - // abort request body write; don't send cancel - errStopReqBodyWrite = errors.New("http2: aborting request body write") - - // abort request body write, but send stream reset of cancel. - errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") -) - -func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { - cc := cs.cc - sentEnd := false // whether we sent the final DATA frame w/ END_STREAM - buf := cc.frameScratchBuffer() - defer cc.putFrameScratchBuffer(buf) - - defer func() { - // TODO: write h12Compare test showing whether - // Request.Body is closed by the Transport, - // and in multiple cases: server replies <=299 and >299 - // while still writing request body - cerr := bodyCloser.Close() - if err == nil { - err = cerr - } - }() - - req := cs.req - hasTrailers := req.Trailer != nil - - var sawEOF bool - for !sawEOF { - n, err := body.Read(buf) - if err == io.EOF { - sawEOF = true - err = nil - } else if err != nil { - return err - } - - remain := buf[:n] - for len(remain) > 0 && err == nil { - var allowed int32 - allowed, err = cs.awaitFlowControl(len(remain)) - switch { - case err == errStopReqBodyWrite: - return err - case err == errStopReqBodyWriteAndCancel: - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - return err - case err != nil: - return err - } - cc.wmu.Lock() - data := remain[:allowed] - remain = remain[allowed:] - sentEnd = sawEOF && len(remain) == 0 && !hasTrailers - err = cc.fr.WriteData(cs.ID, sentEnd, data) - if err == nil { - // TODO(bradfitz): this flush is for latency, not bandwidth. - // Most requests won't need this. Make this opt-in or opt-out? - // Use some heuristic on the body type? Nagel-like timers? - // Based on 'n'? Only last chunk of this for loop, unless flow control - // tokens are low? For now, always: - err = cc.bw.Flush() - } - cc.wmu.Unlock() - } - if err != nil { - return err - } - } - - cc.wmu.Lock() - if !sentEnd { - var trls []byte - if hasTrailers { - cc.mu.Lock() - trls = cc.encodeTrailers(req) - cc.mu.Unlock() - } - - // Avoid forgetting to send an END_STREAM if the encoded - // trailers are 0 bytes. Both results produce and END_STREAM. 
- if len(trls) > 0 { - err = cc.writeHeaders(cs.ID, true, trls) - } else { - err = cc.fr.WriteData(cs.ID, true, nil) - } - } - if ferr := cc.bw.Flush(); ferr != nil && err == nil { - err = ferr - } - cc.wmu.Unlock() - - return err -} - -// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow -// control tokens from the server. -// It returns either the non-zero number of tokens taken or an error -// if the stream is dead. -func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { - cc := cs.cc - cc.mu.Lock() - defer cc.mu.Unlock() - for { - if cc.closed { - return 0, errClientConnClosed - } - if cs.stopReqBody != nil { - return 0, cs.stopReqBody - } - if err := cs.checkReset(); err != nil { - return 0, err - } - if a := cs.flow.available(); a > 0 { - take := a - if int(take) > maxBytes { - - take = int32(maxBytes) // can't truncate int; take is int32 - } - if take > int32(cc.maxFrameSize) { - take = int32(cc.maxFrameSize) - } - cs.flow.take(take) - return take, nil - } - cc.cond.Wait() - } -} - -type badStringError struct { - what string - str string -} - -func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } - -// requires cc.mu be held. -func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) []byte { - cc.hbuf.Reset() - - host := req.Host - if host == "" { - host = req.URL.Host - } - - // 8.1.2.3 Request Pseudo-Header Fields - // The :path pseudo-header field includes the path and query parts of the - // target URI (the path-absolute production and optionally a '?' character - // followed by the query production (see Sections 3.3 and 3.4 of - // [RFC3986]). - cc.writeHeader(":authority", host) - cc.writeHeader(":method", req.Method) - if req.Method != "CONNECT" { - cc.writeHeader(":path", req.URL.RequestURI()) - cc.writeHeader(":scheme", "https") - } - if trailers != "" { - cc.writeHeader("trailer", trailers) - } - - var didUA bool - for k, vv := range req.Header { - lowKey := strings.ToLower(k) - switch lowKey { - case "host", "content-length": - // Host is :authority, already sent. - // Content-Length is automatic, set below. - continue - case "connection", "proxy-connection", "transfer-encoding", "upgrade": - // Per 8.1.2.2 Connection-Specific Header - // Fields, don't send connection-specific - // fields. We deal with these earlier in - // RoundTrip, deciding whether they're - // error-worthy, but we don't want to mutate - // the user's *Request so at this point, just - // skip over them at this point. - continue - case "user-agent": - // Match Go's http1 behavior: at most one - // User-Agent. If set to nil or empty string, - // then omit it. Otherwise if not mentioned, - // include the default (below). - didUA = true - if len(vv) < 1 { - continue - } - vv = vv[:1] - if vv[0] == "" { - continue - } - } - for _, v := range vv { - cc.writeHeader(lowKey, v) - } - } - if shouldSendReqContentLength(req.Method, contentLength) { - cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10)) - } - if addGzipHeader { - cc.writeHeader("accept-encoding", "gzip") - } - if !didUA { - cc.writeHeader("user-agent", defaultUserAgent) - } - return cc.hbuf.Bytes() -} - -// shouldSendReqContentLength reports whether the http2.Transport should send -// a "content-length" request header. This logic is basically a copy of the net/http -// transferWriter.shouldSendContentLength. 
-// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). -// -1 means unknown. -func shouldSendReqContentLength(method string, contentLength int64) bool { - if contentLength > 0 { - return true - } - if contentLength < 0 { - return false - } - // For zero bodies, whether we send a content-length depends on the method. - // It also kinda doesn't matter for http2 either way, with END_STREAM. - switch method { - case "POST", "PUT", "PATCH": - return true - default: - return false - } -} - -// requires cc.mu be held. -func (cc *ClientConn) encodeTrailers(req *http.Request) []byte { - cc.hbuf.Reset() - for k, vv := range req.Trailer { - // Transfer-Encoding, etc.. have already been filter at the - // start of RoundTrip - lowKey := strings.ToLower(k) - for _, v := range vv { - cc.writeHeader(lowKey, v) - } - } - return cc.hbuf.Bytes() -} - -func (cc *ClientConn) writeHeader(name, value string) { - if VerboseLogs { - log.Printf("http2: Transport encoding header %q = %q", name, value) - } - cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) -} - -type resAndError struct { - res *http.Response - err error -} - -// requires cc.mu be held. -func (cc *ClientConn) newStream() *clientStream { - cs := &clientStream{ - cc: cc, - ID: cc.nextStreamID, - resc: make(chan resAndError, 1), - peerReset: make(chan struct{}), - done: make(chan struct{}), - } - cs.flow.add(int32(cc.initialWindowSize)) - cs.flow.setConnFlow(&cc.flow) - cs.inflow.add(transportDefaultStreamFlow) - cs.inflow.setConnFlow(&cc.inflow) - cc.nextStreamID += 2 - cc.streams[cs.ID] = cs - return cs -} - -func (cc *ClientConn) forgetStreamID(id uint32) { - cc.streamByID(id, true) -} - -func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { - cc.mu.Lock() - defer cc.mu.Unlock() - cs := cc.streams[id] - if andRemove && cs != nil && !cc.closed { - delete(cc.streams, id) - close(cs.done) - } - return cs -} - -// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. -type clientConnReadLoop struct { - cc *ClientConn - activeRes map[uint32]*clientStream // keyed by streamID - closeWhenIdle bool -} - -// readLoop runs in its own goroutine and reads and dispatches frames. -func (cc *ClientConn) readLoop() { - rl := &clientConnReadLoop{ - cc: cc, - activeRes: make(map[uint32]*clientStream), - } - - defer rl.cleanup() - cc.readerErr = rl.run() - if ce, ok := cc.readerErr.(ConnectionError); ok { - cc.wmu.Lock() - cc.fr.WriteGoAway(0, ErrCode(ce), nil) - cc.wmu.Unlock() - } -} - -func (rl *clientConnReadLoop) cleanup() { - cc := rl.cc - defer cc.tconn.Close() - defer cc.t.connPool().MarkDead(cc) - defer close(cc.readerDone) - - // Close any response bodies if the server closes prematurely. - // TODO: also do this if we've written the headers but not - // gotten a response yet. 
- err := cc.readerErr - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - cc.mu.Lock() - for _, cs := range rl.activeRes { - cs.bufPipe.CloseWithError(err) - } - for _, cs := range cc.streams { - select { - case cs.resc <- resAndError{err: err}: - default: - } - close(cs.done) - } - cc.closed = true - cc.cond.Broadcast() - cc.mu.Unlock() -} - -func (rl *clientConnReadLoop) run() error { - cc := rl.cc - rl.closeWhenIdle = cc.t.disableKeepAlives() - gotReply := false // ever saw a reply - for { - f, err := cc.fr.ReadFrame() - if err != nil { - cc.vlogf("Transport readFrame error: (%T) %v", err, err) - } - if se, ok := err.(StreamError); ok { - if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil { - rl.endStreamError(cs, cc.fr.errDetail) - } - continue - } else if err != nil { - return err - } - if VerboseLogs { - cc.vlogf("http2: Transport received %s", summarizeFrame(f)) - } - maybeIdle := false // whether frame might transition us to idle - - switch f := f.(type) { - case *MetaHeadersFrame: - err = rl.processHeaders(f) - maybeIdle = true - gotReply = true - case *DataFrame: - err = rl.processData(f) - maybeIdle = true - case *GoAwayFrame: - err = rl.processGoAway(f) - maybeIdle = true - case *RSTStreamFrame: - err = rl.processResetStream(f) - maybeIdle = true - case *SettingsFrame: - err = rl.processSettings(f) - case *PushPromiseFrame: - err = rl.processPushPromise(f) - case *WindowUpdateFrame: - err = rl.processWindowUpdate(f) - case *PingFrame: - err = rl.processPing(f) - default: - cc.logf("Transport: unhandled response frame type %T", f) - } - if err != nil { - return err - } - if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { - cc.closeIfIdle() - } - } -} - -func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, f.StreamEnded()) - if cs == nil { - // We'd get here if we canceled a request while the - // server had its response still in flight. So if this - // was just something we canceled, ignore it. - return nil - } - if !cs.pastHeaders { - cs.pastHeaders = true - } else { - return rl.processTrailers(cs, f) - } - - res, err := rl.handleResponse(cs, f) - if err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // Any other error type is a stream error. - cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) - cs.resc <- resAndError{err: err} - return nil // return nil from process* funcs to keep conn alive - } - if res == nil { - // (nil, nil) special case. See handleResponse docs. - return nil - } - if res.Body != noBody { - rl.activeRes[cs.ID] = cs - } - cs.resTrailer = &res.Trailer - cs.resc <- resAndError{res: res} - return nil -} - -// may return error types nil, or ConnectionError. Any other error value -// is a StreamError of type ErrCodeProtocol. The returned error in that case -// is the detail. -// -// As a special case, handleResponse may return (nil, nil) to skip the -// frame (currently only used for 100 expect continue). This special -// case is going away after Issue 13851 is fixed. 
-func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { - if f.Truncated { - return nil, errResponseHeaderListSize - } - - status := f.PseudoValue("status") - if status == "" { - return nil, errors.New("missing status pseudo header") - } - statusCode, err := strconv.Atoi(status) - if err != nil { - return nil, errors.New("malformed non-numeric status pseudo header") - } - - if statusCode == 100 { - // Just skip 100-continue response headers for now. - // TODO: golang.org/issue/13851 for doing it properly. - cs.pastHeaders = false // do it all again - return nil, nil - } - - header := make(http.Header) - res := &http.Response{ - Proto: "HTTP/2.0", - ProtoMajor: 2, - Header: header, - StatusCode: statusCode, - Status: status + " " + http.StatusText(statusCode), - } - for _, hf := range f.RegularFields() { - key := http.CanonicalHeaderKey(hf.Name) - if key == "Trailer" { - t := res.Trailer - if t == nil { - t = make(http.Header) - res.Trailer = t - } - foreachHeaderElement(hf.Value, func(v string) { - t[http.CanonicalHeaderKey(v)] = nil - }) - } else { - header[key] = append(header[key], hf.Value) - } - } - - streamEnded := f.StreamEnded() - if !streamEnded || cs.req.Method == "HEAD" { - res.ContentLength = -1 - if clens := res.Header["Content-Length"]; len(clens) == 1 { - if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { - res.ContentLength = clen64 - } else { - // TODO: care? unlike http/1, it won't mess up our framing, so it's - // more safe smuggling-wise to ignore. - } - } else if len(clens) > 1 { - // TODO: care? unlike http/1, it won't mess up our framing, so it's - // more safe smuggling-wise to ignore. - } - } - - if streamEnded { - res.Body = noBody - return res, nil - } - - buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage - cs.bufPipe = pipe{b: buf} - cs.bytesRemain = res.ContentLength - res.Body = transportResponseBody{cs} - go cs.awaitRequestCancel(requestCancel(cs.req)) - - if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { - res.Header.Del("Content-Encoding") - res.Header.Del("Content-Length") - res.ContentLength = -1 - res.Body = &gzipReader{body: res.Body} - } - return res, nil -} - -func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { - if cs.pastTrailers { - // Too many HEADERS frames for this stream. - return ConnectionError(ErrCodeProtocol) - } - cs.pastTrailers = true - if !f.StreamEnded() { - // We expect that any headers for trailers also - // has END_STREAM. - return ConnectionError(ErrCodeProtocol) - } - if len(f.PseudoFields()) > 0 { - // No pseudo header fields are defined for trailers. - // TODO: ConnectionError might be overly harsh? Check. - return ConnectionError(ErrCodeProtocol) - } - - trailer := make(http.Header) - for _, hf := range f.RegularFields() { - key := http.CanonicalHeaderKey(hf.Name) - trailer[key] = append(trailer[key], hf.Value) - } - cs.trailer = trailer - - rl.endStream(cs) - return nil -} - -// transportResponseBody is the concrete type of Transport.RoundTrip's -// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. -// On Close it sends RST_STREAM if EOF wasn't already seen. 
-type transportResponseBody struct { - cs *clientStream -} - -func (b transportResponseBody) Read(p []byte) (n int, err error) { - cs := b.cs - cc := cs.cc - - if cs.readErr != nil { - return 0, cs.readErr - } - n, err = b.cs.bufPipe.Read(p) - if cs.bytesRemain != -1 { - if int64(n) > cs.bytesRemain { - n = int(cs.bytesRemain) - if err == nil { - err = errors.New("net/http: server replied with more than declared Content-Length; truncated") - cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) - } - cs.readErr = err - return int(cs.bytesRemain), err - } - cs.bytesRemain -= int64(n) - if err == io.EOF && cs.bytesRemain > 0 { - err = io.ErrUnexpectedEOF - cs.readErr = err - return n, err - } - } - if n == 0 { - // No flow control tokens to send back. - return - } - - cc.mu.Lock() - defer cc.mu.Unlock() - - var connAdd, streamAdd int32 - // Check the conn-level first, before the stream-level. - if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { - connAdd = transportDefaultConnFlow - v - cc.inflow.add(connAdd) - } - if err == nil { // No need to refresh if the stream is over or failed. - if v := cs.inflow.available(); v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { - streamAdd = transportDefaultStreamFlow - v - cs.inflow.add(streamAdd) - } - } - if connAdd != 0 || streamAdd != 0 { - cc.wmu.Lock() - defer cc.wmu.Unlock() - if connAdd != 0 { - cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) - } - if streamAdd != 0 { - cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) - } - cc.bw.Flush() - } - return -} - -var errClosedResponseBody = errors.New("http2: response body closed") - -func (b transportResponseBody) Close() error { - cs := b.cs - if cs.bufPipe.Err() != io.EOF { - // TODO: write test for this - cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } - cs.bufPipe.BreakWithError(errClosedResponseBody) - return nil -} - -func (rl *clientConnReadLoop) processData(f *DataFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, f.StreamEnded()) - if cs == nil { - cc.mu.Lock() - neverSent := cc.nextStreamID - cc.mu.Unlock() - if f.StreamID >= neverSent { - // We never asked for this. - cc.logf("http2: Transport received unsolicited DATA frame; closing connection") - return ConnectionError(ErrCodeProtocol) - } - // We probably did ask for this, but canceled. Just ignore it. - // TODO: be stricter here? only silently ignore things which - // we canceled, but not things which were closed normally - // by the peer? Tough without accumulating too much state. - return nil - } - if data := f.Data(); len(data) > 0 { - if cs.bufPipe.b == nil { - // Data frame after it's already closed? - cc.logf("http2: Transport received DATA frame for closed stream; closing connection") - return ConnectionError(ErrCodeProtocol) - } - - // Check connection-level flow control. - cc.mu.Lock() - if cs.inflow.available() >= int32(len(data)) { - cs.inflow.take(int32(len(data))) - } else { - cc.mu.Unlock() - return ConnectionError(ErrCodeFlowControl) - } - cc.mu.Unlock() - - if _, err := cs.bufPipe.Write(data); err != nil { - rl.endStreamError(cs, err) - return err - } - } - - if f.StreamEnded() { - rl.endStream(cs) - } - return nil -} - -var errInvalidTrailers = errors.New("http2: invalid trailers") - -func (rl *clientConnReadLoop) endStream(cs *clientStream) { - // TODO: check that any declared content-length matches, like - // server.go's (*stream).endStream method. 
- rl.endStreamError(cs, nil) -} - -func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { - var code func() - if err == nil { - err = io.EOF - code = cs.copyTrailers - } - cs.bufPipe.closeWithErrorAndCode(err, code) - delete(rl.activeRes, cs.ID) - if cs.req.Close || cs.req.Header.Get("Connection") == "close" { - rl.closeWhenIdle = true - } -} - -func (cs *clientStream) copyTrailers() { - for k, vv := range cs.trailer { - t := cs.resTrailer - if *t == nil { - *t = make(http.Header) - } - (*t)[k] = vv - } -} - -func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { - cc := rl.cc - cc.t.connPool().MarkDead(cc) - if f.ErrCode != 0 { - // TODO: deal with GOAWAY more. particularly the error code - cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) - } - cc.setGoAway(f) - return nil -} - -func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { - cc := rl.cc - cc.mu.Lock() - defer cc.mu.Unlock() - return f.ForeachSetting(func(s Setting) error { - switch s.ID { - case SettingMaxFrameSize: - cc.maxFrameSize = s.Val - case SettingMaxConcurrentStreams: - cc.maxConcurrentStreams = s.Val - case SettingInitialWindowSize: - // TODO: error if this is too large. - - // TODO: adjust flow control of still-open - // frames by the difference of the old initial - // window size and this one. - cc.initialWindowSize = s.Val - default: - // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. - cc.vlogf("Unhandled Setting: %v", s) - } - return nil - }) -} - -func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, false) - if f.StreamID != 0 && cs == nil { - return nil - } - - cc.mu.Lock() - defer cc.mu.Unlock() - - fl := &cc.flow - if cs != nil { - fl = &cs.flow - } - if !fl.add(int32(f.Increment)) { - return ConnectionError(ErrCodeFlowControl) - } - cc.cond.Broadcast() - return nil -} - -func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.cc.streamByID(f.StreamID, true) - if cs == nil { - // TODO: return error if server tries to RST_STREAM an idle stream - return nil - } - select { - case <-cs.peerReset: - // Already reset. - // This is the only goroutine - // which closes this, so there - // isn't a race. - default: - err := StreamError{cs.ID, f.ErrCode} - cs.resetErr = err - close(cs.peerReset) - cs.bufPipe.CloseWithError(err) - cs.cc.cond.Broadcast() // wake up checkReset via clientStream.awaitFlowControl - } - delete(rl.activeRes, cs.ID) - return nil -} - -func (rl *clientConnReadLoop) processPing(f *PingFrame) error { - if f.IsAck() { - // 6.7 PING: " An endpoint MUST NOT respond to PING frames - // containing this flag." - return nil - } - cc := rl.cc - cc.wmu.Lock() - defer cc.wmu.Unlock() - if err := cc.fr.WritePing(true, f.Data); err != nil { - return err - } - return cc.bw.Flush() -} - -func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { - // We told the peer we don't want them. - // Spec says: - // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH - // setting of the peer endpoint is set to 0. An endpoint that - // has set this setting and has received acknowledgement MUST - // treat the receipt of a PUSH_PROMISE frame as a connection - // error (Section 5.4.1) of type PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) -} - -func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { - // TODO: do something with err?
send it as a debug frame to the peer? - // But that's only in GOAWAY. Invent a new frame type? Is there one already? - cc.wmu.Lock() - cc.fr.WriteRSTStream(streamID, code) - cc.bw.Flush() - cc.wmu.Unlock() -} - -var ( - errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") - errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") -) - -func (cc *ClientConn) logf(format string, args ...interface{}) { - cc.t.logf(format, args...) -} - -func (cc *ClientConn) vlogf(format string, args ...interface{}) { - cc.t.vlogf(format, args...) -} - -func (t *Transport) vlogf(format string, args ...interface{}) { - if VerboseLogs { - t.logf(format, args...) - } -} - -func (t *Transport) logf(format string, args ...interface{}) { - log.Printf(format, args...) -} - -var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) - -func strSliceContains(ss []string, s string) bool { - for _, v := range ss { - if v == s { - return true - } - } - return false -} - -type erringRoundTripper struct{ err error } - -func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } - -// gzipReader wraps a response body so it can lazily -// call gzip.NewReader on the first call to Read -type gzipReader struct { - body io.ReadCloser // underlying Response.Body - zr *gzip.Reader // lazily-initialized gzip reader - zerr error // sticky error -} - -func (gz *gzipReader) Read(p []byte) (n int, err error) { - if gz.zerr != nil { - return 0, gz.zerr - } - if gz.zr == nil { - gz.zr, err = gzip.NewReader(gz.body) - if err != nil { - gz.zerr = err - return 0, err - } - } - return gz.zr.Read(p) -} - -func (gz *gzipReader) Close() error { - return gz.body.Close() -} - -type errorReader struct{ err error } - -func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/write.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/write.go deleted file mode 100644 index 0143b24c..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/write.go +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "bytes" - "fmt" - "log" - "net/http" - "time" - - "golang.org/x/net/http2/hpack" -) - -// writeFramer is implemented by any type that is used to write frames. -type writeFramer interface { - writeFrame(writeContext) error -} - -// writeContext is the interface needed by the various frame writer -// types below. All the writeFrame methods below are scheduled via the -// frame writing scheduler (see writeScheduler in writesched.go). -// -// This interface is implemented by *serverConn. -// -// TODO: decide whether to a) use this in the client code (which didn't -// end up using this yet, because it has a simpler design, not -// currently implementing priorities), or b) delete this and -// make the server code a bit more concrete. -type writeContext interface { - Framer() *Framer - Flush() error - CloseConn() error - // HeaderEncoder returns an HPACK encoder that writes to the - // returned buffer. - HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) -} - -// endsStream reports whether the given frame writer w will locally -// close the stream. 
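-// Only a writeData or writeResHeaders frame with its endStream flag set -// closes the stream; every other frame type leaves it open.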
-func endsStream(w writeFramer) bool { - switch v := w.(type) { - case *writeData: - return v.endStream - case *writeResHeaders: - return v.endStream - case nil: - // This can only happen if the caller reuses w after it's - // been intentionally nil'ed out to prevent use. Keep this - // here to catch future refactoring breaking it. - panic("endsStream called on nil writeFramer") - } - return false -} - -type flushFrameWriter struct{} - -func (flushFrameWriter) writeFrame(ctx writeContext) error { - return ctx.Flush() -} - -type writeSettings []Setting - -func (s writeSettings) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteSettings([]Setting(s)...) -} - -type writeGoAway struct { - maxStreamID uint32 - code ErrCode -} - -func (p *writeGoAway) writeFrame(ctx writeContext) error { - err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) - if p.code != 0 { - ctx.Flush() // ignore error: we're hanging up on them anyway - time.Sleep(50 * time.Millisecond) - ctx.CloseConn() - } - return err -} - -type writeData struct { - streamID uint32 - p []byte - endStream bool -} - -func (w *writeData) String() string { - return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) -} - -func (w *writeData) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) -} - -// handlerPanicRST is the message sent from handler goroutines when -// the handler panics. -type handlerPanicRST struct { - StreamID uint32 -} - -func (hp handlerPanicRST) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) -} - -func (se StreamError) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) -} - -type writePingAck struct{ pf *PingFrame } - -func (w writePingAck) writeFrame(ctx writeContext) error { - return ctx.Framer().WritePing(true, w.pf.Data) -} - -type writeSettingsAck struct{} - -func (writeSettingsAck) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteSettingsAck() -} - -// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames -// for HTTP response headers or trailers from a server handler. -type writeResHeaders struct { - streamID uint32 - httpResCode int // 0 means no ":status" line - h http.Header // may be nil - trailers []string // if non-nil, which keys of h to write. nil means all. - endStream bool - - date string - contentType string - contentLength string -} - -func encKV(enc *hpack.Encoder, k, v string) { - if VerboseLogs { - log.Printf("http2: server encoding header %q = %q", k, v) - } - enc.WriteField(hpack.HeaderField{Name: k, Value: v}) -} - -func (w *writeResHeaders) writeFrame(ctx writeContext) error { - enc, buf := ctx.HeaderEncoder() - buf.Reset() - - if w.httpResCode != 0 { - encKV(enc, ":status", httpCodeString(w.httpResCode)) - } - - encodeHeaders(enc, w.h, w.trailers) - - if w.contentType != "" { - encKV(enc, "content-type", w.contentType) - } - if w.contentLength != "" { - encKV(enc, "content-length", w.contentLength) - } - if w.date != "" { - encKV(enc, "date", w.date) - } - - headerBlock := buf.Bytes() - if len(headerBlock) == 0 && w.trailers == nil { - panic("unexpected empty hpack") - } - - // For now we're lazy and just pick the minimum MAX_FRAME_SIZE - // that all peers must support (16KB). Later we could care - // more and send larger frames if the peer advertised it, but - // there's little point. 
Most headers are small anyway (so we - // generally won't have CONTINUATION frames), and extra frames - // only waste 9 bytes anyway. - const maxFrameSize = 16384 - - first := true - for len(headerBlock) > 0 { - frag := headerBlock - if len(frag) > maxFrameSize { - frag = frag[:maxFrameSize] - } - headerBlock = headerBlock[len(frag):] - endHeaders := len(headerBlock) == 0 - var err error - if first { - first = false - err = ctx.Framer().WriteHeaders(HeadersFrameParam{ - StreamID: w.streamID, - BlockFragment: frag, - EndStream: w.endStream, - EndHeaders: endHeaders, - }) - } else { - err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag) - } - if err != nil { - return err - } - } - return nil -} - -type write100ContinueHeadersFrame struct { - streamID uint32 -} - -func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { - enc, buf := ctx.HeaderEncoder() - buf.Reset() - encKV(enc, ":status", "100") - return ctx.Framer().WriteHeaders(HeadersFrameParam{ - StreamID: w.streamID, - BlockFragment: buf.Bytes(), - EndStream: false, - EndHeaders: true, - }) -} - -type writeWindowUpdate struct { - streamID uint32 // or 0 for conn-level - n uint32 -} - -func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) -} - -func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { - if keys == nil { - sorter := sorterPool.Get().(*sorter) - // Using defer here, since the returned keys from the - // sorter.Keys method is only valid until the sorter - // is returned: - defer sorterPool.Put(sorter) - keys = sorter.Keys(h) - } - for _, k := range keys { - vv := h[k] - k = lowerHeader(k) - if !validHeaderFieldName(k) { - // TODO: return an error? golang.org/issue/14048 - // For now just omit it. - continue - } - isTE := k == "transfer-encoding" - for _, v := range vv { - if !validHeaderFieldValue(v) { - // TODO: return an error? golang.org/issue/14048 - // For now just omit it. - continue - } - // TODO: more of "8.1.2.2 Connection-Specific Header Fields" - if isTE && v != "trailers" { - continue - } - encKV(enc, k, v) - } - } -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/writesched.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/writesched.go deleted file mode 100644 index c24316ce..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/http2/writesched.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import "fmt" - -// frameWriteMsg is a request to write a frame. -type frameWriteMsg struct { - // write is the interface value that does the writing, once the - // writeScheduler (below) has decided to select this frame - // to write. The write functions are all defined in write.go. - write writeFramer - - stream *stream // used for prioritization. nil for non-stream frames. - - // done, if non-nil, must be a buffered channel with space for - // 1 message and is sent the return value from write (or an - // earlier error) when the frame has been written. 
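- // Buffering one slot keeps the send non-blocking even if the waiter - // has already given up and will never receive.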
- done chan error -} - -// for debugging only: -func (wm frameWriteMsg) String() string { - var streamID uint32 - if wm.stream != nil { - streamID = wm.stream.id - } - var des string - if s, ok := wm.write.(fmt.Stringer); ok { - des = s.String() - } else { - des = fmt.Sprintf("%T", wm.write) - } - return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des) -} - -// writeScheduler tracks pending frames to write, priorities, and decides -// the next one to use. It is not thread-safe. -type writeScheduler struct { - // zero are frames not associated with a specific stream. - // They're sent before any stream-specific frames. - zero writeQueue - - // maxFrameSize is the maximum size of a DATA frame - // we'll write. Must be non-zero and between 16K-16M. - maxFrameSize uint32 - - // sq contains the stream-specific queues, keyed by stream ID. - // when a stream is idle, it's deleted from the map. - sq map[uint32]*writeQueue - - // canSend is a slice of memory that's reused between frame - // scheduling decisions to hold the list of writeQueues (from sq) - // which have enough flow control data to send. After canSend is - // built, the best is selected. - canSend []*writeQueue - - // pool of empty queues for reuse. - queuePool []*writeQueue -} - -func (ws *writeScheduler) putEmptyQueue(q *writeQueue) { - if len(q.s) != 0 { - panic("queue must be empty") - } - ws.queuePool = append(ws.queuePool, q) -} - -func (ws *writeScheduler) getEmptyQueue() *writeQueue { - ln := len(ws.queuePool) - if ln == 0 { - return new(writeQueue) - } - q := ws.queuePool[ln-1] - ws.queuePool = ws.queuePool[:ln-1] - return q -} - -func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 } - -func (ws *writeScheduler) add(wm frameWriteMsg) { - st := wm.stream - if st == nil { - ws.zero.push(wm) - } else { - ws.streamQueue(st.id).push(wm) - } -} - -func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue { - if q, ok := ws.sq[streamID]; ok { - return q - } - if ws.sq == nil { - ws.sq = make(map[uint32]*writeQueue) - } - q := ws.getEmptyQueue() - ws.sq[streamID] = q - return q -} - -// take returns the most important frame to write and removes it from the scheduler. -// It is illegal to call this if the scheduler is empty or if there are no connection-level -// flow control bytes available. -func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) { - if ws.maxFrameSize == 0 { - panic("internal error: ws.maxFrameSize not initialized or invalid") - } - - // If there are any frames not associated with streams, prefer those first. - // These are usually SETTINGS, etc. - if !ws.zero.empty() { - return ws.zero.shift(), true - } - if len(ws.sq) == 0 { - return - } - - // Next, prioritize frames on streams that aren't DATA frames (no cost). - for id, q := range ws.sq { - if q.firstIsNoCost() { - return ws.takeFrom(id, q) - } - } - - // Now, all that remains are DATA frames with non-zero bytes to - // send. So pick the best one. - if len(ws.canSend) != 0 { - panic("should be empty") - } - for _, q := range ws.sq { - if n := ws.streamWritableBytes(q); n > 0 { - ws.canSend = append(ws.canSend, q) - } - } - if len(ws.canSend) == 0 { - return - } - defer ws.zeroCanSend() - - // TODO: find the best queue - q := ws.canSend[0] - - return ws.takeFrom(q.streamID(), q) -} - -// zeroCanSend is deferred from take.
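-// It nils out the saved entries before truncating so the previous pass's -// queues are not kept alive by the reused backing array.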
-func (ws *writeScheduler) zeroCanSend() { - for i := range ws.canSend { - ws.canSend[i] = nil - } - ws.canSend = ws.canSend[:0] -} - -// streamWritableBytes returns the number of DATA bytes we could write -// from the given queue's stream, if this stream/queue were -// selected. It is an error to call this if q's head isn't a -// *writeData. -func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 { - wm := q.head() - ret := wm.stream.flow.available() // max we can write - if ret == 0 { - return 0 - } - if int32(ws.maxFrameSize) < ret { - ret = int32(ws.maxFrameSize) - } - if ret == 0 { - panic("internal error: ws.maxFrameSize not initialized or invalid") - } - wd := wm.write.(*writeData) - if len(wd.p) < int(ret) { - ret = int32(len(wd.p)) - } - return ret -} - -func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) { - wm = q.head() - // If the first item in this queue costs flow control tokens - // and we don't have enough, write as much as we can. - if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 { - allowed := wm.stream.flow.available() // max we can write - if allowed == 0 { - // No quota available. Caller can try the next stream. - return frameWriteMsg{}, false - } - if int32(ws.maxFrameSize) < allowed { - allowed = int32(ws.maxFrameSize) - } - // TODO: further restrict the allowed size, because even if - // the peer says it's okay to write 16MB data frames, we might - // want to write smaller ones to properly weight competing - // streams' priorities. - - if len(wd.p) > int(allowed) { - wm.stream.flow.take(allowed) - chunk := wd.p[:allowed] - wd.p = wd.p[allowed:] - // Make up a new write message of a valid size, rather - // than shifting one off the queue. - return frameWriteMsg{ - stream: wm.stream, - write: &writeData{ - streamID: wd.streamID, - p: chunk, - // even if the original had endStream set, there - // are bytes remaining because len(wd.p) > allowed, - // so we know endStream is false: - endStream: false, - }, - // our caller is blocking on the final DATA frame, not - // these intermediates, so no need to wait: - done: nil, - }, true - } - wm.stream.flow.take(int32(len(wd.p))) - } - - q.shift() - if q.empty() { - ws.putEmptyQueue(q) - delete(ws.sq, id) - } - return wm, true -} - -func (ws *writeScheduler) forgetStream(id uint32) { - q, ok := ws.sq[id] - if !ok { - return - } - delete(ws.sq, id) - - // But keep it for others later. - for i := range q.s { - q.s[i] = frameWriteMsg{} - } - q.s = q.s[:0] - ws.putEmptyQueue(q) -} - -type writeQueue struct { - s []frameWriteMsg -} - -// streamID returns the stream ID for a non-empty stream-specific queue. -func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id } - -func (q *writeQueue) empty() bool { return len(q.s) == 0 } - -func (q *writeQueue) push(wm frameWriteMsg) { - q.s = append(q.s, wm) -} - -// head returns the next item that would be removed by shift. -func (q *writeQueue) head() frameWriteMsg { - if len(q.s) == 0 { - panic("invalid use of queue") - } - return q.s[0] -} - -func (q *writeQueue) shift() frameWriteMsg { - if len(q.s) == 0 { - panic("invalid use of queue") - } - wm := q.s[0] - // TODO: less copy-happy queue.
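- // Shift by copying the remaining messages down one slot and zeroing - // the vacated tail entry so the backing array does not pin the message; - // a ring buffer would avoid the O(n) copy, but these queues stay short.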
- copy(q.s, q.s[1:]) - q.s[len(q.s)-1] = frameWriteMsg{} - q.s = q.s[:len(q.s)-1] - return wm -} - -func (q *writeQueue) firstIsNoCost() bool { - if df, ok := q.s[0].write.(*writeData); ok { - return len(df.p) == 0 - } - return true -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/internal/timeseries/timeseries.go deleted file mode 100644 index 3f90b730..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/internal/timeseries/timeseries.go +++ /dev/null @@ -1,525 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package timeseries implements a time series structure for stats collection. -package timeseries - -import ( - "fmt" - "log" - "time" -) - -const ( - timeSeriesNumBuckets = 64 - minuteHourSeriesNumBuckets = 60 -) - -var timeSeriesResolutions = []time.Duration{ - 1 * time.Second, - 10 * time.Second, - 1 * time.Minute, - 10 * time.Minute, - 1 * time.Hour, - 6 * time.Hour, - 24 * time.Hour, // 1 day - 7 * 24 * time.Hour, // 1 week - 4 * 7 * 24 * time.Hour, // 4 weeks - 16 * 7 * 24 * time.Hour, // 16 weeks -} - -var minuteHourSeriesResolutions = []time.Duration{ - 1 * time.Second, - 1 * time.Minute, -} - -// An Observable is a kind of data that can be aggregated in a time series. -type Observable interface { - Multiply(ratio float64) // Multiplies the data in self by a given ratio - Add(other Observable) // Adds the data from a different observation to self - Clear() // Clears the observation so it can be reused. - CopyFrom(other Observable) // Copies the contents of a given observation to self -} - -// Float attaches the methods of Observable to a float64. -type Float float64 - -// NewFloat returns a Float. -func NewFloat() Observable { - f := Float(0) - return &f -} - -// String returns the float as a string. -func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } - -// Value returns the float's value. -func (f *Float) Value() float64 { return float64(*f) } - -func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } - -func (f *Float) Add(other Observable) { - o := other.(*Float) - *f += *o -} - -func (f *Float) Clear() { *f = 0 } - -func (f *Float) CopyFrom(other Observable) { - o := other.(*Float) - *f = *o -} - -// A Clock tells the current time. -type Clock interface { - Time() time.Time -} - -type defaultClock int - -var defaultClockInstance defaultClock - -func (defaultClock) Time() time.Time { return time.Now() } - -// Information kept per level. Each level consists of a circular list of -// observations. The start of the level may be derived from end and the -// len(buckets) * sizeInMillis. 
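-// That is, start = end - size*len(buckets); the bucket slice is used as a -// ring, with oldest and newest wrapping around as the level advances.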
-type tsLevel struct { - oldest int // index to oldest bucketed Observable - newest int // index to newest bucketed Observable - end time.Time // end timestamp for this level - size time.Duration // duration of the bucketed Observable - buckets []Observable // collections of observations - provider func() Observable // used for creating new Observable -} - -func (l *tsLevel) Clear() { - l.oldest = 0 - l.newest = len(l.buckets) - 1 - l.end = time.Time{} - for i := range l.buckets { - if l.buckets[i] != nil { - l.buckets[i].Clear() - l.buckets[i] = nil - } - } -} - -func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { - l.size = size - l.provider = f - l.buckets = make([]Observable, numBuckets) -} - -// Keeps a sequence of levels. Each level is responsible for storing data at -// a given resolution. For example, the first level stores data at a one -// minute resolution while the second level stores data at a one hour -// resolution. - -// Each level is represented by a sequence of buckets. Each bucket spans an -// interval equal to the resolution of the level. New observations are added -// to the last bucket. -type timeSeries struct { - provider func() Observable // make more Observable - numBuckets int // number of buckets in each level - levels []*tsLevel // levels of bucketed Observable - lastAdd time.Time // time of last Observable tracked - total Observable // convenient aggregation of all Observable - clock Clock // Clock for getting current time - pending Observable // observations not yet bucketed - pendingTime time.Time // what time are we keeping in pending - dirty bool // if there are pending observations -} - -// init initializes a level according to the supplied criteria. -func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { - ts.provider = f - ts.numBuckets = numBuckets - ts.clock = clock - ts.levels = make([]*tsLevel, len(resolutions)) - - for i := range resolutions { - if i > 0 && resolutions[i-1] >= resolutions[i] { - log.Print("timeseries: resolutions must be monotonically increasing") - break - } - newLevel := new(tsLevel) - newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) - ts.levels[i] = newLevel - } - - ts.Clear() -} - -// Clear removes all observations from the time series. -func (ts *timeSeries) Clear() { - ts.lastAdd = time.Time{} - ts.total = ts.resetObservation(ts.total) - ts.pending = ts.resetObservation(ts.pending) - ts.pendingTime = time.Time{} - ts.dirty = false - - for i := range ts.levels { - ts.levels[i].Clear() - } -} - -// Add records an observation at the current time. -func (ts *timeSeries) Add(observation Observable) { - ts.AddWithTime(observation, ts.clock.Time()) -} - -// AddWithTime records an observation at the specified time. -func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { - - smallBucketDuration := ts.levels[0].size - - if t.After(ts.lastAdd) { - ts.lastAdd = t - } - - if t.After(ts.pendingTime) { - ts.advance(t) - ts.mergePendingUpdates() - ts.pendingTime = ts.levels[0].end - ts.pending.CopyFrom(observation) - ts.dirty = true - } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { - // The observation is close enough to go into the pending bucket. - // This compensates for clock skewing and small scheduling delays - // by letting the update stay in the fast path. 
- ts.pending.Add(observation) - ts.dirty = true - } else { - ts.mergeValue(observation, t) - } -} - -// mergeValue inserts the observation at the specified time in the past into all levels. -func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { - for _, level := range ts.levels { - index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) - if 0 <= index && index < ts.numBuckets { - bucketNumber := (level.oldest + index) % ts.numBuckets - if level.buckets[bucketNumber] == nil { - level.buckets[bucketNumber] = level.provider() - } - level.buckets[bucketNumber].Add(observation) - } - } - ts.total.Add(observation) -} - -// mergePendingUpdates applies the pending updates into all levels. -func (ts *timeSeries) mergePendingUpdates() { - if ts.dirty { - ts.mergeValue(ts.pending, ts.pendingTime) - ts.pending = ts.resetObservation(ts.pending) - ts.dirty = false - } -} - -// advance cycles the buckets at each level until the latest bucket in -// each level can hold the time specified. -func (ts *timeSeries) advance(t time.Time) { - if !t.After(ts.levels[0].end) { - return - } - for i := 0; i < len(ts.levels); i++ { - level := ts.levels[i] - if !level.end.Before(t) { - break - } - - // If the time is sufficiently far, just clear the level and advance - // directly. - if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { - for _, b := range level.buckets { - ts.resetObservation(b) - } - level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) - } - - for t.After(level.end) { - level.end = level.end.Add(level.size) - level.newest = level.oldest - level.oldest = (level.oldest + 1) % ts.numBuckets - ts.resetObservation(level.buckets[level.newest]) - } - - t = level.end - } -} - -// Latest returns the sum of the num latest buckets from the level. -func (ts *timeSeries) Latest(level, num int) Observable { - now := ts.clock.Time() - if ts.levels[0].end.Before(now) { - ts.advance(now) - } - - ts.mergePendingUpdates() - - result := ts.provider() - l := ts.levels[level] - index := l.newest - - for i := 0; i < num; i++ { - if l.buckets[index] != nil { - result.Add(l.buckets[index]) - } - if index == 0 { - index = ts.numBuckets - } - index-- - } - - return result -} - -// LatestBuckets returns a copy of the num latest buckets from level. -func (ts *timeSeries) LatestBuckets(level, num int) []Observable { - if level < 0 || level > len(ts.levels) { - log.Print("timeseries: bad level argument: ", level) - return nil - } - if num < 0 || num >= ts.numBuckets { - log.Print("timeseries: bad num argument: ", num) - return nil - } - - results := make([]Observable, num) - now := ts.clock.Time() - if ts.levels[0].end.Before(now) { - ts.advance(now) - } - - ts.mergePendingUpdates() - - l := ts.levels[level] - index := l.newest - - for i := 0; i < num; i++ { - result := ts.provider() - results[i] = result - if l.buckets[index] != nil { - result.CopyFrom(l.buckets[index]) - } - - if index == 0 { - index = ts.numBuckets - } - index -= 1 - } - return results -} - -// ScaleBy updates observations by scaling by factor. -func (ts *timeSeries) ScaleBy(factor float64) { - for _, l := range ts.levels { - for i := 0; i < ts.numBuckets; i++ { - l.buckets[i].Multiply(factor) - } - } - - ts.total.Multiply(factor) - ts.pending.Multiply(factor) -} - -// Range returns the sum of observations added over the specified time range. -// If start or finish times don't fall on bucket boundaries of the same -// level, then return values are approximate answers. 
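-// For example, ts.Range(now.Add(-time.Hour), now) sums everything recorded -// over the last hour; Recent is implemented exactly this way.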
-func (ts *timeSeries) Range(start, finish time.Time) Observable { - return ts.ComputeRange(start, finish, 1)[0] -} - -// Recent returns the sum of observations from the last delta. -func (ts *timeSeries) Recent(delta time.Duration) Observable { - now := ts.clock.Time() - return ts.Range(now.Add(-delta), now) -} - -// Total returns the total of all observations. -func (ts *timeSeries) Total() Observable { - ts.mergePendingUpdates() - return ts.total -} - -// ComputeRange computes a specified number of values into a slice using -// the observations recorded over the specified time period. The return -// values are approximate if the start or finish times don't fall on the -// bucket boundaries at the same level or if the number of buckets spanning -// the range is not an integral multiple of num. -func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { - if start.After(finish) { - log.Printf("timeseries: start > finish, %v>%v", start, finish) - return nil - } - - if num < 0 { - log.Printf("timeseries: num < 0, %v", num) - return nil - } - - results := make([]Observable, num) - - for _, l := range ts.levels { - if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { - ts.extract(l, start, finish, num, results) - return results - } - } - - // Failed to find a level that covers the desired range. So just - // extract from the last level, even if it doesn't cover the entire - // desired range. - ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) - - return results -} - -// RecentList returns the specified number of values in slice over the most -// recent time period of the specified range. -func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { - if delta < 0 { - return nil - } - now := ts.clock.Time() - return ts.ComputeRange(now.Add(-delta), now, num) -} - -// extract returns a slice of specified number of observations from a given -// level over a given range. -func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { - ts.mergePendingUpdates() - - srcInterval := l.size - dstInterval := finish.Sub(start) / time.Duration(num) - dstStart := start - srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) - - srcIndex := 0 - - // Where should scanning start? - if dstStart.After(srcStart) { - advance := dstStart.Sub(srcStart) / srcInterval - srcIndex += int(advance) - srcStart = srcStart.Add(advance * srcInterval) - } - - // The i'th value is computed as shown below. - // interval = (finish-start)/num - // i'th value = sum of observation in range - // [ start + i * interval, - // start + (i + 1) * interval ) - for i := 0; i < num; i++ { - results[i] = ts.resetObservation(results[i]) - dstEnd := dstStart.Add(dstInterval) - for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { - srcEnd := srcStart.Add(srcInterval) - if srcEnd.After(ts.lastAdd) { - srcEnd = ts.lastAdd - } - - if !srcEnd.Before(dstStart) { - srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] - if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { - // dst completely contains src. - if srcValue != nil { - results[i].Add(srcValue) - } - } else { - // dst partially overlaps src.
- overlapStart := maxTime(srcStart, dstStart) - overlapEnd := minTime(srcEnd, dstEnd) - base := srcEnd.Sub(srcStart) - fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() - - used := ts.provider() - if srcValue != nil { - used.CopyFrom(srcValue) - } - used.Multiply(fraction) - results[i].Add(used) - } - - if srcEnd.After(dstEnd) { - break - } - } - srcIndex++ - srcStart = srcStart.Add(srcInterval) - } - dstStart = dstStart.Add(dstInterval) - } -} - -// resetObservation clears the content so the struct may be reused. -func (ts *timeSeries) resetObservation(observation Observable) Observable { - if observation == nil { - observation = ts.provider() - } else { - observation.Clear() - } - return observation -} - -// TimeSeries tracks data at granularities from 1 second to 16 weeks. -type TimeSeries struct { - timeSeries -} - -// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. -func NewTimeSeries(f func() Observable) *TimeSeries { - return NewTimeSeriesWithClock(f, defaultClockInstance) -} - -// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for -// assigning timestamps. -func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { - ts := new(TimeSeries) - ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) - return ts -} - -// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. -type MinuteHourSeries struct { - timeSeries -} - -// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. -func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { - return NewMinuteHourSeriesWithClock(f, defaultClockInstance) -} - -// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for -// assigning timestamps. -func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { - ts := new(MinuteHourSeries) - ts.timeSeries.init(minuteHourSeriesResolutions, f, - minuteHourSeriesNumBuckets, clock) - return ts -} - -func (ts *MinuteHourSeries) Minute() Observable { - return ts.timeSeries.Latest(0, 60) -} - -func (ts *MinuteHourSeries) Hour() Observable { - return ts.timeSeries.Latest(1, 60) -} - -func minTime(a, b time.Time) time.Time { - if a.Before(b) { - return a - } - return b -} - -func maxTime(a, b time.Time) time.Time { - if a.After(b) { - return a - } - return b -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/events.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/events.go deleted file mode 100644 index e66c7e32..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/events.go +++ /dev/null @@ -1,524 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package trace - -import ( - "bytes" - "fmt" - "html/template" - "io" - "log" - "net/http" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "text/tabwriter" - "time" -) - -var eventsTmpl = template.Must(template.New("events").Funcs(template.FuncMap{ - "elapsed": elapsed, - "trimSpace": strings.TrimSpace, -}).Parse(eventsHTML)) - -const maxEventsPerLog = 100 - -type bucket struct { - MaxErrAge time.Duration - String string -} - -var buckets = []bucket{ - {0, "total"}, - {10 * time.Second, "errs<10s"}, - {1 * time.Minute, "errs<1m"}, - {10 * time.Minute, "errs<10m"}, - {1 * time.Hour, "errs<1h"}, - {10 * time.Hour, "errs<10h"}, - {24000 * time.Hour, "errors"}, -} - -// RenderEvents renders the HTML page typically served at /debug/events. -// It does not do any auth checking; see AuthRequest for the default auth check -// used by the handler registered on http.DefaultServeMux. -// req may be nil. -func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { - now := time.Now() - data := &struct { - Families []string // family names - Buckets []bucket - Counts [][]int // eventLog count per family/bucket - - // Set when a bucket has been selected. - Family string - Bucket int - EventLogs eventLogs - Expanded bool - }{ - Buckets: buckets, - } - - data.Families = make([]string, 0, len(families)) - famMu.RLock() - for name := range families { - data.Families = append(data.Families, name) - } - famMu.RUnlock() - sort.Strings(data.Families) - - // Count the number of eventLogs in each family for each error age. - data.Counts = make([][]int, len(data.Families)) - for i, name := range data.Families { - // TODO(sameer): move this loop under the family lock. - f := getEventFamily(name) - data.Counts[i] = make([]int, len(data.Buckets)) - for j, b := range data.Buckets { - data.Counts[i][j] = f.Count(now, b.MaxErrAge) - } - } - - if req != nil { - var ok bool - data.Family, data.Bucket, ok = parseEventsArgs(req) - if !ok { - // No-op - } else { - data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) - } - if data.EventLogs != nil { - defer data.EventLogs.Free() - sort.Sort(data.EventLogs) - } - if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { - data.Expanded = exp - } - } - - famMu.RLock() - defer famMu.RUnlock() - if err := eventsTmpl.Execute(w, data); err != nil { - log.Printf("net/trace: Failed executing template: %v", err) - } -} - -func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { - fam, bStr := req.FormValue("fam"), req.FormValue("b") - if fam == "" || bStr == "" { - return "", 0, false - } - b, err := strconv.Atoi(bStr) - if err != nil || b < 0 || b >= len(buckets) { - return "", 0, false - } - return fam, b, true -} - -// An EventLog provides a log of events associated with a specific object. -type EventLog interface { - // Printf formats its arguments with fmt.Sprintf and adds the - // result to the event log. - Printf(format string, a ...interface{}) - - // Errorf is like Printf, but it marks this event as an error. - Errorf(format string, a ...interface{}) - - // Finish declares that this event log is complete. - // The event log should not be used after calling this method. - Finish() -} - -// NewEventLog returns a new EventLog with the specified family name -// and title. 
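-// Typical use, as in the package example: -// el := trace.NewEventLog("mypkg.Fetcher", domain) -// ... -// el.Finish() // when the object is being torn down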
-func NewEventLog(family, title string) EventLog { - el := newEventLog() - el.ref() - el.Family, el.Title = family, title - el.Start = time.Now() - el.events = make([]logEntry, 0, maxEventsPerLog) - el.stack = make([]uintptr, 32) - n := runtime.Callers(2, el.stack) - el.stack = el.stack[:n] - - getEventFamily(family).add(el) - return el -} - -func (el *eventLog) Finish() { - getEventFamily(el.Family).remove(el) - el.unref() // matches ref in New -} - -var ( - famMu sync.RWMutex - families = make(map[string]*eventFamily) // family name => family -) - -func getEventFamily(fam string) *eventFamily { - famMu.Lock() - defer famMu.Unlock() - f := families[fam] - if f == nil { - f = &eventFamily{} - families[fam] = f - } - return f -} - -type eventFamily struct { - mu sync.RWMutex - eventLogs eventLogs -} - -func (f *eventFamily) add(el *eventLog) { - f.mu.Lock() - f.eventLogs = append(f.eventLogs, el) - f.mu.Unlock() -} - -func (f *eventFamily) remove(el *eventLog) { - f.mu.Lock() - defer f.mu.Unlock() - for i, el0 := range f.eventLogs { - if el == el0 { - copy(f.eventLogs[i:], f.eventLogs[i+1:]) - f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] - return - } - } -} - -func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { - f.mu.RLock() - defer f.mu.RUnlock() - for _, el := range f.eventLogs { - if el.hasRecentError(now, maxErrAge) { - n++ - } - } - return -} - -func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { - f.mu.RLock() - defer f.mu.RUnlock() - els = make(eventLogs, 0, len(f.eventLogs)) - for _, el := range f.eventLogs { - if el.hasRecentError(now, maxErrAge) { - el.ref() - els = append(els, el) - } - } - return -} - -type eventLogs []*eventLog - -// Free calls unref on each element of the list. -func (els eventLogs) Free() { - for _, el := range els { - el.unref() - } -} - -// eventLogs may be sorted in reverse chronological order. -func (els eventLogs) Len() int { return len(els) } -func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } -func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } - -// A logEntry is a timestamped log entry in an event log. -type logEntry struct { - When time.Time - Elapsed time.Duration // since previous event in log - NewDay bool // whether this event is on a different day to the previous event - What string - IsErr bool -} - -// WhenString returns a string representation of the elapsed time of the event. -// It will include the date if midnight was crossed. -func (e logEntry) WhenString() string { - if e.NewDay { - return e.When.Format("2006/01/02 15:04:05.000000") - } - return e.When.Format("15:04:05.000000") -} - -// An eventLog represents an active event log. -type eventLog struct { - // Family is the top-level grouping of event logs to which this belongs. - Family string - - // Title is the title of this event log. - Title string - - // Timing information. - Start time.Time - - // Call stack where this event log was created. - stack []uintptr - - // Append-only sequence of events. - // - // TODO(sameer): change this to a ring buffer to avoid the array copy - // when we hit maxEventsPerLog. - mu sync.RWMutex - events []logEntry - LastErrorTime time.Time - discarded int - - refs int32 // how many buckets this is in -} - -func (el *eventLog) reset() { - // Clear all but the mutex. Mutexes may not be copied, even when unlocked. 
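- // reset is only reached from freeEventLog, immediately before el is - // placed back on the freelist for reuse.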
- el.Family = "" - el.Title = "" - el.Start = time.Time{} - el.stack = nil - el.events = nil - el.LastErrorTime = time.Time{} - el.discarded = 0 - el.refs = 0 -} - -func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { - if maxErrAge == 0 { - return true - } - el.mu.RLock() - defer el.mu.RUnlock() - return now.Sub(el.LastErrorTime) < maxErrAge -} - -// delta returns the elapsed time since the last event or the log start, -// and whether it spans midnight. -// L >= el.mu -func (el *eventLog) delta(t time.Time) (time.Duration, bool) { - if len(el.events) == 0 { - return t.Sub(el.Start), false - } - prev := el.events[len(el.events)-1].When - return t.Sub(prev), prev.Day() != t.Day() - -} - -func (el *eventLog) Printf(format string, a ...interface{}) { - el.printf(false, format, a...) -} - -func (el *eventLog) Errorf(format string, a ...interface{}) { - el.printf(true, format, a...) -} - -func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { - e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} - el.mu.Lock() - e.Elapsed, e.NewDay = el.delta(e.When) - if len(el.events) < maxEventsPerLog { - el.events = append(el.events, e) - } else { - // Discard the oldest event. - if el.discarded == 0 { - // el.discarded starts at two to count for the event it - // is replacing, plus the next one that we are about to - // drop. - el.discarded = 2 - } else { - el.discarded++ - } - // TODO(sameer): if this causes allocations on a critical path, - // change eventLog.What to be a fmt.Stringer, as in trace.go. - el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) - // The timestamp of the discarded meta-event should be - // the time of the last event it is representing. - el.events[0].When = el.events[1].When - copy(el.events[1:], el.events[2:]) - el.events[maxEventsPerLog-1] = e - } - if e.IsErr { - el.LastErrorTime = e.When - } - el.mu.Unlock() -} - -func (el *eventLog) ref() { - atomic.AddInt32(&el.refs, 1) -} - -func (el *eventLog) unref() { - if atomic.AddInt32(&el.refs, -1) == 0 { - freeEventLog(el) - } -} - -func (el *eventLog) When() string { - return el.Start.Format("2006/01/02 15:04:05.000000") -} - -func (el *eventLog) ElapsedTime() string { - elapsed := time.Since(el.Start) - return fmt.Sprintf("%.6f", elapsed.Seconds()) -} - -func (el *eventLog) Stack() string { - buf := new(bytes.Buffer) - tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) - printStackRecord(tw, el.stack) - tw.Flush() - return buf.String() -} - -// printStackRecord prints the function + source line information -// for a single stack trace. -// Adapted from runtime/pprof/pprof.go. -func printStackRecord(w io.Writer, stk []uintptr) { - for _, pc := range stk { - f := runtime.FuncForPC(pc) - if f == nil { - continue - } - file, line := f.FileLine(pc) - name := f.Name() - // Hide runtime.goexit and any runtime functions at the beginning. - if strings.HasPrefix(name, "runtime.") { - continue - } - fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) - } -} - -func (el *eventLog) Events() []logEntry { - el.mu.RLock() - defer el.mu.RUnlock() - return el.events -} - -// freeEventLogs is a freelist of *eventLog -var freeEventLogs = make(chan *eventLog, 1000) - -// newEventLog returns an event log ready to use. -func newEventLog() *eventLog { - select { - case el := <-freeEventLogs: - return el - default: - return new(eventLog) - } -} - -// freeEventLog adds el to freeEventLogs if there's room. -// This is non-blocking.
-func freeEventLog(el *eventLog) { - el.reset() - select { - case freeEventLogs <- el: - default: - } -} - -const eventsHTML = ` - - - events - - - - -

/debug/events

- - - {{range $i, $fam := .Families}} - - - - {{range $j, $bucket := $.Buckets}} - {{$n := index $.Counts $i $j}} - - {{end}} - - {{end}} -
{{$fam}} - {{if $n}}{{end}} - [{{$n}} {{$bucket.String}}] - {{if $n}}{{end}} -
- -{{if $.EventLogs}} -
-

Family: {{$.Family}}

- -{{if $.Expanded}}{{end}} -[Summary]{{if $.Expanded}}{{end}} - -{{if not $.Expanded}}{{end}} -[Expanded]{{if not $.Expanded}}{{end}} - - - - {{range $el := $.EventLogs}} - - - - - {{if $.Expanded}} - - - - - - {{range $el.Events}} - - - - - - {{end}} - {{end}} - {{end}} -
WhenElapsed
{{$el.When}}{{$el.ElapsedTime}}{{$el.Title}} -
{{$el.Stack|trimSpace}}
{{.WhenString}}{{elapsed .Elapsed}}.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
-{{end}} - - -` diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/histogram.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/histogram.go deleted file mode 100644 index bb42aa53..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/histogram.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package trace - -// This file implements histogramming for RPC statistics collection. - -import ( - "bytes" - "fmt" - "html/template" - "log" - "math" - - "golang.org/x/net/internal/timeseries" -) - -const ( - bucketCount = 38 -) - -// histogram keeps counts of values in buckets that are spaced -// out in powers of 2: 0-1, 2-3, 4-7... -// histogram implements timeseries.Observable -type histogram struct { - sum int64 // running total of measurements - sumOfSquares float64 // square of running total - buckets []int64 // bucketed values for histogram - value int // holds a single value as an optimization - valueCount int64 // number of values recorded for single value -} - -// AddMeasurement records a value measurement observation to the histogram. -func (h *histogram) addMeasurement(value int64) { - // TODO: assert invariant - h.sum += value - h.sumOfSquares += float64(value) * float64(value) - - bucketIndex := getBucket(value) - - if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { - h.value = bucketIndex - h.valueCount++ - } else { - h.allocateBuckets() - h.buckets[bucketIndex]++ - } -} - -func (h *histogram) allocateBuckets() { - if h.buckets == nil { - h.buckets = make([]int64, bucketCount) - h.buckets[h.value] = h.valueCount - h.value = 0 - h.valueCount = -1 - } -} - -func log2(i int64) int { - n := 0 - for ; i >= 0x100; i >>= 8 { - n += 8 - } - for ; i > 0; i >>= 1 { - n += 1 - } - return n -} - -func getBucket(i int64) (index int) { - index = log2(i) - 1 - if index < 0 { - index = 0 - } - if index >= bucketCount { - index = bucketCount - 1 - } - return -} - -// Total returns the number of recorded observations. -func (h *histogram) total() (total int64) { - if h.valueCount >= 0 { - total = h.valueCount - } - for _, val := range h.buckets { - total += int64(val) - } - return -} - -// Average returns the average value of recorded observations. -func (h *histogram) average() float64 { - t := h.total() - if t == 0 { - return 0 - } - return float64(h.sum) / float64(t) -} - -// Variance returns the variance of recorded observations. -func (h *histogram) variance() float64 { - t := float64(h.total()) - if t == 0 { - return 0 - } - s := float64(h.sum) / t - return h.sumOfSquares/t - s*s -} - -// StandardDeviation returns the standard deviation of recorded observations. -func (h *histogram) standardDeviation() float64 { - return math.Sqrt(h.variance()) -} - -// PercentileBoundary estimates the value that the given fraction of recorded -// observations are less than. -func (h *histogram) percentileBoundary(percentile float64) int64 { - total := h.total() - - // Corner cases (make sure result is strictly less than Total()) - if total == 0 { - return 0 - } else if total == 1 { - return int64(h.average()) - } - - percentOfTotal := round(float64(total) * percentile) - var runningTotal int64 - - for i := range h.buckets { - value := h.buckets[i] - runningTotal += value - if runningTotal == percentOfTotal { - // We hit an exact bucket boundary. 
If the next bucket has data, it is a - // good estimate of the value. If the bucket is empty, we interpolate the - // midpoint between the next bucket's boundary and the next non-zero - // bucket. If the remaining buckets are all empty, then we use the - // boundary for the next bucket as the estimate. - j := uint8(i + 1) - min := bucketBoundary(j) - if runningTotal < total { - for h.buckets[j] == 0 { - j++ - } - } - max := bucketBoundary(j) - return min + round(float64(max-min)/2) - } else if runningTotal > percentOfTotal { - // The value is in this bucket. Interpolate the value. - delta := runningTotal - percentOfTotal - percentBucket := float64(value-delta) / float64(value) - bucketMin := bucketBoundary(uint8(i)) - nextBucketMin := bucketBoundary(uint8(i + 1)) - bucketSize := nextBucketMin - bucketMin - return bucketMin + round(percentBucket*float64(bucketSize)) - } - } - return bucketBoundary(bucketCount - 1) -} - -// Median returns the estimated median of the observed values. -func (h *histogram) median() int64 { - return h.percentileBoundary(0.5) -} - -// Add adds other to h. -func (h *histogram) Add(other timeseries.Observable) { - o := other.(*histogram) - if o.valueCount == 0 { - // Other histogram is empty - } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { - // Both have a single bucketed value, aggregate them - h.valueCount += o.valueCount - } else { - // Two different values necessitate buckets in this histogram - h.allocateBuckets() - if o.valueCount >= 0 { - h.buckets[o.value] += o.valueCount - } else { - for i := range h.buckets { - h.buckets[i] += o.buckets[i] - } - } - } - h.sumOfSquares += o.sumOfSquares - h.sum += o.sum -} - -// Clear resets the histogram to an empty state, removing all observed values. -func (h *histogram) Clear() { - h.buckets = nil - h.value = 0 - h.valueCount = 0 - h.sum = 0 - h.sumOfSquares = 0 -} - -// CopyFrom copies from other, which must be a *histogram, into h. -func (h *histogram) CopyFrom(other timeseries.Observable) { - o := other.(*histogram) - if o.valueCount == -1 { - h.allocateBuckets() - copy(h.buckets, o.buckets) - } - h.sum = o.sum - h.sumOfSquares = o.sumOfSquares - h.value = o.value - h.valueCount = o.valueCount -} - -// Multiply scales the histogram by the specified ratio. -func (h *histogram) Multiply(ratio float64) { - if h.valueCount == -1 { - for i := range h.buckets { - h.buckets[i] = int64(float64(h.buckets[i]) * ratio) - } - } else { - h.valueCount = int64(float64(h.valueCount) * ratio) - } - h.sum = int64(float64(h.sum) * ratio) - h.sumOfSquares = h.sumOfSquares * ratio -} - -// New creates a new histogram. -func (h *histogram) New() timeseries.Observable { - r := new(histogram) - r.Clear() - return r -} - -func (h *histogram) String() string { - return fmt.Sprintf("%d, %f, %d, %d, %v", - h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) -} - -// round returns the closest int64 to the argument -func round(in float64) int64 { - return int64(math.Floor(in + 0.5)) -} - -// bucketBoundary returns the first value in the bucket. -func bucketBoundary(bucket uint8) int64 { - if bucket == 0 { - return 0 - } - return 1 << bucket -} - -// bucketData holds data about a specific bucket for use in distTmpl. -type bucketData struct { - Lower, Upper int64 - N int64 - Pct, CumulativePct float64 - GraphWidth int -} - -// data holds data about a Distribution for use in distTmpl. 
-type data struct { - Buckets []*bucketData - Count, Median int64 - Mean, StandardDeviation float64 -} - -// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. -const maxHTMLBarWidth = 350.0 - -// newData returns data representing h for use in distTmpl. -func (h *histogram) newData() *data { - // Force the allocation of buckets to simplify the rendering implementation - h.allocateBuckets() - // We scale the bars on the right so that the largest bar is - // maxHTMLBarWidth pixels in width. - maxBucket := int64(0) - for _, n := range h.buckets { - if n > maxBucket { - maxBucket = n - } - } - total := h.total() - barsizeMult := maxHTMLBarWidth / float64(maxBucket) - var pctMult float64 - if total == 0 { - pctMult = 1.0 - } else { - pctMult = 100.0 / float64(total) - } - - buckets := make([]*bucketData, len(h.buckets)) - runningTotal := int64(0) - for i, n := range h.buckets { - if n == 0 { - continue - } - runningTotal += n - var upperBound int64 - if i < bucketCount-1 { - upperBound = bucketBoundary(uint8(i + 1)) - } else { - upperBound = math.MaxInt64 - } - buckets[i] = &bucketData{ - Lower: bucketBoundary(uint8(i)), - Upper: upperBound, - N: n, - Pct: float64(n) * pctMult, - CumulativePct: float64(runningTotal) * pctMult, - GraphWidth: int(float64(n) * barsizeMult), - } - } - return &data{ - Buckets: buckets, - Count: total, - Median: h.median(), - Mean: h.average(), - StandardDeviation: h.standardDeviation(), - } -} - -func (h *histogram) html() template.HTML { - buf := new(bytes.Buffer) - if err := distTmpl.Execute(buf, h.newData()); err != nil { - buf.Reset() - log.Printf("net/trace: couldn't execute template: %v", err) - } - return template.HTML(buf.String()) -} - -// Input: data -var distTmpl = template.Must(template.New("distTmpl").Parse(` - - - - - - - -
Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
-
- -{{range $b := .Buckets}} -{{if $b}} - - - - - - - - - -{{end}} -{{end}} -
[{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
-`)) diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/trace.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/trace.go deleted file mode 100644 index 9ee19362..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/net/trace/trace.go +++ /dev/null @@ -1,1062 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package trace implements tracing of requests and long-lived objects. -It exports HTTP interfaces on /debug/requests and /debug/events. - -A trace.Trace provides tracing for short-lived objects, usually requests. -A request handler might be implemented like this: - - func fooHandler(w http.ResponseWriter, req *http.Request) { - tr := trace.New("mypkg.Foo", req.URL.Path) - defer tr.Finish() - ... - tr.LazyPrintf("some event %q happened", str) - ... - if err := somethingImportant(); err != nil { - tr.LazyPrintf("somethingImportant failed: %v", err) - tr.SetError() - } - } - -The /debug/requests HTTP endpoint organizes the traces by family, -errors, and duration. It also provides histogram of request duration -for each family. - -A trace.EventLog provides tracing for long-lived objects, such as RPC -connections. - - // A Fetcher fetches URL paths for a single domain. - type Fetcher struct { - domain string - events trace.EventLog - } - - func NewFetcher(domain string) *Fetcher { - return &Fetcher{ - domain, - trace.NewEventLog("mypkg.Fetcher", domain), - } - } - - func (f *Fetcher) Fetch(path string) (string, error) { - resp, err := http.Get("http://" + f.domain + "/" + path) - if err != nil { - f.events.Errorf("Get(%q) = %v", path, err) - return "", err - } - f.events.Printf("Get(%q) = %s", path, resp.Status) - ... - } - - func (f *Fetcher) Close() error { - f.events.Finish() - return nil - } - -The /debug/events HTTP endpoint organizes the event logs by family and -by time since the last error. The expanded view displays recent log -entries and the log's call stack. -*/ -package trace - -import ( - "bytes" - "fmt" - "html/template" - "io" - "log" - "net" - "net/http" - "runtime" - "sort" - "strconv" - "sync" - "sync/atomic" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/internal/timeseries" -) - -// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. -// FOR DEBUGGING ONLY. This will slow down the program. -var DebugUseAfterFinish = false - -// AuthRequest determines whether a specific request is permitted to load the -// /debug/requests or /debug/events pages. -// -// It returns two bools; the first indicates whether the page may be viewed at all, -// and the second indicates whether sensitive events will be shown. -// -// AuthRequest may be replaced by a program to customise its authorisation requirements. -// -// The default AuthRequest function returns (true, true) iff the request comes from localhost/127.0.0.1/[::1]. -var AuthRequest = func(req *http.Request) (any, sensitive bool) { - // RemoteAddr is commonly in the form "IP" or "IP:port". - // If it is in the form "IP:port", split off the port. 
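- // e.g. "127.0.0.1:54321" splits to host "127.0.0.1", while a bare "::1" - // fails to split and falls through unchanged, which still matches below.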
- host, _, err := net.SplitHostPort(req.RemoteAddr) - if err != nil { - host = req.RemoteAddr - } - switch host { - case "localhost", "127.0.0.1", "::1": - return true, true - default: - return false, false - } -} - -func init() { - http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) { - any, sensitive := AuthRequest(req) - if !any { - http.Error(w, "not allowed", http.StatusUnauthorized) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - Render(w, req, sensitive) - }) - http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) { - any, sensitive := AuthRequest(req) - if !any { - http.Error(w, "not allowed", http.StatusUnauthorized) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - RenderEvents(w, req, sensitive) - }) -} - -// Render renders the HTML page typically served at /debug/requests. -// It does not do any auth checking; see AuthRequest for the default auth check -// used by the handler registered on http.DefaultServeMux. -// req may be nil. -func Render(w io.Writer, req *http.Request, sensitive bool) { - data := &struct { - Families []string - ActiveTraceCount map[string]int - CompletedTraces map[string]*family - - // Set when a bucket has been selected. - Traces traceList - Family string - Bucket int - Expanded bool - Traced bool - Active bool - ShowSensitive bool // whether to show sensitive events - - Histogram template.HTML - HistogramWindow string // e.g. "last minute", "last hour", "all time" - - // If non-zero, the set of traces is a partial set, - // and this is the total number. - Total int - }{ - CompletedTraces: completedTraces, - } - - data.ShowSensitive = sensitive - if req != nil { - // Allow show_sensitive=0 to force hiding of sensitive data for testing. - // This only goes one way; you can't use show_sensitive=1 to see things. - if req.FormValue("show_sensitive") == "0" { - data.ShowSensitive = false - } - - if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { - data.Expanded = exp - } - if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { - data.Traced = exp - } - } - - completedMu.RLock() - data.Families = make([]string, 0, len(completedTraces)) - for fam := range completedTraces { - data.Families = append(data.Families, fam) - } - completedMu.RUnlock() - sort.Strings(data.Families) - - // We are careful here to minimize the time spent locking activeMu, - // since that lock is required every time an RPC starts and finishes. 
- data.ActiveTraceCount = make(map[string]int, len(data.Families)) - activeMu.RLock() - for fam, s := range activeTraces { - data.ActiveTraceCount[fam] = s.Len() - } - activeMu.RUnlock() - - var ok bool - data.Family, data.Bucket, ok = parseArgs(req) - switch { - case !ok: - // No-op - case data.Bucket == -1: - data.Active = true - n := data.ActiveTraceCount[data.Family] - data.Traces = getActiveTraces(data.Family) - if len(data.Traces) < n { - data.Total = n - } - case data.Bucket < bucketsPerFamily: - if b := lookupBucket(data.Family, data.Bucket); b != nil { - data.Traces = b.Copy(data.Traced) - } - default: - if f := getFamily(data.Family, false); f != nil { - var obs timeseries.Observable - f.LatencyMu.RLock() - switch o := data.Bucket - bucketsPerFamily; o { - case 0: - obs = f.Latency.Minute() - data.HistogramWindow = "last minute" - case 1: - obs = f.Latency.Hour() - data.HistogramWindow = "last hour" - case 2: - obs = f.Latency.Total() - data.HistogramWindow = "all time" - } - f.LatencyMu.RUnlock() - if obs != nil { - data.Histogram = obs.(*histogram).html() - } - } - } - - if data.Traces != nil { - defer data.Traces.Free() - sort.Sort(data.Traces) - } - - completedMu.RLock() - defer completedMu.RUnlock() - if err := pageTmpl.ExecuteTemplate(w, "Page", data); err != nil { - log.Printf("net/trace: Failed executing template: %v", err) - } -} - -func parseArgs(req *http.Request) (fam string, b int, ok bool) { - if req == nil { - return "", 0, false - } - fam, bStr := req.FormValue("fam"), req.FormValue("b") - if fam == "" || bStr == "" { - return "", 0, false - } - b, err := strconv.Atoi(bStr) - if err != nil || b < -1 { - return "", 0, false - } - - return fam, b, true -} - -func lookupBucket(fam string, b int) *traceBucket { - f := getFamily(fam, false) - if f == nil || b < 0 || b >= len(f.Buckets) { - return nil - } - return f.Buckets[b] -} - -type contextKeyT string - -var contextKey = contextKeyT("golang.org/x/net/trace.Trace") - -// NewContext returns a copy of the parent context -// and associates it with a Trace. -func NewContext(ctx context.Context, tr Trace) context.Context { - return context.WithValue(ctx, contextKey, tr) -} - -// FromContext returns the Trace bound to the context, if any. -func FromContext(ctx context.Context) (tr Trace, ok bool) { - tr, ok = ctx.Value(contextKey).(Trace) - return -} - -// Trace represents an active request. -type Trace interface { - // LazyLog adds x to the event log. It will be evaluated each time the - // /debug/requests page is rendered. Any memory referenced by x will be - // pinned until the trace is finished and later discarded. - LazyLog(x fmt.Stringer, sensitive bool) - - // LazyPrintf evaluates its arguments with fmt.Sprintf each time the - // /debug/requests page is rendered. Any memory referenced by a will be - // pinned until the trace is finished and later discarded. - LazyPrintf(format string, a ...interface{}) - - // SetError declares that this trace resulted in an error. - SetError() - - // SetRecycler sets a recycler for the trace. - // f will be called for each event passed to LazyLog at a time when - // it is no longer required, whether while the trace is still active - // and the event is discarded, or when a completed trace is discarded. - SetRecycler(f func(interface{})) - - // SetTraceInfo sets the trace info for the trace. - // This is currently unused. - SetTraceInfo(traceID, spanID uint64) - - // SetMaxEvents sets the maximum number of events that will be stored - // in the trace. 
This has no effect if any events have already been - // added to the trace. - SetMaxEvents(m int) - - // Finish declares that this trace is complete. - // The trace should not be used after calling this method. - Finish() -} - -type lazySprintf struct { - format string - a []interface{} -} - -func (l *lazySprintf) String() string { - return fmt.Sprintf(l.format, l.a...) -} - -// New returns a new Trace with the specified family and title. -func New(family, title string) Trace { - tr := newTrace() - tr.ref() - tr.Family, tr.Title = family, title - tr.Start = time.Now() - tr.events = make([]event, 0, maxEventsPerTrace) - - activeMu.RLock() - s := activeTraces[tr.Family] - activeMu.RUnlock() - if s == nil { - activeMu.Lock() - s = activeTraces[tr.Family] // check again - if s == nil { - s = new(traceSet) - activeTraces[tr.Family] = s - } - activeMu.Unlock() - } - s.Add(tr) - - // Trigger allocation of the completed trace structure for this family. - // This will cause the family to be present in the request page during - // the first trace of this family. We don't care about the return value, - // nor is there any need for this to run inline, so we execute it in its - // own goroutine, but only if the family isn't allocated yet. - completedMu.RLock() - if _, ok := completedTraces[tr.Family]; !ok { - go allocFamily(tr.Family) - } - completedMu.RUnlock() - - return tr -} - -func (tr *trace) Finish() { - tr.Elapsed = time.Now().Sub(tr.Start) - if DebugUseAfterFinish { - buf := make([]byte, 4<<10) // 4 KB should be enough - n := runtime.Stack(buf, false) - tr.finishStack = buf[:n] - } - - activeMu.RLock() - m := activeTraces[tr.Family] - activeMu.RUnlock() - m.Remove(tr) - - f := getFamily(tr.Family, true) - for _, b := range f.Buckets { - if b.Cond.match(tr) { - b.Add(tr) - } - } - // Add a sample of elapsed time as microseconds to the family's timeseries - h := new(histogram) - h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3) - f.LatencyMu.Lock() - f.Latency.Add(h) - f.LatencyMu.Unlock() - - tr.unref() // matches ref in New -} - -const ( - bucketsPerFamily = 9 - tracesPerBucket = 10 - maxActiveTraces = 20 // Maximum number of active traces to show. - maxEventsPerTrace = 10 - numHistogramBuckets = 38 -) - -var ( - // The active traces. - activeMu sync.RWMutex - activeTraces = make(map[string]*traceSet) // family -> traces - - // Families of completed traces. - completedMu sync.RWMutex - completedTraces = make(map[string]*family) // family -> traces -) - -type traceSet struct { - mu sync.RWMutex - m map[*trace]bool - - // We could avoid the entire map scan in FirstN by having a slice of all the traces - // ordered by start time, and an index into that from the trace struct, with a periodic - // repack of the slice after enough traces finish; we could also use a skip list or similar. - // However, that would shift some of the expense from /debug/requests time to RPC time, - // which is probably the wrong trade-off. -} - -func (ts *traceSet) Len() int { - ts.mu.RLock() - defer ts.mu.RUnlock() - return len(ts.m) -} - -func (ts *traceSet) Add(tr *trace) { - ts.mu.Lock() - if ts.m == nil { - ts.m = make(map[*trace]bool) - } - ts.m[tr] = true - ts.mu.Unlock() -} - -func (ts *traceSet) Remove(tr *trace) { - ts.mu.Lock() - delete(ts.m, tr) - ts.mu.Unlock() -} - -// FirstN returns the first n traces ordered by time. 
-func (ts *traceSet) FirstN(n int) traceList { - ts.mu.RLock() - defer ts.mu.RUnlock() - - if n > len(ts.m) { - n = len(ts.m) - } - trl := make(traceList, 0, n) - - // Fast path for when no selectivity is needed. - if n == len(ts.m) { - for tr := range ts.m { - tr.ref() - trl = append(trl, tr) - } - sort.Sort(trl) - return trl - } - - // Pick the oldest n traces. - // This is inefficient. See the comment in the traceSet struct. - for tr := range ts.m { - // Put the first n traces into trl in the order they occur. - // When we have n, sort trl, and thereafter maintain its order. - if len(trl) < n { - tr.ref() - trl = append(trl, tr) - if len(trl) == n { - // This is guaranteed to happen exactly once during this loop. - sort.Sort(trl) - } - continue - } - if tr.Start.After(trl[n-1].Start) { - continue - } - - // Find where to insert this one. - tr.ref() - i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) - trl[n-1].unref() - copy(trl[i+1:], trl[i:]) - trl[i] = tr - } - - return trl -} - -func getActiveTraces(fam string) traceList { - activeMu.RLock() - s := activeTraces[fam] - activeMu.RUnlock() - if s == nil { - return nil - } - return s.FirstN(maxActiveTraces) -} - -func getFamily(fam string, allocNew bool) *family { - completedMu.RLock() - f := completedTraces[fam] - completedMu.RUnlock() - if f == nil && allocNew { - f = allocFamily(fam) - } - return f -} - -func allocFamily(fam string) *family { - completedMu.Lock() - defer completedMu.Unlock() - f := completedTraces[fam] - if f == nil { - f = newFamily() - completedTraces[fam] = f - } - return f -} - -// family represents a set of trace buckets and associated latency information. -type family struct { - // traces may occur in multiple buckets. - Buckets [bucketsPerFamily]*traceBucket - - // latency time series - LatencyMu sync.RWMutex - Latency *timeseries.MinuteHourSeries -} - -func newFamily() *family { - return &family{ - Buckets: [bucketsPerFamily]*traceBucket{ - {Cond: minCond(0)}, - {Cond: minCond(50 * time.Millisecond)}, - {Cond: minCond(100 * time.Millisecond)}, - {Cond: minCond(200 * time.Millisecond)}, - {Cond: minCond(500 * time.Millisecond)}, - {Cond: minCond(1 * time.Second)}, - {Cond: minCond(10 * time.Second)}, - {Cond: minCond(100 * time.Second)}, - {Cond: errorCond{}}, - }, - Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), - } -} - -// traceBucket represents a size-capped bucket of historic traces, -// along with a condition for a trace to belong to the bucket. -type traceBucket struct { - Cond cond - - // Ring buffer implementation of a fixed-size FIFO queue. - mu sync.RWMutex - buf [tracesPerBucket]*trace - start int // < tracesPerBucket - length int // <= tracesPerBucket -} - -func (b *traceBucket) Add(tr *trace) { - b.mu.Lock() - defer b.mu.Unlock() - - i := b.start + b.length - if i >= tracesPerBucket { - i -= tracesPerBucket - } - if b.length == tracesPerBucket { - // "Remove" an element from the bucket. - b.buf[i].unref() - b.start++ - if b.start == tracesPerBucket { - b.start = 0 - } - } - b.buf[i] = tr - if b.length < tracesPerBucket { - b.length++ - } - tr.ref() -} - -// Copy returns a copy of the traces in the bucket. -// If tracedOnly is true, only the traces with trace information will be returned. -// The logs will be ref'd before returning; the caller should call -// the Free method when it is done with them. -// TODO(dsymonds): keep track of traced requests in separate buckets. 
-func (b *traceBucket) Copy(tracedOnly bool) traceList { - b.mu.RLock() - defer b.mu.RUnlock() - - trl := make(traceList, 0, b.length) - for i, x := 0, b.start; i < b.length; i++ { - tr := b.buf[x] - if !tracedOnly || tr.spanID != 0 { - tr.ref() - trl = append(trl, tr) - } - x++ - if x == b.length { - x = 0 - } - } - return trl -} - -func (b *traceBucket) Empty() bool { - b.mu.RLock() - defer b.mu.RUnlock() - return b.length == 0 -} - -// cond represents a condition on a trace. -type cond interface { - match(t *trace) bool - String() string -} - -type minCond time.Duration - -func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } -func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } - -type errorCond struct{} - -func (e errorCond) match(t *trace) bool { return t.IsError } -func (e errorCond) String() string { return "errors" } - -type traceList []*trace - -// Free calls unref on each element of the list. -func (trl traceList) Free() { - for _, t := range trl { - t.unref() - } -} - -// traceList may be sorted in reverse chronological order. -func (trl traceList) Len() int { return len(trl) } -func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } -func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } - -// An event is a timestamped log entry in a trace. -type event struct { - When time.Time - Elapsed time.Duration // since previous event in trace - NewDay bool // whether this event is on a different day to the previous event - Recyclable bool // whether this event was passed via LazyLog - What interface{} // string or fmt.Stringer - Sensitive bool // whether this event contains sensitive information -} - -// WhenString returns a string representation of the elapsed time of the event. -// It will include the date if midnight was crossed. -func (e event) WhenString() string { - if e.NewDay { - return e.When.Format("2006/01/02 15:04:05.000000") - } - return e.When.Format("15:04:05.000000") -} - -// discarded represents a number of discarded events. -// It is stored as *discarded to make it easier to update in-place. -type discarded int - -func (d *discarded) String() string { - return fmt.Sprintf("(%d events discarded)", int(*d)) -} - -// trace represents an active or complete request, -// either sent or received by this program. -type trace struct { - // Family is the top-level grouping of traces to which this belongs. - Family string - - // Title is the title of this trace. - Title string - - // Timing information. - Start time.Time - Elapsed time.Duration // zero while active - - // Trace information if non-zero. - traceID uint64 - spanID uint64 - - // Whether this trace resulted in an error. - IsError bool - - // Append-only sequence of events (modulo discards). - mu sync.RWMutex - events []event - - refs int32 // how many buckets this is in - recycler func(interface{}) - disc discarded // scratch space to avoid allocation - - finishStack []byte // where finish was called, if DebugUseAfterFinish is set -} - -func (tr *trace) reset() { - // Clear all but the mutex. Mutexes may not be copied, even when unlocked. - tr.Family = "" - tr.Title = "" - tr.Start = time.Time{} - tr.Elapsed = 0 - tr.traceID = 0 - tr.spanID = 0 - tr.IsError = false - tr.events = nil - tr.refs = 0 - tr.recycler = nil - tr.disc = 0 - tr.finishStack = nil -} - -// delta returns the elapsed time since the last event or the trace start, -// and whether it spans midnight. 
-// L >= tr.mu -func (tr *trace) delta(t time.Time) (time.Duration, bool) { - if len(tr.events) == 0 { - return t.Sub(tr.Start), false - } - prev := tr.events[len(tr.events)-1].When - return t.Sub(prev), prev.Day() != t.Day() -} - -func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { - if DebugUseAfterFinish && tr.finishStack != nil { - buf := make([]byte, 4<<10) // 4 KB should be enough - n := runtime.Stack(buf, false) - log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) - } - - /* - NOTE TO DEBUGGERS - - If you are here because your program panicked in this code, - it is almost definitely the fault of code using this package, - and very unlikely to be the fault of this code. - - The most likely scenario is that some code elsewhere is using - a requestz.Trace after its Finish method is called. - You can temporarily set the DebugUseAfterFinish var - to help discover where that is; do not leave that var set, - since it makes this package much less efficient. - */ - - e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} - tr.mu.Lock() - e.Elapsed, e.NewDay = tr.delta(e.When) - if len(tr.events) < cap(tr.events) { - tr.events = append(tr.events, e) - } else { - // Discard the middle events. - di := int((cap(tr.events) - 1) / 2) - if d, ok := tr.events[di].What.(*discarded); ok { - (*d)++ - } else { - // disc starts at two to count for the event it is replacing, - // plus the next one that we are about to drop. - tr.disc = 2 - if tr.recycler != nil && tr.events[di].Recyclable { - go tr.recycler(tr.events[di].What) - } - tr.events[di].What = &tr.disc - } - // The timestamp of the discarded meta-event should be - // the time of the last event it is representing. - tr.events[di].When = tr.events[di+1].When - - if tr.recycler != nil && tr.events[di+1].Recyclable { - go tr.recycler(tr.events[di+1].What) - } - copy(tr.events[di+1:], tr.events[di+2:]) - tr.events[cap(tr.events)-1] = e - } - tr.mu.Unlock() -} - -func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { - tr.addEvent(x, true, sensitive) -} - -func (tr *trace) LazyPrintf(format string, a ...interface{}) { - tr.addEvent(&lazySprintf{format, a}, false, false) -} - -func (tr *trace) SetError() { tr.IsError = true } - -func (tr *trace) SetRecycler(f func(interface{})) { - tr.recycler = f -} - -func (tr *trace) SetTraceInfo(traceID, spanID uint64) { - tr.traceID, tr.spanID = traceID, spanID -} - -func (tr *trace) SetMaxEvents(m int) { - // Always keep at least three events: first, discarded count, last. - if len(tr.events) == 0 && m > 3 { - tr.events = make([]event, 0, m) - } -} - -func (tr *trace) ref() { - atomic.AddInt32(&tr.refs, 1) -} - -func (tr *trace) unref() { - if atomic.AddInt32(&tr.refs, -1) == 0 { - if tr.recycler != nil { - // freeTrace clears tr, so we hold tr.recycler and tr.events here. - go func(f func(interface{}), es []event) { - for _, e := range es { - if e.Recyclable { - f(e.What) - } - } - }(tr.recycler, tr.events) - } - - freeTrace(tr) - } -} - -func (tr *trace) When() string { - return tr.Start.Format("2006/01/02 15:04:05.000000") -} - -func (tr *trace) ElapsedTime() string { - t := tr.Elapsed - if t == 0 { - // Active trace. - t = time.Since(tr.Start) - } - return fmt.Sprintf("%.6f", t.Seconds()) -} - -func (tr *trace) Events() []event { - tr.mu.RLock() - defer tr.mu.RUnlock() - return tr.events -} - -var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? 
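The buffered-channel free list above is the pre-sync.Pool recycling idiom; the TODO on it hints at the replacement. A minimal sketch of how the newTrace/freeTrace pair that follows could look on top of sync.Pool (an assumption, not part of the deleted file; note a Pool may drop entries at any GC, and the DebugUseAfterFinish escape hatch is still needed):

	var tracePool = sync.Pool{
		New: func() interface{} { return new(trace) },
	}

	// newTrace returns a trace ready to use, recycled when possible.
	func newTrace() *trace {
		return tracePool.Get().(*trace)
	}

	// freeTrace recycles tr; reset clears every field except the mutex.
	func freeTrace(tr *trace) {
		if DebugUseAfterFinish {
			return // never reuse
		}
		tr.reset()
		tracePool.Put(tr)
	}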
- -// newTrace returns a trace ready to use. -func newTrace() *trace { - select { - case tr := <-traceFreeList: - return tr - default: - return new(trace) - } -} - -// freeTrace adds tr to traceFreeList if there's room. -// This is non-blocking. -func freeTrace(tr *trace) { - if DebugUseAfterFinish { - return // never reuse - } - tr.reset() - select { - case traceFreeList <- tr: - default: - } -} - -func elapsed(d time.Duration) string { - b := []byte(fmt.Sprintf("%.6f", d.Seconds())) - - // For subsecond durations, blank all zeros before decimal point, - // and all zeros between the decimal point and the first non-zero digit. - if d < time.Second { - dot := bytes.IndexByte(b, '.') - for i := 0; i < dot; i++ { - b[i] = ' ' - } - for i := dot + 1; i < len(b); i++ { - if b[i] == '0' { - b[i] = ' ' - } else { - break - } - } - } - - return string(b) -} - -var pageTmpl = template.Must(template.New("Page").Funcs(template.FuncMap{ - "elapsed": elapsed, - "add": func(a, b int) int { return a + b }, -}).Parse(pageHTML)) - -const pageHTML = ` -{{template "Prolog" .}} -{{template "StatusTable" .}} -{{template "Epilog" .}} - -{{define "Prolog"}} - - - /debug/requests - - - - -

-<body>
-<h1>/debug/requests</h1>
-{{end}} {{/* end of Prolog */}}
-
-{{define "StatusTable"}}
-<table id="tr-status">
-	{{range $fam := .Families}}
-	<tr>
-		<td class="family">{{$fam}}</td>
-
-		{{$n := index $.ActiveTraceCount $fam}}
-		<td class="active">
-			{{if $n}}<a href="?fam={{$fam}}&b=-1{{if $.Expanded}}&exp=1{{end}}">{{end}}
-			[{{$n}} active]
-			{{if $n}}</a>{{end}}
-		</td>
-
-		{{$f := index $.CompletedTraces $fam}}
-		{{range $i, $b := $f.Buckets}}
-		{{$empty := $b.Empty}}
-		<td class="{{if $empty}}empty{{end}}">
-			{{if not $empty}}<a href="?fam={{$fam}}&b={{$i}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
-			[{{.Cond}}]
-			{{if not $empty}}</a>{{end}}
-		</td>
-		{{end}}
-
-		{{$nb := len $f.Buckets}}
-		<td class="latency-first">
-			<a href="?fam={{$fam}}&b={{$nb}}">[minute]</a>
-		</td>
-		<td>
-			<a href="?fam={{$fam}}&b={{add $nb 1}}">[hour]</a>
-		</td>
-		<td>
-			<a href="?fam={{$fam}}&b={{add $nb 2}}">[total]</a>
-		</td>
-	</tr>
-	{{end}}
-</table>
-{{end}} {{/* end of StatusTable */}}
-
-{{define "Epilog"}}
-{{if $.Traces}}
-<hr />
-<h3>Family: {{$.Family}}</h3>
-
-{{if or $.Expanded $.Traced}}
-  <a href="?fam={{$.Family}}&b={{$.Bucket}}">[Normal/Summary]</a>
-{{else}}
-  [Normal/Summary]
-{{end}}
-
-{{if or (not $.Expanded) $.Traced}}
-  <a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">[Normal/Expanded]</a>
-{{else}}
-  [Normal/Expanded]
-{{end}}
-
-{{if not $.Active}}
-	{{if or $.Expanded (not $.Traced)}}
-	<a href="?fam={{$.Family}}&b={{$.Bucket}}&rtraced=1">[Traced/Summary]</a>
-	{{else}}
-	[Traced/Summary]
-	{{end}}
-	{{if or (not $.Expanded) (not $.Traced)}}
-	<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1&rtraced=1">[Traced/Expanded]</a>
-	{{else}}
-	[Traced/Expanded]
-	{{end}}
-{{end}}
-
-{{if $.Total}}
-<p><em>Showing <b>{{len $.Traces}}</b> of <b>{{$.Total}}</b> traces.</em></p>
-{{end}}
-
-<table id="reqs">
-	<caption>{{if $.Active}}Active{{else}}Completed{{end}} Requests</caption>
-	<tr><th>When</th><th>Elapsed&nbsp;(s)</th></tr>
-	{{range $tr := $.Traces}}
-	<tr class="first">
-		<td class="when">{{$tr.When}}</td>
-		<td class="elapsed">{{$tr.ElapsedTime}}</td>
-		<td>{{$tr.Title}}</td>
-	</tr>
-	{{/* TODO: include traceID/spanID */}}
-	{{if $.Expanded}}
-	{{range $tr.Events}}
-	<tr>
-		<td class="when">{{.WhenString}}</td>
-		<td class="elapsed">{{elapsed .Elapsed}}</td>
-		<td>{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}<em>[redacted]</em>{{end}}</td>
-	</tr>
-	{{end}}
-	{{end}}
-	{{end}}
-</table>
-{{end}} {{/* if $.Traces */}}
-
-{{if $.Histogram}}
-<h4>Latency (&micro;s) of {{$.Family}} over {{$.HistogramWindow}}</h4>
-{{$.Histogram}} -{{end}} {{/* if $.Histogram */}} - - - -{{end}} {{/* end of Epilog */}} -` diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/.travis.yml b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/.travis.yml deleted file mode 100644 index a035125c..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go - -go: - - 1.3 - - 1.4 - -install: - - export GOPATH="$HOME/gopath" - - mkdir -p "$GOPATH/src/golang.org/x" - - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2" - - go get -v -t -d golang.org/x/oauth2/... - -script: - - go test -v golang.org/x/oauth2/... diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/AUTHORS b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/AUTHORS deleted file mode 100644 index 15167cd7..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/CONTRIBUTING.md deleted file mode 100644 index 46aa2b12..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/CONTRIBUTING.md +++ /dev/null @@ -1,31 +0,0 @@ -# Contributing to Go - -Go is an open source project. - -It is the work of hundreds of contributors. We appreciate your help! - - -## Filing issues - -When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. -The gophers there will answer or ask you to file an issue if you've tripped over a bug. - -## Contributing code - -Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) -before sending patches. - -**We do not accept GitHub pull requests** -(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). - -Unless otherwise noted, the Go source files are distributed under -the BSD-style license found in the LICENSE file. - diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/CONTRIBUTORS deleted file mode 100644 index 1c4577e9..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/LICENSE b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/LICENSE deleted file mode 100644 index d02f24fd..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The oauth2 Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/README.md b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/README.md deleted file mode 100644 index 0d514173..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# OAuth2 for Go - -[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) - -oauth2 package contains a client implementation for OAuth 2.0 spec. - -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -See godoc for further documentation and examples. - -* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) -* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) - - -## App Engine - -In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor -of the [`context.Context`](https://golang.org/x/net/context#Context) type from -the `golang.org/x/net/context` package - -This means its no longer possible to use the "Classic App Engine" -`appengine.Context` type with the `oauth2` package. (You're using -Classic App Engine if you import the package `"appengine"`.) - -To work around this, you may use the new `"google.golang.org/appengine"` -package. This package has almost the same API as the `"appengine"` package, -but it can be fetched with `go get` and used on "Managed VMs" and well as -Classic App Engine. - -See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app) -for information on updating your app. - -If you don't want to update your entire app to use the new App Engine packages, -you may use both sets of packages in parallel, using only the new packages -with the `oauth2` package. 
- - import ( - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - newappengine "google.golang.org/appengine" - newurlfetch "google.golang.org/appengine/urlfetch" - - "appengine" - ) - - func handler(w http.ResponseWriter, r *http.Request) { - var c appengine.Context = appengine.NewContext(r) - c.Infof("Logging a message with the old package") - - var ctx context.Context = newappengine.NewContext(r) - client := &http.Client{ - Transport: &oauth2.Transport{ - Source: google.AppEngineTokenSource(ctx, "scope"), - Base: &newurlfetch.Transport{Context: ctx}, - }, - } - client.Get("...") - } - diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/client_appengine.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/client_appengine.go deleted file mode 100644 index 8962c49d..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/client_appengine.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine - -// App Engine hooks. - -package oauth2 - -import ( - "net/http" - - "golang.org/x/net/context" - "golang.org/x/oauth2/internal" - "google.golang.org/appengine/urlfetch" -) - -func init() { - internal.RegisterContextClientFunc(contextClientAppEngine) -} - -func contextClientAppEngine(ctx context.Context) (*http.Client, error) { - return urlfetch.Client(ctx), nil -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine.go deleted file mode 100644 index dc993efb..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "sort" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2" -) - -// Set at init time by appenginevm_hook.go. If true, we are on App Engine Managed VMs. -var appengineVM bool - -// Set at init time by appengine_hook.go. If nil, we're not on App Engine. -var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) - -// AppEngineTokenSource returns a token source that fetches tokens -// issued to the current App Engine application's service account. -// If you are implementing a 3-legged OAuth 2.0 flow on App Engine -// that involves user accounts, see oauth2.Config instead. -// -// The provided context must have come from appengine.NewContext. -func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - if appengineTokenFunc == nil { - panic("google: AppEngineTokenSource can only be used on App Engine.") - } - scopes := append([]string{}, scope...) - sort.Strings(scopes) - return &appEngineTokenSource{ - ctx: ctx, - scopes: scopes, - key: strings.Join(scopes, " "), - } -} - -// aeTokens helps the fetched tokens to be reused until their expiration. 
-var ( - aeTokensMu sync.Mutex - aeTokens = make(map[string]*tokenLock) // key is space-separated scopes -) - -type tokenLock struct { - mu sync.Mutex // guards t; held while fetching or updating t - t *oauth2.Token -} - -type appEngineTokenSource struct { - ctx context.Context - scopes []string - key string // to aeTokens map; space-separated scopes -} - -func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) { - if appengineTokenFunc == nil { - panic("google: AppEngineTokenSource can only be used on App Engine.") - } - - aeTokensMu.Lock() - tok, ok := aeTokens[ts.key] - if !ok { - tok = &tokenLock{} - aeTokens[ts.key] = tok - } - aeTokensMu.Unlock() - - tok.mu.Lock() - defer tok.mu.Unlock() - if tok.t.Valid() { - return tok.t, nil - } - access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) - if err != nil { - return nil, err - } - tok.t = &oauth2.Token{ - AccessToken: access, - Expiry: exp, - } - return tok.t, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine_hook.go deleted file mode 100644 index 4f42c8b3..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appengine_hook.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine - -package google - -import "google.golang.org/appengine" - -func init() { - appengineTokenFunc = appengine.AccessToken -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appenginevm_hook.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appenginevm_hook.go deleted file mode 100644 index 633611cc..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/appenginevm_hook.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2015 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appenginevm - -package google - -import "google.golang.org/appengine" - -func init() { - appengineVM = true - appengineTokenFunc = appengine.AccessToken -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/default.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/default.go deleted file mode 100644 index b9523629..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/default.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "runtime" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/jwt" - "google.golang.org/cloud/compute/metadata" -) - -// DefaultClient returns an HTTP Client that uses the -// DefaultTokenSource to obtain authentication credentials. -// -// This client should be used when developing services -// that run on Google App Engine or Google Compute Engine -// and use "Application Default Credentials." 
-// -// For more details, see: -// https://developers.google.com/accounts/docs/application-default-credentials -// -func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { - ts, err := DefaultTokenSource(ctx, scope...) - if err != nil { - return nil, err - } - return oauth2.NewClient(ctx, ts), nil -} - -// DefaultTokenSource is a token source that uses -// "Application Default Credentials". -// -// It looks for credentials in the following places, -// preferring the first location found: -// -// 1. A JSON file whose path is specified by the -// GOOGLE_APPLICATION_CREDENTIALS environment variable. -// 2. A JSON file in a location known to the gcloud command-line tool. -// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. -// On other systems, $HOME/.config/gcloud/application_default_credentials.json. -// 3. On Google App Engine it uses the appengine.AccessToken function. -// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches -// credentials from the metadata server. -// (In this final case any provided scopes are ignored.) -// -// For more details, see: -// https://developers.google.com/accounts/docs/application-default-credentials -// -func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { - // First, try the environment variable. - const envVar = "GOOGLE_APPLICATION_CREDENTIALS" - if filename := os.Getenv(envVar); filename != "" { - ts, err := tokenSourceFromFile(ctx, filename, scope) - if err != nil { - return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) - } - return ts, nil - } - - // Second, try a well-known file. - filename := wellKnownFile() - _, err := os.Stat(filename) - if err == nil { - ts, err2 := tokenSourceFromFile(ctx, filename, scope) - if err2 == nil { - return ts, nil - } - err = err2 - } else if os.IsNotExist(err) { - err = nil // ignore this error - } - if err != nil { - return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) - } - - // Third, if we're on Google App Engine use those credentials. - if appengineTokenFunc != nil && !appengineVM { - return AppEngineTokenSource(ctx, scope...), nil - } - - // Fourth, if we're on Google Compute Engine use the metadata server. - if metadata.OnGCE() { - return ComputeTokenSource(""), nil - } - - // None are found; return helpful error. - const url = "https://developers.google.com/accounts/docs/application-default-credentials" - return nil, fmt.Errorf("google: could not find default credentials. 
See %v for more information.", url) -} - -func wellKnownFile() string { - const f = "application_default_credentials.json" - if runtime.GOOS == "windows" { - return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) - } - return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) -} - -func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - var d struct { - // Common fields - Type string - ClientID string `json:"client_id"` - - // User Credential fields - ClientSecret string `json:"client_secret"` - RefreshToken string `json:"refresh_token"` - - // Service Account fields - ClientEmail string `json:"client_email"` - PrivateKeyID string `json:"private_key_id"` - PrivateKey string `json:"private_key"` - } - if err := json.Unmarshal(b, &d); err != nil { - return nil, err - } - switch d.Type { - case "authorized_user": - cfg := &oauth2.Config{ - ClientID: d.ClientID, - ClientSecret: d.ClientSecret, - Scopes: append([]string{}, scopes...), // copy - Endpoint: Endpoint, - } - tok := &oauth2.Token{RefreshToken: d.RefreshToken} - return cfg.TokenSource(ctx, tok), nil - case "service_account": - cfg := &jwt.Config{ - Email: d.ClientEmail, - PrivateKey: []byte(d.PrivateKey), - Scopes: append([]string{}, scopes...), // copy - TokenURL: JWTTokenURL, - } - return cfg.TokenSource(ctx), nil - case "": - return nil, errors.New("missing 'type' field in credentials") - default: - return nil, fmt.Errorf("unknown credential type: %q", d.Type) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/google.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/google.go deleted file mode 100644 index 0bed7386..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/google.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package google provides support for making OAuth2 authorized and -// authenticated HTTP requests to Google APIs. -// It supports the Web server flow, client-side credentials, service accounts, -// Google Compute Engine service accounts, and Google App Engine service -// accounts. -// -// For more information, please read -// https://developers.google.com/accounts/docs/OAuth2 -// and -// https://developers.google.com/accounts/docs/application-default-credentials. -package google - -import ( - "encoding/json" - "errors" - "fmt" - "strings" - "time" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/jwt" - "google.golang.org/cloud/compute/metadata" -) - -// Endpoint is Google's OAuth 2.0 endpoint. -var Endpoint = oauth2.Endpoint{ - AuthURL: "https://accounts.google.com/o/oauth2/auth", - TokenURL: "https://accounts.google.com/o/oauth2/token", -} - -// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. -const JWTTokenURL = "https://accounts.google.com/o/oauth2/token" - -// ConfigFromJSON uses a Google Developers Console client_credentials.json -// file to construct a config. -// client_credentials.json can be downloadable from https://console.developers.google.com, -// under "APIs & Auth" > "Credentials". Download the Web application credentials in the -// JSON format and provide the contents of the file as jsonKey. 
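ConfigFromJSON, defined just below, is the entry point for the three-legged web flow. A hedged sketch of a typical caller (the file path, scope, and redirect handling are illustrative assumptions; assumes imports of io/ioutil, net/http, golang.org/x/oauth2 and golang.org/x/oauth2/google):

	func driveClient(code string) (*http.Client, error) {
		b, err := ioutil.ReadFile("client_credentials.json") // illustrative path
		if err != nil {
			return nil, err
		}
		conf, err := google.ConfigFromJSON(b, "https://www.googleapis.com/auth/drive.readonly")
		if err != nil {
			return nil, err
		}
		// In the real flow the user first visits conf.AuthCodeURL("state")
		// and the authorization code arrives on the redirect URL.
		tok, err := conf.Exchange(oauth2.NoContext, code)
		if err != nil {
			return nil, err
		}
		return conf.Client(oauth2.NoContext, tok), nil
	}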
-func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { - type cred struct { - ClientID string `json:"client_id"` - ClientSecret string `json:"client_secret"` - RedirectURIs []string `json:"redirect_uris"` - AuthURI string `json:"auth_uri"` - TokenURI string `json:"token_uri"` - } - var j struct { - Web *cred `json:"web"` - Installed *cred `json:"installed"` - } - if err := json.Unmarshal(jsonKey, &j); err != nil { - return nil, err - } - var c *cred - switch { - case j.Web != nil: - c = j.Web - case j.Installed != nil: - c = j.Installed - default: - return nil, fmt.Errorf("oauth2/google: no credentials found") - } - if len(c.RedirectURIs) < 1 { - return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") - } - return &oauth2.Config{ - ClientID: c.ClientID, - ClientSecret: c.ClientSecret, - RedirectURL: c.RedirectURIs[0], - Scopes: scope, - Endpoint: oauth2.Endpoint{ - AuthURL: c.AuthURI, - TokenURL: c.TokenURI, - }, - }, nil -} - -// JWTConfigFromJSON uses a Google Developers service account JSON key file to read -// the credentials that authorize and authenticate the requests. -// Create a service account on "Credentials" page under "APIs & Auth" for your -// project at https://console.developers.google.com to download a JSON key file. -func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { - var key struct { - Email string `json:"client_email"` - PrivateKey string `json:"private_key"` - } - if err := json.Unmarshal(jsonKey, &key); err != nil { - return nil, err - } - return &jwt.Config{ - Email: key.Email, - PrivateKey: []byte(key.PrivateKey), - Scopes: scope, - TokenURL: JWTTokenURL, - }, nil -} - -// ComputeTokenSource returns a token source that fetches access tokens -// from Google Compute Engine (GCE)'s metadata server. It's only valid to use -// this token source if your program is running on a GCE instance. -// If no account is specified, "default" is used. -// Further information about retrieving access tokens from the GCE metadata -// server can be found at https://cloud.google.com/compute/docs/authentication. 
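ComputeTokenSource, defined just below, pairs naturally with oauth2.NewClient. A short sketch, valid only when run on a GCE instance (the request URL and project name are illustrative assumptions):

	ts := google.ComputeTokenSource("") // empty account name selects "default"
	client := oauth2.NewClient(oauth2.NoContext, ts)
	// client now attaches tokens fetched from the metadata server,
	// re-fetching them as the reported expires_in window runs out.
	resp, err := client.Get("https://www.googleapis.com/storage/v1/b?project=example")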
-func ComputeTokenSource(account string) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, computeSource{account: account}) -} - -type computeSource struct { - account string -} - -func (cs computeSource) Token() (*oauth2.Token, error) { - if !metadata.OnGCE() { - return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") - } - acct := cs.account - if acct == "" { - acct = "default" - } - tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token") - if err != nil { - return nil, err - } - var res struct { - AccessToken string `json:"access_token"` - ExpiresInSec int `json:"expires_in"` - TokenType string `json:"token_type"` - } - err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) - if err != nil { - return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) - } - if res.ExpiresInSec == 0 || res.AccessToken == "" { - return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") - } - return &oauth2.Token{ - AccessToken: res.AccessToken, - TokenType: res.TokenType, - Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), - }, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/jwt.go deleted file mode 100644 index b9199178..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/jwt.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "crypto/rsa" - "fmt" - "time" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/internal" - "golang.org/x/oauth2/jws" -) - -// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON -// key file to read the credentials that authorize and authenticate the -// requests, and returns a TokenSource that does not use any OAuth2 flow but -// instead creates a JWT and sends that as the access token. -// The audience is typically a URL that specifies the scope of the credentials. -// -// Note that this is not a standard OAuth flow, but rather an -// optimization supported by a few Google services. -// Unless you know otherwise, you should use JWTConfigFromJSON instead. 
-func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { - cfg, err := JWTConfigFromJSON(jsonKey) - if err != nil { - return nil, fmt.Errorf("google: could not parse JSON key: %v", err) - } - pk, err := internal.ParseKey(cfg.PrivateKey) - if err != nil { - return nil, fmt.Errorf("google: could not parse key: %v", err) - } - ts := &jwtAccessTokenSource{ - email: cfg.Email, - audience: audience, - pk: pk, - } - tok, err := ts.Token() - if err != nil { - return nil, err - } - return oauth2.ReuseTokenSource(tok, ts), nil -} - -type jwtAccessTokenSource struct { - email, audience string - pk *rsa.PrivateKey -} - -func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { - iat := time.Now() - exp := iat.Add(time.Hour) - cs := &jws.ClaimSet{ - Iss: ts.email, - Sub: ts.email, - Aud: ts.audience, - Iat: iat.Unix(), - Exp: exp.Unix(), - } - hdr := &jws.Header{ - Algorithm: "RS256", - Typ: "JWT", - } - msg, err := jws.Encode(hdr, cs, ts.pk) - if err != nil { - return nil, fmt.Errorf("google: could not encode JWT: %v", err) - } - return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/sdk.go deleted file mode 100644 index d29a3bb9..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/google/sdk.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "os" - "os/user" - "path/filepath" - "runtime" - "strings" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/internal" -) - -type sdkCredentials struct { - Data []struct { - Credential struct { - ClientID string `json:"client_id"` - ClientSecret string `json:"client_secret"` - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - TokenExpiry *time.Time `json:"token_expiry"` - } `json:"credential"` - Key struct { - Account string `json:"account"` - Scope string `json:"scope"` - } `json:"key"` - } -} - -// An SDKConfig provides access to tokens from an account already -// authorized via the Google Cloud SDK. -type SDKConfig struct { - conf oauth2.Config - initialToken *oauth2.Token -} - -// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK -// account. If account is empty, the account currently active in -// Google Cloud SDK properties is used. -// Google Cloud SDK credentials must be created by running `gcloud auth` -// before using this function. -// The Google Cloud SDK is available at https://cloud.google.com/sdk/. 
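A usage sketch for NewSDKConfig, defined just below (assumes `gcloud auth login` has been run; the empty string selects the account currently active in the SDK properties):

	conf, err := google.NewSDKConfig("")
	if err != nil {
		log.Fatal(err)
	}
	// The client refreshes the stored gcloud token as needed, but does not
	// write the refreshed token back to the credentials file.
	client := conf.Client(oauth2.NoContext)
	_ = client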
-func NewSDKConfig(account string) (*SDKConfig, error) { - configPath, err := sdkConfigPath() - if err != nil { - return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) - } - credentialsPath := filepath.Join(configPath, "credentials") - f, err := os.Open(credentialsPath) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) - } - defer f.Close() - - var c sdkCredentials - if err := json.NewDecoder(f).Decode(&c); err != nil { - return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) - } - if len(c.Data) == 0 { - return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) - } - if account == "" { - propertiesPath := filepath.Join(configPath, "properties") - f, err := os.Open(propertiesPath) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) - } - defer f.Close() - ini, err := internal.ParseINI(f) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) - } - core, ok := ini["core"] - if !ok { - return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) - } - active, ok := core["account"] - if !ok { - return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) - } - account = active - } - - for _, d := range c.Data { - if account == "" || d.Key.Account == account { - if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { - return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) - } - var expiry time.Time - if d.Credential.TokenExpiry != nil { - expiry = *d.Credential.TokenExpiry - } - return &SDKConfig{ - conf: oauth2.Config{ - ClientID: d.Credential.ClientID, - ClientSecret: d.Credential.ClientSecret, - Scopes: strings.Split(d.Key.Scope, " "), - Endpoint: Endpoint, - RedirectURL: "oob", - }, - initialToken: &oauth2.Token{ - AccessToken: d.Credential.AccessToken, - RefreshToken: d.Credential.RefreshToken, - Expiry: expiry, - }, - }, nil - } - } - return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) -} - -// Client returns an HTTP client using Google Cloud SDK credentials to -// authorize requests. The token will auto-refresh as necessary. The -// underlying http.RoundTripper will be obtained using the provided -// context. The returned client and its Transport should not be -// modified. -func (c *SDKConfig) Client(ctx context.Context) *http.Client { - return &http.Client{ - Transport: &oauth2.Transport{ - Source: c.TokenSource(ctx), - }, - } -} - -// TokenSource returns an oauth2.TokenSource that retrieve tokens from -// Google Cloud SDK credentials using the provided context. -// It will returns the current access token stored in the credentials, -// and refresh it when it expires, but it won't update the credentials -// with the new access token. -func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource { - return c.conf.TokenSource(ctx, c.initialToken) -} - -// Scopes are the OAuth 2.0 scopes the current account is authorized for. -func (c *SDKConfig) Scopes() []string { - return c.conf.Scopes -} - -// sdkConfigPath tries to guess where the gcloud config is located. -// It can be overridden during tests. 
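Because sdkConfigPath (below) is a package-level variable, tests can point it at a fixture, per the "overridden during tests" comment above. A hypothetical sketch of that override (the test name and fixture directory are assumptions; the fixture would need credentials and properties files):

	func TestNewSDKConfig(t *testing.T) {
		orig := sdkConfigPath
		sdkConfigPath = func() (string, error) {
			return "testdata/gcloud", nil // hypothetical fixture directory
		}
		defer func() { sdkConfigPath = orig }()

		if _, err := NewSDKConfig(""); err != nil {
			t.Fatal(err)
		}
	}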
-var sdkConfigPath = func() (string, error) { - if runtime.GOOS == "windows" { - return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil - } - homeDir := guessUnixHomeDir() - if homeDir == "" { - return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty") - } - return filepath.Join(homeDir, ".config", "gcloud"), nil -} - -func guessUnixHomeDir() string { - usr, err := user.Current() - if err == nil { - return usr.HomeDir - } - return os.Getenv("HOME") -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/oauth2.go deleted file mode 100644 index fbe1028d..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/oauth2.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internal contains support packages for oauth2 package. -package internal - -import ( - "bufio" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "io" - "strings" -) - -// ParseKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a -// PEM container or not. If so, it extracts the the private key -// from PEM container before conversion. It only supports PEM -// containers with no passphrase. -func ParseKey(key []byte) (*rsa.PrivateKey, error) { - block, _ := pem.Decode(key) - if block != nil { - key = block.Bytes - } - parsedKey, err := x509.ParsePKCS8PrivateKey(key) - if err != nil { - parsedKey, err = x509.ParsePKCS1PrivateKey(key) - if err != nil { - return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err) - } - } - parsed, ok := parsedKey.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("private key is invalid") - } - return parsed, nil -} - -func ParseINI(ini io.Reader) (map[string]map[string]string, error) { - result := map[string]map[string]string{ - "": map[string]string{}, // root section - } - scanner := bufio.NewScanner(ini) - currentSection := "" - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - if strings.HasPrefix(line, ";") { - // comment. - continue - } - if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { - currentSection = strings.TrimSpace(line[1 : len(line)-1]) - result[currentSection] = map[string]string{} - continue - } - parts := strings.SplitN(line, "=", 2) - if len(parts) == 2 && parts[0] != "" { - result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) - } - } - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning ini: %v", err) - } - return result, nil -} - -func CondVal(v string) []string { - if v == "" { - return nil - } - return []string{v} -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/token.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/token.go deleted file mode 100644 index 739a89bf..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/token.go +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internal contains support packages for oauth2 package. 
-package internal - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "mime" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "golang.org/x/net/context" -) - -// Token represents the crendentials used to authorize -// the requests to access protected resources on the OAuth 2.0 -// provider's backend. -// -// This type is a mirror of oauth2.Token and exists to break -// an otherwise-circular dependency. Other internal packages -// should convert this Token into an oauth2.Token before use. -type Token struct { - // AccessToken is the token that authorizes and authenticates - // the requests. - AccessToken string - - // TokenType is the type of token. - // The Type method returns either this or "Bearer", the default. - TokenType string - - // RefreshToken is a token that's used by the application - // (as opposed to the user) to refresh the access token - // if it expires. - RefreshToken string - - // Expiry is the optional expiration time of the access token. - // - // If zero, TokenSource implementations will reuse the same - // token forever and RefreshToken or equivalent - // mechanisms for that TokenSource will not be used. - Expiry time.Time - - // Raw optionally contains extra metadata from the server - // when updating a token. - Raw interface{} -} - -// tokenJSON is the struct representing the HTTP response from OAuth2 -// providers returning a token in JSON form. -type tokenJSON struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - RefreshToken string `json:"refresh_token"` - ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number - Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in -} - -func (e *tokenJSON) expiry() (t time.Time) { - if v := e.ExpiresIn; v != 0 { - return time.Now().Add(time.Duration(v) * time.Second) - } - if v := e.Expires; v != 0 { - return time.Now().Add(time.Duration(v) * time.Second) - } - return -} - -type expirationTime int32 - -func (e *expirationTime) UnmarshalJSON(b []byte) error { - var n json.Number - err := json.Unmarshal(b, &n) - if err != nil { - return err - } - i, err := n.Int64() - if err != nil { - return err - } - *e = expirationTime(i) - return nil -} - -var brokenAuthHeaderProviders = []string{ - "https://accounts.google.com/", - "https://api.dropbox.com/", - "https://api.instagram.com/", - "https://api.netatmo.net/", - "https://api.odnoklassniki.ru/", - "https://api.pushbullet.com/", - "https://api.soundcloud.com/", - "https://api.twitch.tv/", - "https://app.box.com/", - "https://connect.stripe.com/", - "https://login.microsoftonline.com/", - "https://login.salesforce.com/", - "https://oauth.sandbox.trainingpeaks.com/", - "https://oauth.trainingpeaks.com/", - "https://oauth.vk.com/", - "https://openapi.baidu.com/", - "https://slack.com/", - "https://test-sandbox.auth.corp.google.com", - "https://test.salesforce.com/", - "https://user.gini.net/", - "https://www.douban.com/", - "https://www.googleapis.com/", - "https://www.linkedin.com/", - "https://www.strava.com/oauth/", - "https://www.wunderlist.com/oauth/", - "https://api.patreon.com/", -} - -func RegisterBrokenAuthHeaderProvider(tokenURL string) { - brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL) -} - -// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL -// implements the OAuth2 spec correctly -// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. 
-// In summary: -// - Reddit only accepts client secret in the Authorization header -// - Dropbox accepts either it in URL param or Auth header, but not both. -// - Google only accepts URL param (not spec compliant?), not Auth header -// - Stripe only accepts client secret in Auth header with Bearer method, not Basic -func providerAuthHeaderWorks(tokenURL string) bool { - for _, s := range brokenAuthHeaderProviders { - if strings.HasPrefix(tokenURL, s) { - // Some sites fail to implement the OAuth2 spec fully. - return false - } - } - - // Assume the provider implements the spec properly - // otherwise. We can add more exceptions as they're - // discovered. We will _not_ be adding configurable hooks - // to this package to let users select server bugs. - return true -} - -func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) { - hc, err := ContextClient(ctx) - if err != nil { - return nil, err - } - v.Set("client_id", ClientID) - bustedAuth := !providerAuthHeaderWorks(TokenURL) - if bustedAuth && ClientSecret != "" { - v.Set("client_secret", ClientSecret) - } - req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode())) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - if !bustedAuth { - req.SetBasicAuth(ClientID, ClientSecret) - } - r, err := hc.Do(req) - if err != nil { - return nil, err - } - defer r.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - if code := r.StatusCode; code < 200 || code > 299 { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body) - } - - var token *Token - content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) - switch content { - case "application/x-www-form-urlencoded", "text/plain": - vals, err := url.ParseQuery(string(body)) - if err != nil { - return nil, err - } - token = &Token{ - AccessToken: vals.Get("access_token"), - TokenType: vals.Get("token_type"), - RefreshToken: vals.Get("refresh_token"), - Raw: vals, - } - e := vals.Get("expires_in") - if e == "" { - // TODO(jbd): Facebook's OAuth2 implementation is broken and - // returns expires_in field in expires. Remove the fallback to expires, - // when Facebook fixes their implementation. - e = vals.Get("expires") - } - expires, _ := strconv.Atoi(e) - if expires != 0 { - token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) - } - default: - var tj tokenJSON - if err = json.Unmarshal(body, &tj); err != nil { - return nil, err - } - token = &Token{ - AccessToken: tj.AccessToken, - TokenType: tj.TokenType, - RefreshToken: tj.RefreshToken, - Expiry: tj.expiry(), - Raw: make(map[string]interface{}), - } - json.Unmarshal(body, &token.Raw) // no error checks for optional fields - } - // Don't overwrite `RefreshToken` with an empty value - // if this was a token refreshing request. - if token.RefreshToken == "" { - token.RefreshToken = v.Get("refresh_token") - } - return token, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/transport.go deleted file mode 100644 index f1f173e3..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/internal/transport.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internal contains support packages for oauth2 package. -package internal - -import ( - "net/http" - - "golang.org/x/net/context" -) - -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. -var HTTPClient ContextKey - -// ContextKey is just an empty struct. It exists so HTTPClient can be -// an immutable public variable with a unique type. It's immutable -// because nobody else can create a ContextKey, being unexported. -type ContextKey struct{} - -// ContextClientFunc is a func which tries to return an *http.Client -// given a Context value. If it returns an error, the search stops -// with that error. If it returns (nil, nil), the search continues -// down the list of registered funcs. -type ContextClientFunc func(context.Context) (*http.Client, error) - -var contextClientFuncs []ContextClientFunc - -func RegisterContextClientFunc(fn ContextClientFunc) { - contextClientFuncs = append(contextClientFuncs, fn) -} - -func ContextClient(ctx context.Context) (*http.Client, error) { - if ctx != nil { - if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { - return hc, nil - } - } - for _, fn := range contextClientFuncs { - c, err := fn(ctx) - if err != nil { - return nil, err - } - if c != nil { - return c, nil - } - } - return http.DefaultClient, nil -} - -func ContextTransport(ctx context.Context) http.RoundTripper { - hc, err := ContextClient(ctx) - // This is a rare error case (somebody using nil on App Engine). - if err != nil { - return ErrorTransport{err} - } - return hc.Transport -} - -// ErrorTransport returns the specified error on RoundTrip. -// This RoundTripper should be used in rare error cases where -// error handling can be postponed to response handling time. -type ErrorTransport struct{ Err error } - -func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) { - return nil, t.Err -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/jws/jws.go deleted file mode 100644 index b46edb27..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/jws/jws.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package jws provides encoding and decoding utilities for -// signed JWS messages. -package jws - -import ( - "bytes" - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "strings" - "time" -) - -// ClaimSet contains information about the JWT signature including the -// permissions being requested (scopes), the target of the token, the issuer, -// the time the token was issued, and the lifetime of the token. -type ClaimSet struct { - Iss string `json:"iss"` // email address of the client_id of the application making the access token request - Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests - Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). 
- Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch) - Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch) - Typ string `json:"typ,omitempty"` // token type (Optional). - - // Email for which the application is requesting delegated access (Optional). - Sub string `json:"sub,omitempty"` - - // The old name of Sub. Client keeps setting Prn to be - // complaint with legacy OAuth 2.0 providers. (Optional) - Prn string `json:"prn,omitempty"` - - // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 - // This array is marshalled using custom code (see (c *ClaimSet) encode()). - PrivateClaims map[string]interface{} `json:"-"` -} - -func (c *ClaimSet) encode() (string, error) { - // Reverting time back for machines whose time is not perfectly in sync. - // If client machine's time is in the future according - // to Google servers, an access token will not be issued. - now := time.Now().Add(-10 * time.Second) - if c.Iat == 0 { - c.Iat = now.Unix() - } - if c.Exp == 0 { - c.Exp = now.Add(time.Hour).Unix() - } - if c.Exp < c.Iat { - return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat) - } - - b, err := json.Marshal(c) - if err != nil { - return "", err - } - - if len(c.PrivateClaims) == 0 { - return base64Encode(b), nil - } - - // Marshal private claim set and then append it to b. - prv, err := json.Marshal(c.PrivateClaims) - if err != nil { - return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims) - } - - // Concatenate public and private claim JSON objects. - if !bytes.HasSuffix(b, []byte{'}'}) { - return "", fmt.Errorf("jws: invalid JSON %s", b) - } - if !bytes.HasPrefix(prv, []byte{'{'}) { - return "", fmt.Errorf("jws: invalid JSON %s", prv) - } - b[len(b)-1] = ',' // Replace closing curly brace with a comma. - b = append(b, prv[1:]...) // Append private claims. - return base64Encode(b), nil -} - -// Header represents the header for the signed JWS payloads. -type Header struct { - // The algorithm used for signature. - Algorithm string `json:"alg"` - - // Represents the token type. - Typ string `json:"typ"` -} - -func (h *Header) encode() (string, error) { - b, err := json.Marshal(h) - if err != nil { - return "", err - } - return base64Encode(b), nil -} - -// Decode decodes a claim set from a JWS payload. -func Decode(payload string) (*ClaimSet, error) { - // decode returned id token to get expiry - s := strings.Split(payload, ".") - if len(s) < 2 { - // TODO(jbd): Provide more context about the error. - return nil, errors.New("jws: invalid token received") - } - decoded, err := base64Decode(s[1]) - if err != nil { - return nil, err - } - c := &ClaimSet{} - err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) - return c, err -} - -// Signer returns a signature for the given data. -type Signer func(data []byte) (sig []byte, err error) - -// EncodeWithSigner encodes a header and claim set with the provided signer. -func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { - head, err := header.encode() - if err != nil { - return "", err - } - cs, err := c.encode() - if err != nil { - return "", err - } - ss := fmt.Sprintf("%s.%s", head, cs) - sig, err := sg([]byte(ss)) - if err != nil { - return "", err - } - return fmt.Sprintf("%s.%s", ss, base64Encode(sig)), nil -} - -// Encode encodes a signed JWS with provided header and claim set. 
-// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. -func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { - sg := func(data []byte) (sig []byte, err error) { - h := sha256.New() - h.Write([]byte(data)) - return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) - } - return EncodeWithSigner(header, c, sg) -} - -// base64Encode returns and Base64url encoded version of the input string with any -// trailing "=" stripped. -func base64Encode(b []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") -} - -// base64Decode decodes the Base64url encoded string -func base64Decode(s string) ([]byte, error) { - // add back missing padding - switch len(s) % 4 { - case 1: - s += "===" - case 2: - s += "==" - case 3: - s += "=" - } - return base64.URLEncoding.DecodeString(s) -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/jwt/jwt.go deleted file mode 100644 index 2ffad21a..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/jwt/jwt.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly -// known as "two-legged OAuth 2.0". -// -// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 -package jwt - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/internal" - "golang.org/x/oauth2/jws" -) - -var ( - defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" - defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} -) - -// Config is the configuration for using JWT to fetch tokens, -// commonly known as "two-legged OAuth 2.0". -type Config struct { - // Email is the OAuth client identifier used when communicating with - // the configured OAuth provider. - Email string - - // PrivateKey contains the contents of an RSA private key or the - // contents of a PEM file that contains a private key. The provided - // private key is used to sign JWT payloads. - // PEM containers with a passphrase are not supported. - // Use the following command to convert a PKCS 12 file into a PEM. - // - // $ openssl pkcs12 -in key.p12 -out key.pem -nodes - // - PrivateKey []byte - - // Subject is the optional user to impersonate. - Subject string - - // Scopes optionally specifies a list of requested permission scopes. - Scopes []string - - // TokenURL is the endpoint required to complete the 2-legged JWT flow. - TokenURL string - - // Expires optionally specifies how long the token is valid for. - Expires time.Duration -} - -// TokenSource returns a JWT TokenSource using the configuration -// in c and the HTTP client from the provided context. -func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) -} - -// Client returns an HTTP client wrapping the context's -// HTTP transport and adding Authorization headers with tokens -// obtained from c. -// -// The returned client and its Transport should not be modified. 
-func (c *Config) Client(ctx context.Context) *http.Client { - return oauth2.NewClient(ctx, c.TokenSource(ctx)) -} - -// jwtSource is a source that always does a signed JWT request for a token. -// It should typically be wrapped with a reuseTokenSource. -type jwtSource struct { - ctx context.Context - conf *Config -} - -func (js jwtSource) Token() (*oauth2.Token, error) { - pk, err := internal.ParseKey(js.conf.PrivateKey) - if err != nil { - return nil, err - } - hc := oauth2.NewClient(js.ctx, nil) - claimSet := &jws.ClaimSet{ - Iss: js.conf.Email, - Scope: strings.Join(js.conf.Scopes, " "), - Aud: js.conf.TokenURL, - } - if subject := js.conf.Subject; subject != "" { - claimSet.Sub = subject - // prn is the old name of sub. Keep setting it - // to be compatible with legacy OAuth 2.0 providers. - claimSet.Prn = subject - } - if t := js.conf.Expires; t > 0 { - claimSet.Exp = time.Now().Add(t).Unix() - } - payload, err := jws.Encode(defaultHeader, claimSet, pk) - if err != nil { - return nil, err - } - v := url.Values{} - v.Set("grant_type", defaultGrantType) - v.Set("assertion", payload) - resp, err := hc.PostForm(js.conf.TokenURL, v) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - if c := resp.StatusCode; c < 200 || c > 299 { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body) - } - // tokenRes is the JSON response body. - var tokenRes struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - IDToken string `json:"id_token"` - ExpiresIn int64 `json:"expires_in"` // relative seconds from now - } - if err := json.Unmarshal(body, &tokenRes); err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - token := &oauth2.Token{ - AccessToken: tokenRes.AccessToken, - TokenType: tokenRes.TokenType, - } - raw := make(map[string]interface{}) - json.Unmarshal(body, &raw) // no error checks for optional fields - token = token.WithExtra(raw) - - if secs := tokenRes.ExpiresIn; secs > 0 { - token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) - } - if v := tokenRes.IDToken; v != "" { - // decode returned id token to get expiry - claimSet, err := jws.Decode(v) - if err != nil { - return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) - } - token.Expiry = time.Unix(claimSet.Exp, 0) - } - return token, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/oauth2.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/oauth2.go deleted file mode 100644 index 9b7b977d..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/oauth2.go +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package oauth2 provides support for making -// OAuth2 authorized and authenticated HTTP requests. -// It can additionally grant authorization with Bearer JWT. -package oauth2 - -import ( - "bytes" - "errors" - "net/http" - "net/url" - "strings" - "sync" - - "golang.org/x/net/context" - "golang.org/x/oauth2/internal" -) - -// NoContext is the default context you should supply if not using -// your own context.Context (see https://golang.org/x/net/context). 
-var NoContext = context.TODO() - -// RegisterBrokenAuthHeaderProvider registers an OAuth2 server -// identified by the tokenURL prefix as an OAuth2 implementation -// which doesn't support the HTTP Basic authentication -// scheme to authenticate with the authorization server. -// Once a server is registered, credentials (client_id and client_secret) -// will be passed as query parameters rather than being present -// in the Authorization header. -// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. -func RegisterBrokenAuthHeaderProvider(tokenURL string) { - internal.RegisterBrokenAuthHeaderProvider(tokenURL) -} - -// Config describes a typical 3-legged OAuth2 flow, with both the -// client application information and the server's endpoint URLs. -type Config struct { - // ClientID is the application's ID. - ClientID string - - // ClientSecret is the application's secret. - ClientSecret string - - // Endpoint contains the resource server's token endpoint - // URLs. These are constants specific to each server and are - // often available via site-specific packages, such as - // google.Endpoint or github.Endpoint. - Endpoint Endpoint - - // RedirectURL is the URL to redirect users going through - // the OAuth flow, after the resource owner's URLs. - RedirectURL string - - // Scope specifies optional requested permissions. - Scopes []string -} - -// A TokenSource is anything that can return a token. -type TokenSource interface { - // Token returns a token or an error. - // Token must be safe for concurrent use by multiple goroutines. - // The returned Token must not be modified. - Token() (*Token, error) -} - -// Endpoint contains the OAuth 2.0 provider's authorization and token -// endpoint URLs. -type Endpoint struct { - AuthURL string - TokenURL string -} - -var ( - // AccessTypeOnline and AccessTypeOffline are options passed - // to the Options.AuthCodeURL method. They modify the - // "access_type" field that gets sent in the URL returned by - // AuthCodeURL. - // - // Online is the default if neither is specified. If your - // application needs to refresh access tokens when the user - // is not present at the browser, then use offline. This will - // result in your application obtaining a refresh token the - // first time your application exchanges an authorization - // code for a user. - AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online") - AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline") - - // ApprovalForce forces the users to view the consent dialog - // and confirm the permissions request at the URL returned - // from AuthCodeURL, even if they've already done so. - ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force") -) - -// An AuthCodeOption is passed to Config.AuthCodeURL. -type AuthCodeOption interface { - setValue(url.Values) -} - -type setParam struct{ k, v string } - -func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } - -// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters -// to a provider's authorization endpoint. -func SetAuthURLParam(key, value string) AuthCodeOption { - return setParam{key, value} -} - -// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page -// that asks for permissions for the required scopes explicitly. -// -// State is a token to protect the user from CSRF attacks. You must -// always provide a non-zero string and validate that it matches the -// the state query parameter on your redirect callback. 
-// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. -// -// Opts may include AccessTypeOnline or AccessTypeOffline, as well -// as ApprovalForce. -func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { - var buf bytes.Buffer - buf.WriteString(c.Endpoint.AuthURL) - v := url.Values{ - "response_type": {"code"}, - "client_id": {c.ClientID}, - "redirect_uri": internal.CondVal(c.RedirectURL), - "scope": internal.CondVal(strings.Join(c.Scopes, " ")), - "state": internal.CondVal(state), - } - for _, opt := range opts { - opt.setValue(v) - } - if strings.Contains(c.Endpoint.AuthURL, "?") { - buf.WriteByte('&') - } else { - buf.WriteByte('?') - } - buf.WriteString(v.Encode()) - return buf.String() -} - -// PasswordCredentialsToken converts a resource owner username and password -// pair into a token. -// -// Per the RFC, this grant type should only be used "when there is a high -// degree of trust between the resource owner and the client (e.g., the client -// is part of the device operating system or a highly privileged application), -// and when other authorization grant types are not available." -// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. -// -// The HTTP client to use is derived from the context. -// If nil, http.DefaultClient is used. -func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { - return retrieveToken(ctx, c, url.Values{ - "grant_type": {"password"}, - "username": {username}, - "password": {password}, - "scope": internal.CondVal(strings.Join(c.Scopes, " ")), - }) -} - -// Exchange converts an authorization code into a token. -// -// It is used after a resource provider redirects the user back -// to the Redirect URI (the URL obtained from AuthCodeURL). -// -// The HTTP client to use is derived from the context. -// If a client is not provided via the context, http.DefaultClient is used. -// -// The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state"). -func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { - return retrieveToken(ctx, c, url.Values{ - "grant_type": {"authorization_code"}, - "code": {code}, - "redirect_uri": internal.CondVal(c.RedirectURL), - "scope": internal.CondVal(strings.Join(c.Scopes, " ")), - }) -} - -// Client returns an HTTP client using the provided token. -// The token will auto-refresh as necessary. The underlying -// HTTP transport will be obtained using the provided context. -// The returned client and its Transport should not be modified. -func (c *Config) Client(ctx context.Context, t *Token) *http.Client { - return NewClient(ctx, c.TokenSource(ctx, t)) -} - -// TokenSource returns a TokenSource that returns t until t expires, -// automatically refreshing it as necessary using the provided context. -// -// Most users will use Config.Client instead. -func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { - tkr := &tokenRefresher{ - ctx: ctx, - conf: c, - } - if t != nil { - tkr.refreshToken = t.RefreshToken - } - return &reuseTokenSource{ - t: t, - new: tkr, - } -} - -// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" -// HTTP requests to renew a token using a RefreshToken. 
-type tokenRefresher struct { - ctx context.Context // used to get HTTP requests - conf *Config - refreshToken string -} - -// WARNING: Token is not safe for concurrent access, as it -// updates the tokenRefresher's refreshToken field. -// Within this package, it is used by reuseTokenSource which -// synchronizes calls to this method with its own mutex. -func (tf *tokenRefresher) Token() (*Token, error) { - if tf.refreshToken == "" { - return nil, errors.New("oauth2: token expired and refresh token is not set") - } - - tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ - "grant_type": {"refresh_token"}, - "refresh_token": {tf.refreshToken}, - }) - - if err != nil { - return nil, err - } - if tf.refreshToken != tk.RefreshToken { - tf.refreshToken = tk.RefreshToken - } - return tk, err -} - -// reuseTokenSource is a TokenSource that holds a single token in memory -// and validates its expiry before each call to retrieve it with -// Token. If it's expired, it will be auto-refreshed using the -// new TokenSource. -type reuseTokenSource struct { - new TokenSource // called when t is expired. - - mu sync.Mutex // guards t - t *Token -} - -// Token returns the current token if it's still valid, else will -// refresh the current token (using r.Context for HTTP client -// information) and return the new one. -func (s *reuseTokenSource) Token() (*Token, error) { - s.mu.Lock() - defer s.mu.Unlock() - if s.t.Valid() { - return s.t, nil - } - t, err := s.new.Token() - if err != nil { - return nil, err - } - s.t = t - return t, nil -} - -// StaticTokenSource returns a TokenSource that always returns the same token. -// Because the provided token t is never refreshed, StaticTokenSource is only -// useful for tokens that never expire. -func StaticTokenSource(t *Token) TokenSource { - return staticTokenSource{t} -} - -// staticTokenSource is a TokenSource that always returns the same Token. -type staticTokenSource struct { - t *Token -} - -func (s staticTokenSource) Token() (*Token, error) { - return s.t, nil -} - -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. -var HTTPClient internal.ContextKey - -// NewClient creates an *http.Client from a Context and TokenSource. -// The returned client is not valid beyond the lifetime of the context. -// -// As a special case, if src is nil, a non-OAuth2 client is returned -// using the provided context. This exists to support related OAuth2 -// packages. -func NewClient(ctx context.Context, src TokenSource) *http.Client { - if src == nil { - c, err := internal.ContextClient(ctx) - if err != nil { - return &http.Client{Transport: internal.ErrorTransport{err}} - } - return c - } - return &http.Client{ - Transport: &Transport{ - Base: internal.ContextTransport(ctx), - Source: ReuseTokenSource(nil, src), - }, - } -} - -// ReuseTokenSource returns a TokenSource which repeatedly returns the -// same token as long as it's valid, starting with t. -// When its cached token is invalid, a new token is obtained from src. -// -// ReuseTokenSource is typically used to reuse tokens from a cache -// (such as a file on disk) between runs of a program, rather than -// obtaining new tokens unnecessarily. -// -// The initial token t may be nil, in which case the TokenSource is -// wrapped in a caching version if it isn't one already. This also -// means it's always safe to wrap ReuseTokenSource around any other -// TokenSource without adverse effects. 
-func ReuseTokenSource(t *Token, src TokenSource) TokenSource { - // Don't wrap a reuseTokenSource in itself. That would work, - // but cause an unnecessary number of mutex operations. - // Just build the equivalent one. - if rt, ok := src.(*reuseTokenSource); ok { - if t == nil { - // Just use it directly. - return rt - } - src = rt.new - } - return &reuseTokenSource{ - t: t, - new: src, - } -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/token.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/token.go deleted file mode 100644 index 7a3167f1..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/token.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package oauth2 - -import ( - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2/internal" -) - -// expiryDelta determines how earlier a token should be considered -// expired than its actual expiration time. It is used to avoid late -// expirations due to client-server time mismatches. -const expiryDelta = 10 * time.Second - -// Token represents the crendentials used to authorize -// the requests to access protected resources on the OAuth 2.0 -// provider's backend. -// -// Most users of this package should not access fields of Token -// directly. They're exported mostly for use by related packages -// implementing derivative OAuth2 flows. -type Token struct { - // AccessToken is the token that authorizes and authenticates - // the requests. - AccessToken string `json:"access_token"` - - // TokenType is the type of token. - // The Type method returns either this or "Bearer", the default. - TokenType string `json:"token_type,omitempty"` - - // RefreshToken is a token that's used by the application - // (as opposed to the user) to refresh the access token - // if it expires. - RefreshToken string `json:"refresh_token,omitempty"` - - // Expiry is the optional expiration time of the access token. - // - // If zero, TokenSource implementations will reuse the same - // token forever and RefreshToken or equivalent - // mechanisms for that TokenSource will not be used. - Expiry time.Time `json:"expiry,omitempty"` - - // raw optionally contains extra metadata from the server - // when updating a token. - raw interface{} -} - -// Type returns t.TokenType if non-empty, else "Bearer". -func (t *Token) Type() string { - if strings.EqualFold(t.TokenType, "bearer") { - return "Bearer" - } - if strings.EqualFold(t.TokenType, "mac") { - return "MAC" - } - if strings.EqualFold(t.TokenType, "basic") { - return "Basic" - } - if t.TokenType != "" { - return t.TokenType - } - return "Bearer" -} - -// SetAuthHeader sets the Authorization header to r using the access -// token in t. -// -// This method is unnecessary when using Transport or an HTTP Client -// returned by this package. -func (t *Token) SetAuthHeader(r *http.Request) { - r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) -} - -// WithExtra returns a new Token that's a clone of t, but using the -// provided raw extra map. This is only intended for use by packages -// implementing derivative OAuth2 flows. -func (t *Token) WithExtra(extra interface{}) *Token { - t2 := new(Token) - *t2 = *t - t2.raw = extra - return t2 -} - -// Extra returns an extra field. 
-// Extra fields are key-value pairs returned by the server as a -// part of the token retrieval response. -func (t *Token) Extra(key string) interface{} { - if raw, ok := t.raw.(map[string]interface{}); ok { - return raw[key] - } - - vals, ok := t.raw.(url.Values) - if !ok { - return nil - } - - v := vals.Get(key) - switch s := strings.TrimSpace(v); strings.Count(s, ".") { - case 0: // Contains no "."; try to parse as int - if i, err := strconv.ParseInt(s, 10, 64); err == nil { - return i - } - case 1: // Contains a single "."; try to parse as float - if f, err := strconv.ParseFloat(s, 64); err == nil { - return f - } - } - - return v -} - -// expired reports whether the token is expired. -// t must be non-nil. -func (t *Token) expired() bool { - if t.Expiry.IsZero() { - return false - } - return t.Expiry.Add(-expiryDelta).Before(time.Now()) -} - -// Valid reports whether t is non-nil, has an AccessToken, and is not expired. -func (t *Token) Valid() bool { - return t != nil && t.AccessToken != "" && !t.expired() -} - -// tokenFromInternal maps an *internal.Token struct into -// a *Token struct. -func tokenFromInternal(t *internal.Token) *Token { - if t == nil { - return nil - } - return &Token{ - AccessToken: t.AccessToken, - TokenType: t.TokenType, - RefreshToken: t.RefreshToken, - Expiry: t.Expiry, - raw: t.Raw, - } -} - -// retrieveToken takes a *Config and uses that to retrieve an *internal.Token. -// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along -// with an error.. -func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { - tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v) - if err != nil { - return nil, err - } - return tokenFromInternal(tk), nil -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/transport.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/transport.go deleted file mode 100644 index 92ac7e25..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/oauth2/transport.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package oauth2 - -import ( - "errors" - "io" - "net/http" - "sync" -) - -// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, -// wrapping a base RoundTripper and adding an Authorization header -// with a token from the supplied Sources. -// -// Transport is a low-level mechanism. Most code will use the -// higher-level Config.Client method instead. -type Transport struct { - // Source supplies the token to add to outgoing requests' - // Authorization headers. - Source TokenSource - - // Base is the base RoundTripper used to make HTTP requests. - // If nil, http.DefaultTransport is used. - Base http.RoundTripper - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// RoundTrip authorizes and authenticates the request with an -// access token. If no token exists or token is expired, -// tries to refresh/fetch a new token. 
-func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - if t.Source == nil { - return nil, errors.New("oauth2: Transport's Source is nil") - } - token, err := t.Source.Token() - if err != nil { - return nil, err - } - - req2 := cloneRequest(req) // per RoundTripper contract - token.SetAuthHeader(req2) - t.setModReq(req, req2) - res, err := t.base().RoundTrip(req2) - if err != nil { - t.setModReq(req, nil) - return nil, err - } - res.Body = &onEOFReader{ - rc: res.Body, - fn: func() { t.setModReq(req, nil) }, - } - return res, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t *Transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - t.mu.Lock() - modReq := t.modReq[req] - delete(t.modReq, req) - t.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func (t *Transport) base() http.RoundTripper { - if t.Base != nil { - return t.Base - } - return http.DefaultTransport -} - -func (t *Transport) setModReq(orig, mod *http.Request) { - t.mu.Lock() - defer t.mu.Unlock() - if t.modReq == nil { - t.modReq = make(map[*http.Request]*http.Request) - } - if mod == nil { - delete(t.modReq, orig) - } else { - t.modReq[orig] = mod - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - return r2 -} - -type onEOFReader struct { - rc io.ReadCloser - fn func() -} - -func (r *onEOFReader) Read(p []byte) (n int, err error) { - n, err = r.rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -func (r *onEOFReader) Close() error { - err := r.rc.Close() - r.runFunc() - return err -} - -func (r *onEOFReader) runFunc() { - if fn := r.fn; fn != nil { - fn() - r.fn = nil - } -} diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/time/LICENSE b/vendor/github.com/docker/distribution/vendor/golang.org/x/time/LICENSE deleted file mode 100644 index 6a66aea5..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/time/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/time/PATENTS b/vendor/github.com/docker/distribution/vendor/golang.org/x/time/PATENTS deleted file mode 100644 index 73309904..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/time/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/github.com/docker/distribution/vendor/golang.org/x/time/rate/rate.go b/vendor/github.com/docker/distribution/vendor/golang.org/x/time/rate/rate.go deleted file mode 100644 index 2131b921..00000000 --- a/vendor/github.com/docker/distribution/vendor/golang.org/x/time/rate/rate.go +++ /dev/null @@ -1,368 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package rate provides a rate limiter. -package rate - -import ( - "fmt" - "math" - "sync" - "time" - - "golang.org/x/net/context" -) - -// Limit defines the maximum frequency of some events. -// Limit is represented as number of events per second. -// A zero Limit allows no events. -type Limit float64 - -// Inf is the infinite rate limit; it allows all events (even if burst is zero). -const Inf = Limit(math.MaxFloat64) - -// Every converts a minimum time interval between events to a Limit. -func Every(interval time.Duration) Limit { - if interval <= 0 { - return Inf - } - return 1 / Limit(interval.Seconds()) -} - -// A Limiter controls how frequently events are allowed to happen. -// It implements a "token bucket" of size b, initially full and refilled -// at rate r tokens per second. 
-// Informally, in any large enough time interval, the Limiter limits the -// rate to r tokens per second, with a maximum burst size of b events. -// As a special case, if r == Inf (the infinite rate), b is ignored. -// See https://en.wikipedia.org/wiki/Token_bucket for more about token buckets. -// -// The zero value is a valid Limiter, but it will reject all events. -// Use NewLimiter to create non-zero Limiters. -// -// Limiter has three main methods, Allow, Reserve, and Wait. -// Most callers should use Wait. -// -// Each of the three methods consumes a single token. -// They differ in their behavior when no token is available. -// If no token is available, Allow returns false. -// If no token is available, Reserve returns a reservation for a future token -// and the amount of time the caller must wait before using it. -// If no token is available, Wait blocks until one can be obtained -// or its associated context.Context is canceled. -// -// The methods AllowN, ReserveN, and WaitN consume n tokens. -type Limiter struct { - limit Limit - burst int - - mu sync.Mutex - tokens float64 - // last is the last time the limiter's tokens field was updated - last time.Time - // lastEvent is the latest time of a rate-limited event (past or future) - lastEvent time.Time -} - -// Limit returns the maximum overall event rate. -func (lim *Limiter) Limit() Limit { - lim.mu.Lock() - defer lim.mu.Unlock() - return lim.limit -} - -// Burst returns the maximum burst size. Burst is the maximum number of tokens -// that can be consumed in a single call to Allow, Reserve, or Wait, so higher -// Burst values allow more events to happen at once. -// A zero Burst allows no events, unless limit == Inf. -func (lim *Limiter) Burst() int { - return lim.burst -} - -// NewLimiter returns a new Limiter that allows events up to rate r and permits -// bursts of at most b tokens. -func NewLimiter(r Limit, b int) *Limiter { - return &Limiter{ - limit: r, - burst: b, - } -} - -// Allow is shorthand for AllowN(time.Now(), 1). -func (lim *Limiter) Allow() bool { - return lim.AllowN(time.Now(), 1) -} - -// AllowN reports whether n events may happen at time now. -// Use this method if you intend to drop / skip events that exceed the rate limit. -// Otherwise use Reserve or Wait. -func (lim *Limiter) AllowN(now time.Time, n int) bool { - return lim.reserveN(now, n, 0).ok -} - -// A Reservation holds information about events that are permitted by a Limiter to happen after a delay. -// A Reservation may be canceled, which may enable the Limiter to permit additional events. -type Reservation struct { - ok bool - lim *Limiter - tokens int - timeToAct time.Time - // This is the Limit at reservation time, it can change later. - limit Limit -} - -// OK returns whether the limiter can provide the requested number of tokens -// within the maximum wait time. If OK is false, Delay returns InfDuration, and -// Cancel does nothing. -func (r *Reservation) OK() bool { - return r.ok -} - -// Delay is shorthand for DelayFrom(time.Now()). -func (r *Reservation) Delay() time.Duration { - return r.DelayFrom(time.Now()) -} - -// InfDuration is the duration returned by Delay when a Reservation is not OK. -const InfDuration = time.Duration(1<<63 - 1) - -// DelayFrom returns the duration for which the reservation holder must wait -// before taking the reserved action. Zero duration means act immediately. -// InfDuration means the limiter cannot grant the tokens requested in this -// Reservation within the maximum wait time. 
-func (r *Reservation) DelayFrom(now time.Time) time.Duration { - if !r.ok { - return InfDuration - } - delay := r.timeToAct.Sub(now) - if delay < 0 { - return 0 - } - return delay -} - -// Cancel is shorthand for CancelAt(time.Now()). -func (r *Reservation) Cancel() { - r.CancelAt(time.Now()) - return -} - -// CancelAt indicates that the reservation holder will not perform the reserved action -// and reverses the effects of this Reservation on the rate limit as much as possible, -// considering that other reservations may have already been made. -func (r *Reservation) CancelAt(now time.Time) { - if !r.ok { - return - } - - r.lim.mu.Lock() - defer r.lim.mu.Unlock() - - if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) { - return - } - - // calculate tokens to restore - // The duration between lim.lastEvent and r.timeToAct tells us how many tokens were reserved - // after r was obtained. These tokens should not be restored. - restoreTokens := float64(r.tokens) - r.limit.tokensFromDuration(r.lim.lastEvent.Sub(r.timeToAct)) - if restoreTokens <= 0 { - return - } - // advance time to now - now, _, tokens := r.lim.advance(now) - // calculate new number of tokens - tokens += restoreTokens - if burst := float64(r.lim.burst); tokens > burst { - tokens = burst - } - // update state - r.lim.last = now - r.lim.tokens = tokens - if r.timeToAct == r.lim.lastEvent { - prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) - if !prevEvent.Before(now) { - r.lim.lastEvent = prevEvent - } - } - - return -} - -// Reserve is shorthand for ReserveN(time.Now(), 1). -func (lim *Limiter) Reserve() *Reservation { - return lim.ReserveN(time.Now(), 1) -} - -// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen. -// The Limiter takes this Reservation into account when allowing future events. -// ReserveN returns false if n exceeds the Limiter's burst size. -// Usage example: -// r, ok := lim.ReserveN(time.Now(), 1) -// if !ok { -// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? -// } -// time.Sleep(r.Delay()) -// Act() -// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. -// If you need to respect a deadline or cancel the delay, use Wait instead. -// To drop or skip events exceeding rate limit, use Allow instead. -func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { - r := lim.reserveN(now, n, InfDuration) - return &r -} - -// Wait is shorthand for WaitN(ctx, 1). -func (lim *Limiter) Wait(ctx context.Context) (err error) { - return lim.WaitN(ctx, 1) -} - -// WaitN blocks until lim permits n events to happen. -// It returns an error if n exceeds the Limiter's burst size, the Context is -// canceled, or the expected wait time exceeds the Context's Deadline. -func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { - if n > lim.burst { - return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst) - } - // Check if ctx is already cancelled - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - // Determine wait limit - now := time.Now() - waitLimit := InfDuration - if deadline, ok := ctx.Deadline(); ok { - waitLimit = deadline.Sub(now) - } - // Reserve - r := lim.reserveN(now, n, waitLimit) - if !r.ok { - return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) - } - // Wait - t := time.NewTimer(r.DelayFrom(now)) - defer t.Stop() - select { - case <-t.C: - // We can proceed. 
- return nil - case <-ctx.Done(): - // Context was canceled before we could proceed. Cancel the - // reservation, which may permit other events to proceed sooner. - r.Cancel() - return ctx.Err() - } -} - -// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit). -func (lim *Limiter) SetLimit(newLimit Limit) { - lim.SetLimitAt(time.Now(), newLimit) -} - -// SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated -// or underutilized by those which reserved (using Reserve or Wait) but did not yet act -// before SetLimitAt was called. -func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { - lim.mu.Lock() - defer lim.mu.Unlock() - - now, _, tokens := lim.advance(now) - - lim.last = now - lim.tokens = tokens - lim.limit = newLimit -} - -// reserveN is a helper method for AllowN, ReserveN, and WaitN. -// maxFutureReserve specifies the maximum reservation wait duration allowed. -// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. -func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { - lim.mu.Lock() - defer lim.mu.Unlock() - - if lim.limit == Inf { - return Reservation{ - ok: true, - lim: lim, - tokens: n, - timeToAct: now, - } - } - - now, last, tokens := lim.advance(now) - - // Calculate the remaining number of tokens resulting from the request. - tokens -= float64(n) - - // Calculate the wait duration - var waitDuration time.Duration - if tokens < 0 { - waitDuration = lim.limit.durationFromTokens(-tokens) - } - - // Decide result - ok := n <= lim.burst && waitDuration <= maxFutureReserve - - // Prepare reservation - r := Reservation{ - ok: ok, - lim: lim, - limit: lim.limit, - } - if ok { - r.tokens = n - r.timeToAct = now.Add(waitDuration) - } - - // Update state - if ok { - lim.last = now - lim.tokens = tokens - lim.lastEvent = r.timeToAct - } else { - lim.last = last - } - - return r -} - -// advance calculates and returns an updated state for lim resulting from the passage of time. -// lim is not changed. -func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { - last := lim.last - if now.Before(last) { - last = now - } - - // Avoid making delta overflow below when last is very old. - maxElapsed := lim.limit.durationFromTokens(float64(lim.burst) - lim.tokens) - elapsed := now.Sub(last) - if elapsed > maxElapsed { - elapsed = maxElapsed - } - - // Calculate the new number of tokens, due to time that passed. - delta := lim.limit.tokensFromDuration(elapsed) - tokens := lim.tokens + delta - if burst := float64(lim.burst); tokens > burst { - tokens = burst - } - - return now, last, tokens -} - -// durationFromTokens is a unit conversion function from the number of tokens to the duration -// of time it takes to accumulate them at a rate of limit tokens per second. -func (limit Limit) durationFromTokens(tokens float64) time.Duration { - seconds := tokens / float64(limit) - return time.Nanosecond * time.Duration(1e9*seconds) -} - -// tokensFromDuration is a unit conversion function from a time duration to the number of tokens -// which could be accumulated during that duration at a rate of limit tokens per second. 
-func (limit Limit) tokensFromDuration(d time.Duration) float64 { - return d.Seconds() * float64(limit) -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/LICENSE b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/LICENSE deleted file mode 100644 index 263aa7a0..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/backoff.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/backoff.go deleted file mode 100644 index 13561404..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/backoff.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "math/rand" - "time" -) - -type BackoffStrategy interface { - // Pause returns the duration of the next pause and true if the operation should be - // retried, or false if no further retries should be attempted. - Pause() (time.Duration, bool) - - // Reset restores the strategy to its initial state. - Reset() -} - -// ExponentialBackoff performs exponential backoff as per https://en.wikipedia.org/wiki/Exponential_backoff. -// The initial pause time is given by Base. -// Once the total pause time exceeds Max, Pause will indicate no further retries. -type ExponentialBackoff struct { - Base time.Duration - Max time.Duration - total time.Duration - n uint -} - -func (eb *ExponentialBackoff) Pause() (time.Duration, bool) { - if eb.total > eb.Max { - return 0, false - } - - // The next pause is selected from randomly from [0, 2^n * Base). 
- d := time.Duration(rand.Int63n((1 << eb.n) * int64(eb.Base))) - eb.total += d - eb.n++ - return d, true -} - -func (eb *ExponentialBackoff) Reset() { - eb.n = 0 - eb.total = 0 -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/buffer.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/buffer.go deleted file mode 100644 index 4b8ec142..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/buffer.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "bytes" - "io" - - "google.golang.org/api/googleapi" -) - -// ResumableBuffer buffers data from an io.Reader to support uploading media in retryable chunks. -type ResumableBuffer struct { - media io.Reader - - chunk []byte // The current chunk which is pending upload. The capacity is the chunk size. - err error // Any error generated when populating chunk by reading media. - - // The absolute position of chunk in the underlying media. - off int64 -} - -func NewResumableBuffer(media io.Reader, chunkSize int) *ResumableBuffer { - return &ResumableBuffer{media: media, chunk: make([]byte, 0, chunkSize)} -} - -// Chunk returns the current buffered chunk, the offset in the underlying media -// from which the chunk is drawn, and the size of the chunk. -// Successive calls to Chunk return the same chunk between calls to Next. -func (rb *ResumableBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) { - // There may already be data in chunk if Next has not been called since the previous call to Chunk. - if rb.err == nil && len(rb.chunk) == 0 { - rb.err = rb.loadChunk() - } - return bytes.NewReader(rb.chunk), rb.off, len(rb.chunk), rb.err -} - -// loadChunk will read from media into chunk, up to the capacity of chunk. -func (rb *ResumableBuffer) loadChunk() error { - bufSize := cap(rb.chunk) - rb.chunk = rb.chunk[:bufSize] - - read := 0 - var err error - for err == nil && read < bufSize { - var n int - n, err = rb.media.Read(rb.chunk[read:]) - read += n - } - rb.chunk = rb.chunk[:read] - return err -} - -// Next advances to the next chunk, which will be returned by the next call to Chunk. -// Calls to Next without a corresponding prior call to Chunk will have no effect. -func (rb *ResumableBuffer) Next() { - rb.off += int64(len(rb.chunk)) - rb.chunk = rb.chunk[0:0] -} - -type readerTyper struct { - io.Reader - googleapi.ContentTyper -} - -// ReaderAtToReader adapts a ReaderAt to be used as a Reader. -// If ra implements googleapi.ContentTyper, then the returned reader -// will also implement googleapi.ContentTyper, delegating to ra. -func ReaderAtToReader(ra io.ReaderAt, size int64) io.Reader { - r := io.NewSectionReader(ra, 0, size) - if typer, ok := ra.(googleapi.ContentTyper); ok { - return readerTyper{r, typer} - } - return r -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/doc.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/doc.go deleted file mode 100644 index 752c4b41..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package gensupport is an internal implementation detail used by code -// generated by the google-api-go-generator tool. -// -// This package may be modified at any time without regard for backwards -// compatibility. It should not be used directly by API users. -package gensupport diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/json.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/json.go deleted file mode 100644 index dd7bcd2e..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/json.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "encoding/json" - "fmt" - "reflect" - "strings" -) - -// MarshalJSON returns a JSON encoding of schema containing only selected fields. -// A field is selected if: -// * it has a non-empty value, or -// * its field name is present in forceSendFields, and -// * it is not a nil pointer or nil interface. -// The JSON key for each selected field is taken from the field's json: struct tag. -func MarshalJSON(schema interface{}, forceSendFields []string) ([]byte, error) { - if len(forceSendFields) == 0 { - return json.Marshal(schema) - } - - mustInclude := make(map[string]struct{}) - for _, f := range forceSendFields { - mustInclude[f] = struct{}{} - } - - dataMap, err := schemaToMap(schema, mustInclude) - if err != nil { - return nil, err - } - return json.Marshal(dataMap) -} - -func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[string]interface{}, error) { - m := make(map[string]interface{}) - s := reflect.ValueOf(schema) - st := s.Type() - - for i := 0; i < s.NumField(); i++ { - jsonTag := st.Field(i).Tag.Get("json") - if jsonTag == "" { - continue - } - tag, err := parseJSONTag(jsonTag) - if err != nil { - return nil, err - } - if tag.ignore { - continue - } - - v := s.Field(i) - f := st.Field(i) - if !includeField(v, f, mustInclude) { - continue - } - - // nil maps are treated as empty maps. - if f.Type.Kind() == reflect.Map && v.IsNil() { - m[tag.apiName] = map[string]string{} - continue - } - - // nil slices are treated as empty slices. - if f.Type.Kind() == reflect.Slice && v.IsNil() { - m[tag.apiName] = []bool{} - continue - } - - if tag.stringFormat { - m[tag.apiName] = formatAsString(v, f.Type.Kind()) - } else { - m[tag.apiName] = v.Interface() - } - } - return m, nil -} - -// formatAsString returns a string representation of v, dereferencing it first if possible. -func formatAsString(v reflect.Value, kind reflect.Kind) string { - if kind == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - - return fmt.Sprintf("%v", v.Interface()) -} - -// jsonTag represents a restricted version of the struct tag format used by encoding/json. -// It is used to describe the JSON encoding of fields in a Schema struct. -type jsonTag struct { - apiName string - stringFormat bool - ignore bool -} - -// parseJSONTag parses a restricted version of the struct tag format used by encoding/json. -// The format of the tag must match that generated by the Schema.writeSchemaStruct method -// in the api generator. 
-func parseJSONTag(val string) (jsonTag, error) { - if val == "-" { - return jsonTag{ignore: true}, nil - } - - var tag jsonTag - - i := strings.Index(val, ",") - if i == -1 || val[:i] == "" { - return tag, fmt.Errorf("malformed json tag: %s", val) - } - - tag = jsonTag{ - apiName: val[:i], - } - - switch val[i+1:] { - case "omitempty": - case "omitempty,string": - tag.stringFormat = true - default: - return tag, fmt.Errorf("malformed json tag: %s", val) - } - - return tag, nil -} - -// Reports whether the struct field "f" with value "v" should be included in JSON output. -func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]struct{}) bool { - // The regular JSON encoding of a nil pointer is "null", which means "delete this field". - // Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set. - // However, many fields are not pointers, so there would be no way to delete these fields. - // Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields. - // Deletion will be handled by a separate mechanism. - if f.Type.Kind() == reflect.Ptr && v.IsNil() { - return false - } - - // The "any" type is represented as an interface{}. If this interface - // is nil, there is no reasonable representation to send. We ignore - // these fields, for the same reasons as given above for pointers. - if f.Type.Kind() == reflect.Interface && v.IsNil() { - return false - } - - _, ok := mustInclude[f.Name] - return ok || !isEmptyValue(v) -} - -// isEmptyValue reports whether v is the empty value for its type. This -// implementation is based on that of the encoding/json package, but its -// correctness does not depend on it being identical. What's important is that -// this function return false in situations where v should not be sent as part -// of a PATCH operation. -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/media.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/media.go deleted file mode 100644 index 817f46f5..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/media.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "fmt" - "io" - "io/ioutil" - "mime/multipart" - "net/http" - "net/textproto" - - "google.golang.org/api/googleapi" -) - -const sniffBuffSize = 512 - -func newContentSniffer(r io.Reader) *contentSniffer { - return &contentSniffer{r: r} -} - -// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. -type contentSniffer struct { - r io.Reader - start []byte // buffer for the sniffed bytes. - err error // set to any error encountered while reading bytes to be sniffed. 
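To make the `forceSendFields` selection rule above concrete: with plain `encoding/json`, an `omitempty` tag silently drops zero values, so a caller has no way to explicitly send `0`, `""`, or `false`. The generated API structs therefore carry a `ForceSendFields` list and marshal through the deleted `MarshalJSON` helper, which re-includes those fields. A small, self-contained sketch of the underlying problem (the `bucket` struct here is made up for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// A made-up schema struct in the style the API generator emits.
type bucket struct {
	Name string `json:"name,omitempty"`
	Size int64  `json:"size,omitempty"`
}

func main() {
	// Size == 0 is dropped by omitempty, even if the caller meant it.
	b, _ := json.Marshal(bucket{Name: "logs"})
	fmt.Println(string(b)) // {"name":"logs"}

	// The deleted helper is invoked roughly as
	//   gensupport.MarshalJSON(schema, []string{"Size"})
	// which keeps the zero-valued field in the output despite omitempty.
}
```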
- - ctype string // set on first sniff. - sniffed bool // set to true on first sniff. -} - -func (cs *contentSniffer) Read(p []byte) (n int, err error) { - // Ensure that the content type is sniffed before any data is consumed from Reader. - _, _ = cs.ContentType() - - if len(cs.start) > 0 { - n := copy(p, cs.start) - cs.start = cs.start[n:] - return n, nil - } - - // We may have read some bytes into start while sniffing, even if the read ended in an error. - // We should first return those bytes, then the error. - if cs.err != nil { - return 0, cs.err - } - - // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. - return cs.r.Read(p) -} - -// ContentType returns the sniffed content type, and whether the content type was succesfully sniffed. -func (cs *contentSniffer) ContentType() (string, bool) { - if cs.sniffed { - return cs.ctype, cs.ctype != "" - } - cs.sniffed = true - // If ReadAll hits EOF, it returns err==nil. - cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) - - // Don't try to detect the content type based on possibly incomplete data. - if cs.err != nil { - return "", false - } - - cs.ctype = http.DetectContentType(cs.start) - return cs.ctype, true -} - -// DetermineContentType determines the content type of the supplied reader. -// If the content type is already known, it can be specified via ctype. -// Otherwise, the content of media will be sniffed to determine the content type. -// If media implements googleapi.ContentTyper (deprecated), this will be used -// instead of sniffing the content. -// After calling DetectContentType the caller must not perform further reads on -// media, but rather read from the Reader that is returned. -func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) { - // Note: callers could avoid calling DetectContentType if ctype != "", - // but doing the check inside this function reduces the amount of - // generated code. - if ctype != "" { - return media, ctype - } - - // For backwards compatability, allow clients to set content - // type by providing a ContentTyper for media. - if typer, ok := media.(googleapi.ContentTyper); ok { - return media, typer.ContentType() - } - - sniffer := newContentSniffer(media) - if ctype, ok := sniffer.ContentType(); ok { - return sniffer, ctype - } - // If content type could not be sniffed, reads from sniffer will eventually fail with an error. - return sniffer, "" -} - -type typeReader struct { - io.Reader - typ string -} - -// multipartReader combines the contents of multiple readers to creat a multipart/related HTTP body. -// Close must be called if reads from the multipartReader are abandoned before reaching EOF. 
-type multipartReader struct { - pr *io.PipeReader - pipeOpen bool - ctype string -} - -func newMultipartReader(parts []typeReader) *multipartReader { - mp := &multipartReader{pipeOpen: true} - var pw *io.PipeWriter - mp.pr, pw = io.Pipe() - mpw := multipart.NewWriter(pw) - mp.ctype = "multipart/related; boundary=" + mpw.Boundary() - go func() { - for _, part := range parts { - w, err := mpw.CreatePart(typeHeader(part.typ)) - if err != nil { - mpw.Close() - pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err)) - return - } - _, err = io.Copy(w, part.Reader) - if err != nil { - mpw.Close() - pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err)) - return - } - } - - mpw.Close() - pw.Close() - }() - return mp -} - -func (mp *multipartReader) Read(data []byte) (n int, err error) { - return mp.pr.Read(data) -} - -func (mp *multipartReader) Close() error { - if !mp.pipeOpen { - return nil - } - mp.pipeOpen = false - return mp.pr.Close() -} - -// CombineBodyMedia combines a json body with media content to create a multipart/related HTTP body. -// It returns a ReadCloser containing the combined body, and the overall "multipart/related" content type, with random boundary. -// -// The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF. -func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) { - mp := newMultipartReader([]typeReader{ - {body, bodyContentType}, - {media, mediaContentType}, - }) - return mp, mp.ctype -} - -func typeHeader(contentType string) textproto.MIMEHeader { - h := make(textproto.MIMEHeader) - if contentType != "" { - h.Set("Content-Type", contentType) - } - return h -} - -// PrepareUpload determines whether the data in the supplied reader should be -// uploaded in a single request, or in sequential chunks. -// chunkSize is the size of the chunk that media should be split into. -// If chunkSize is non-zero and the contents of media do not fit in a single -// chunk (or there is an error reading media), then media will be returned as a -// ResumableBuffer. Otherwise, media will be returned as a Reader. -// -// After PrepareUpload has been called, media should no longer be used: the -// media content should be accessed via one of the return values. -func PrepareUpload(media io.Reader, chunkSize int) (io.Reader, - *ResumableBuffer) { - if chunkSize == 0 { // do not chunk - return media, nil - } - - rb := NewResumableBuffer(media, chunkSize) - rdr, _, _, err := rb.Chunk() - - if err == io.EOF { // we can upload this in a single request - return rdr, nil - } - // err might be a non-EOF error. If it is, the next call to rb.Chunk will - // return the same error. Returning a ResumableBuffer ensures that this error - // will be handled at some point. - - return nil, rb -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/params.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/params.go deleted file mode 100644 index 3b3c7439..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/params.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
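The `newMultipartReader`/`CombineBodyMedia` pair above streams the JSON body and the media through a single `multipart/related` payload. A self-contained sketch of the body shape it produces, built with the same `mime/multipart` and `net/textproto` primitives (buffered here instead of piped, to keep the example short):

```go
package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/textproto"
)

func main() {
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)

	// Part 1: the JSON metadata, with its own Content-Type header.
	jsonHdr := make(textproto.MIMEHeader)
	jsonHdr.Set("Content-Type", "application/json")
	part, _ := w.CreatePart(jsonHdr)
	fmt.Fprint(part, `{"name":"photo.jpg"}`)

	// Part 2: the media bytes (a short stand-in string here).
	mediaHdr := make(textproto.MIMEHeader)
	mediaHdr.Set("Content-Type", "image/jpeg")
	part, _ = w.CreatePart(mediaHdr)
	fmt.Fprint(part, "...jpeg bytes...")

	w.Close()

	// Note the overall type is multipart/related, not the
	// multipart/form-data that mime/multipart usually serves; the
	// deleted code also builds this header string by hand.
	fmt.Println("Content-Type: multipart/related; boundary=" + w.Boundary())
	fmt.Print(buf.String())
}
```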
- -package gensupport - -import ( - "net/url" - - "google.golang.org/api/googleapi" -) - -// URLParams is a simplified replacement for url.Values -// that safely builds up URL parameters for encoding. -type URLParams map[string][]string - -// Get returns the first value for the given key, or "". -func (u URLParams) Get(key string) string { - vs := u[key] - if len(vs) == 0 { - return "" - } - return vs[0] -} - -// Set sets the key to value. -// It replaces any existing values. -func (u URLParams) Set(key, value string) { - u[key] = []string{value} -} - -// SetMulti sets the key to an array of values. -// It replaces any existing values. -// Note that values must not be modified after calling SetMulti -// so the caller is responsible for making a copy if necessary. -func (u URLParams) SetMulti(key string, values []string) { - u[key] = values -} - -// Encode encodes the values into ``URL encoded'' form -// ("bar=baz&foo=quux") sorted by key. -func (u URLParams) Encode() string { - return url.Values(u).Encode() -} - -func SetOptions(u URLParams, opts ...googleapi.CallOption) { - for _, o := range opts { - u.Set(o.Get()) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/resumable.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/resumable.go deleted file mode 100644 index b3e774aa..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/resumable.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gensupport - -import ( - "fmt" - "io" - "net/http" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/context/ctxhttp" -) - -const ( - // statusResumeIncomplete is the code returned by the Google uploader - // when the transfer is not yet complete. - statusResumeIncomplete = 308 - - // statusTooManyRequests is returned by the storage API if the - // per-project limits have been temporarily exceeded. The request - // should be retried. - // https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes - statusTooManyRequests = 429 -) - -// ResumableUpload is used by the generated APIs to provide resumable uploads. -// It is not used by developers directly. -type ResumableUpload struct { - Client *http.Client - // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable". - URI string - UserAgent string // User-Agent for header of the request - // Media is the object being uploaded. - Media *ResumableBuffer - // MediaType defines the media type, e.g. "image/jpeg". - MediaType string - - mu sync.Mutex // guards progress - progress int64 // number of bytes uploaded so far - - // Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded. - Callback func(int64) - - // If not specified, a default exponential backoff strategy will be used. - Backoff BackoffStrategy -} - -// Progress returns the number of bytes uploaded at this point. -func (rx *ResumableUpload) Progress() int64 { - rx.mu.Lock() - defer rx.mu.Unlock() - return rx.progress -} - -// doUploadRequest performs a single HTTP request to upload data. -// off specifies the offset in rx.Media from which data is drawn. -// size is the number of bytes in data. -// final specifies whether data is the final chunk to be uploaded. 
-func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, off, size int64, final bool) (*http.Response, error) { - req, err := http.NewRequest("POST", rx.URI, data) - if err != nil { - return nil, err - } - - req.ContentLength = size - var contentRange string - if final { - if size == 0 { - contentRange = fmt.Sprintf("bytes */%v", off) - } else { - contentRange = fmt.Sprintf("bytes %v-%v/%v", off, off+size-1, off+size) - } - } else { - contentRange = fmt.Sprintf("bytes %v-%v/*", off, off+size-1) - } - req.Header.Set("Content-Range", contentRange) - req.Header.Set("Content-Type", rx.MediaType) - req.Header.Set("User-Agent", rx.UserAgent) - return ctxhttp.Do(ctx, rx.Client, req) - -} - -// reportProgress calls a user-supplied callback to report upload progress. -// If old==updated, the callback is not called. -func (rx *ResumableUpload) reportProgress(old, updated int64) { - if updated-old == 0 { - return - } - rx.mu.Lock() - rx.progress = updated - rx.mu.Unlock() - if rx.Callback != nil { - rx.Callback(updated) - } -} - -// transferChunk performs a single HTTP request to upload a single chunk from rx.Media. -func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, error) { - chunk, off, size, err := rx.Media.Chunk() - - done := err == io.EOF - if !done && err != nil { - return nil, err - } - - res, err := rx.doUploadRequest(ctx, chunk, off, int64(size), done) - if err != nil { - return res, err - } - - if res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK { - rx.reportProgress(off, off+int64(size)) - } - - if res.StatusCode == statusResumeIncomplete { - rx.Media.Next() - } - return res, nil -} - -func contextDone(ctx context.Context) bool { - select { - case <-ctx.Done(): - return true - default: - return false - } -} - -// Upload starts the process of a resumable upload with a cancellable context. -// It retries using the provided back off strategy until cancelled or the -// strategy indicates to stop retrying. -// It is called from the auto-generated API code and is not visible to the user. -// rx is private to the auto-generated API code. -// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. -func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { - var pause time.Duration - backoff := rx.Backoff - if backoff == nil { - backoff = DefaultBackoffStrategy() - } - - for { - // Ensure that we return in the case of cancelled context, even if pause is 0. - if contextDone(ctx) { - return nil, ctx.Err() - } - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-time.After(pause): - } - - resp, err = rx.transferChunk(ctx) - - var status int - if resp != nil { - status = resp.StatusCode - } - - // Check if we should retry the request. - if shouldRetry(status, err) { - var retry bool - pause, retry = backoff.Pause() - if retry { - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - continue - } - } - - // If the chunk was uploaded successfully, but there's still - // more to go, upload the next chunk without any delay. - if status == statusResumeIncomplete { - pause = 0 - backoff.Reset() - resp.Body.Close() - continue - } - - // It's possible for err and resp to both be non-nil here, but we expose a simpler - // contract to our callers: exactly one of resp and err will be non-nil. This means - // that any response body must be closed here before returning a non-nil error. 
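The `Content-Range` strings assembled in `doUploadRequest` above carry the whole resumable-upload protocol state: intermediate chunks report an unknown total with `/*`, and only the final chunk reveals the real size. A standalone reproduction of just that formatting logic:

```go
package main

import "fmt"

// Mirrors the header construction in the deleted doUploadRequest.
func contentRange(off, size int64, final bool) string {
	if final {
		if size == 0 {
			return fmt.Sprintf("bytes */%v", off)
		}
		return fmt.Sprintf("bytes %v-%v/%v", off, off+size-1, off+size)
	}
	return fmt.Sprintf("bytes %v-%v/*", off, off+size-1)
}

func main() {
	const chunk = 8 << 20 // the 8 MiB default chunk size
	fmt.Println(contentRange(0, chunk, false))    // bytes 0-8388607/*
	fmt.Println(contentRange(chunk, 1<<20, true)) // bytes 8388608-9437183/9437184
	fmt.Println(contentRange(9437184, 0, true))   // bytes */9437184
}
```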
- if err != nil { - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - return nil, err - } - - return resp, nil - } -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/retry.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/retry.go deleted file mode 100644 index 7f83d1da..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/gensupport/retry.go +++ /dev/null @@ -1,77 +0,0 @@ -package gensupport - -import ( - "io" - "net" - "net/http" - "time" - - "golang.org/x/net/context" -) - -// Retry invokes the given function, retrying it multiple times if the connection failed or -// the HTTP status response indicates the request should be attempted again. ctx may be nil. -func Retry(ctx context.Context, f func() (*http.Response, error), backoff BackoffStrategy) (*http.Response, error) { - for { - resp, err := f() - - var status int - if resp != nil { - status = resp.StatusCode - } - - // Return if we shouldn't retry. - pause, retry := backoff.Pause() - if !shouldRetry(status, err) || !retry { - return resp, err - } - - // Ensure the response body is closed, if any. - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - - // Pause, but still listen to ctx.Done if context is not nil. - var done <-chan struct{} - if ctx != nil { - done = ctx.Done() - } - select { - case <-done: - return nil, ctx.Err() - case <-time.After(pause): - } - } -} - -// DefaultBackoffStrategy returns a default strategy to use for retrying failed upload requests. -func DefaultBackoffStrategy() BackoffStrategy { - return &ExponentialBackoff{ - Base: 250 * time.Millisecond, - Max: 16 * time.Second, - } -} - -// shouldRetry returns true if the HTTP response / error indicates that the -// request should be attempted again. -func shouldRetry(status int, err error) bool { - // Retry for 5xx response codes. - if 500 <= status && status < 600 { - return true - } - - // Retry on statusTooManyRequests{ - if status == statusTooManyRequests { - return true - } - - // Retry on unexpected EOFs and temporary network errors. - if err == io.ErrUnexpectedEOF { - return true - } - if err, ok := err.(net.Error); ok { - return err.Temporary() - } - - return false -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/googleapi.go deleted file mode 100644 index 03e9acdd..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/googleapi.go +++ /dev/null @@ -1,424 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package googleapi contains the common code shared by all Google API -// libraries. -package googleapi - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "google.golang.org/api/googleapi/internal/uritemplates" -) - -// ContentTyper is an interface for Readers which know (or would like -// to override) their Content-Type. If a media body doesn't implement -// ContentTyper, the type is sniffed from the content using -// http.DetectContentType. -type ContentTyper interface { - ContentType() string -} - -// A SizeReaderAt is a ReaderAt with a Size method. -// An io.SectionReader implements SizeReaderAt. 
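The retry policy in the deleted `shouldRetry` above is worth seeing as a truth table. A self-contained restatement (the temporary-`net.Error` branch is elided here for brevity):

```go
package main

import (
	"fmt"
	"io"
)

// Restates the deleted shouldRetry: retry on any 5xx, on HTTP 429
// (statusTooManyRequests), and on an unexpected EOF mid-body. The real
// function additionally retries temporary net.Errors.
func shouldRetry(status int, err error) bool {
	if 500 <= status && status < 600 {
		return true
	}
	if status == 429 {
		return true
	}
	return err == io.ErrUnexpectedEOF
}

func main() {
	fmt.Println(shouldRetry(503, nil))               // true: transient server error
	fmt.Println(shouldRetry(429, nil))               // true: per-project quota exceeded
	fmt.Println(shouldRetry(0, io.ErrUnexpectedEOF)) // true: truncated response
	fmt.Println(shouldRetry(404, nil))               // false: caller error, don't retry
}
```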
-type SizeReaderAt interface { - io.ReaderAt - Size() int64 -} - -// ServerResponse is embedded in each Do response and -// provides the HTTP status code and header sent by the server. -type ServerResponse struct { - // HTTPStatusCode is the server's response status code. - // When using a resource method's Do call, this will always be in the 2xx range. - HTTPStatusCode int - // Header contains the response header fields from the server. - Header http.Header -} - -const ( - Version = "0.5" - - // UserAgent is the header string used to identify this package. - UserAgent = "google-api-go-client/" + Version - - // The default chunk size to use for resumable uplods if not specified by the user. - DefaultUploadChunkSize = 8 * 1024 * 1024 - - // The minimum chunk size that can be used for resumable uploads. All - // user-specified chunk sizes must be multiple of this value. - MinUploadChunkSize = 256 * 1024 -) - -// Error contains an error response from the server. -type Error struct { - // Code is the HTTP response status code and will always be populated. - Code int `json:"code"` - // Message is the server response message and is only populated when - // explicitly referenced by the JSON server response. - Message string `json:"message"` - // Body is the raw response returned by the server. - // It is often but not always JSON, depending on how the request fails. - Body string - // Header contains the response header fields from the server. - Header http.Header - - Errors []ErrorItem -} - -// ErrorItem is a detailed error code & message from the Google API frontend. -type ErrorItem struct { - // Reason is the typed error code. For example: "some_example". - Reason string `json:"reason"` - // Message is the human-readable description of the error. - Message string `json:"message"` -} - -func (e *Error) Error() string { - if len(e.Errors) == 0 && e.Message == "" { - return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body) - } - var buf bytes.Buffer - fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code) - if e.Message != "" { - fmt.Fprintf(&buf, "%s", e.Message) - } - if len(e.Errors) == 0 { - return strings.TrimSpace(buf.String()) - } - if len(e.Errors) == 1 && e.Errors[0].Message == e.Message { - fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason) - return buf.String() - } - fmt.Fprintln(&buf, "\nMore details:") - for _, v := range e.Errors { - fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", v.Reason, v.Message) - } - return buf.String() -} - -type errorReply struct { - Error *Error `json:"error"` -} - -// CheckResponse returns an error (of type *Error) if the response -// status code is not 2xx. -func CheckResponse(res *http.Response) error { - if res.StatusCode >= 200 && res.StatusCode <= 299 { - return nil - } - slurp, err := ioutil.ReadAll(res.Body) - if err == nil { - jerr := new(errorReply) - err = json.Unmarshal(slurp, jerr) - if err == nil && jerr.Error != nil { - if jerr.Error.Code == 0 { - jerr.Error.Code = res.StatusCode - } - jerr.Error.Body = string(slurp) - return jerr.Error - } - } - return &Error{ - Code: res.StatusCode, - Body: string(slurp), - Header: res.Header, - } -} - -// IsNotModified reports whether err is the result of the -// server replying with http.StatusNotModified. -// Such error values are sometimes returned by "Do" methods -// on calls when If-None-Match is used. 
-func IsNotModified(err error) bool { - if err == nil { - return false - } - ae, ok := err.(*Error) - return ok && ae.Code == http.StatusNotModified -} - -// CheckMediaResponse returns an error (of type *Error) if the response -// status code is not 2xx. Unlike CheckResponse it does not assume the -// body is a JSON error document. -func CheckMediaResponse(res *http.Response) error { - if res.StatusCode >= 200 && res.StatusCode <= 299 { - return nil - } - slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20)) - res.Body.Close() - return &Error{ - Code: res.StatusCode, - Body: string(slurp), - } -} - -type MarshalStyle bool - -var WithDataWrapper = MarshalStyle(true) -var WithoutDataWrapper = MarshalStyle(false) - -func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) { - buf := new(bytes.Buffer) - if wrap { - buf.Write([]byte(`{"data": `)) - } - err := json.NewEncoder(buf).Encode(v) - if err != nil { - return nil, err - } - if wrap { - buf.Write([]byte(`}`)) - } - return buf, nil -} - -// endingWithErrorReader from r until it returns an error. If the -// final error from r is io.EOF and e is non-nil, e is used instead. -type endingWithErrorReader struct { - r io.Reader - e error -} - -func (er endingWithErrorReader) Read(p []byte) (n int, err error) { - n, err = er.r.Read(p) - if err == io.EOF && er.e != nil { - err = er.e - } - return -} - -// countingWriter counts the number of bytes it receives to write, but -// discards them. -type countingWriter struct { - n *int64 -} - -func (w countingWriter) Write(p []byte) (int, error) { - *w.n += int64(len(p)) - return len(p), nil -} - -// ProgressUpdater is a function that is called upon every progress update of a resumable upload. -// This is the only part of a resumable upload (from googleapi) that is usable by the developer. -// The remaining usable pieces of resumable uploads is exposed in each auto-generated API. -type ProgressUpdater func(current, total int64) - -type MediaOption interface { - setOptions(o *MediaOptions) -} - -type contentTypeOption string - -func (ct contentTypeOption) setOptions(o *MediaOptions) { - o.ContentType = string(ct) - if o.ContentType == "" { - o.ForceEmptyContentType = true - } -} - -// ContentType returns a MediaOption which sets the Content-Type header for media uploads. -// If ctype is empty, the Content-Type header will be omitted. -func ContentType(ctype string) MediaOption { - return contentTypeOption(ctype) -} - -type chunkSizeOption int - -func (cs chunkSizeOption) setOptions(o *MediaOptions) { - size := int(cs) - if size%MinUploadChunkSize != 0 { - size += MinUploadChunkSize - (size % MinUploadChunkSize) - } - o.ChunkSize = size -} - -// ChunkSize returns a MediaOption which sets the chunk size for media uploads. -// size will be rounded up to the nearest multiple of 256K. -// Media which contains fewer than size bytes will be uploaded in a single request. -// Media which contains size bytes or more will be uploaded in separate chunks. -// If size is zero, media will be uploaded in a single request. -func ChunkSize(size int) MediaOption { - return chunkSizeOption(size) -} - -// MediaOptions stores options for customizing media upload. It is not used by developers directly. -type MediaOptions struct { - ContentType string - ForceEmptyContentType bool - - ChunkSize int -} - -// ProcessMediaOptions stores options from opts in a MediaOptions. -// It is not used by developers directly. 
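The `ChunkSize` option above silently rounds any requested size up to the next multiple of `MinUploadChunkSize` (256 KiB). A quick check of that rounding rule in isolation:

```go
package main

import "fmt"

const minUploadChunkSize = 256 * 1024 // googleapi.MinUploadChunkSize

// Same rounding as the deleted chunkSizeOption.setOptions.
func roundChunkSize(size int) int {
	if size%minUploadChunkSize != 0 {
		size += minUploadChunkSize - (size % minUploadChunkSize)
	}
	return size
}

func main() {
	fmt.Println(roundChunkSize(1))      // 262144: one 256 KiB unit
	fmt.Println(roundChunkSize(262144)) // 262144: already aligned
	fmt.Println(roundChunkSize(300000)) // 524288: rounded up to two units
}
```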
-func ProcessMediaOptions(opts []MediaOption) *MediaOptions { - mo := &MediaOptions{ChunkSize: DefaultUploadChunkSize} - for _, o := range opts { - o.setOptions(mo) - } - return mo -} - -func ResolveRelative(basestr, relstr string) string { - u, _ := url.Parse(basestr) - rel, _ := url.Parse(relstr) - u = u.ResolveReference(rel) - us := u.String() - us = strings.Replace(us, "%7B", "{", -1) - us = strings.Replace(us, "%7D", "}", -1) - return us -} - -// has4860Fix is whether this Go environment contains the fix for -// http://golang.org/issue/4860 -var has4860Fix bool - -// init initializes has4860Fix by checking the behavior of the net/http package. -func init() { - r := http.Request{ - URL: &url.URL{ - Scheme: "http", - Opaque: "//opaque", - }, - } - b := &bytes.Buffer{} - r.Write(b) - has4860Fix = bytes.HasPrefix(b.Bytes(), []byte("GET http")) -} - -// SetOpaque sets u.Opaque from u.Path such that HTTP requests to it -// don't alter any hex-escaped characters in u.Path. -func SetOpaque(u *url.URL) { - u.Opaque = "//" + u.Host + u.Path - if !has4860Fix { - u.Opaque = u.Scheme + ":" + u.Opaque - } -} - -// Expand subsitutes any {encoded} strings in the URL passed in using -// the map supplied. -// -// This calls SetOpaque to avoid encoding of the parameters in the URL path. -func Expand(u *url.URL, expansions map[string]string) { - expanded, err := uritemplates.Expand(u.Path, expansions) - if err == nil { - u.Path = expanded - SetOpaque(u) - } -} - -// CloseBody is used to close res.Body. -// Prior to calling Close, it also tries to Read a small amount to see an EOF. -// Not seeing an EOF can prevent HTTP Transports from reusing connections. -func CloseBody(res *http.Response) { - if res == nil || res.Body == nil { - return - } - // Justification for 3 byte reads: two for up to "\r\n" after - // a JSON/XML document, and then 1 to see EOF if we haven't yet. - // TODO(bradfitz): detect Go 1.3+ and skip these reads. - // See https://codereview.appspot.com/58240043 - // and https://codereview.appspot.com/49570044 - buf := make([]byte, 1) - for i := 0; i < 3; i++ { - _, err := res.Body.Read(buf) - if err != nil { - break - } - } - res.Body.Close() - -} - -// VariantType returns the type name of the given variant. -// If the map doesn't contain the named key or the value is not a []interface{}, "" is returned. -// This is used to support "variant" APIs that can return one of a number of different types. -func VariantType(t map[string]interface{}) string { - s, _ := t["type"].(string) - return s -} - -// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'. -// This is used to support "variant" APIs that can return one of a number of different types. -// It reports whether the conversion was successful. -func ConvertVariant(v map[string]interface{}, dst interface{}) bool { - var buf bytes.Buffer - err := json.NewEncoder(&buf).Encode(v) - if err != nil { - return false - } - return json.Unmarshal(buf.Bytes(), dst) == nil -} - -// A Field names a field to be retrieved with a partial response. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// -// Partial responses can dramatically reduce the amount of data that must be sent to your application. -// In order to request partial responses, you can specify the full list of fields -// that your application needs by adding the Fields option to your request. -// -// Field strings use camelCase with leading lower-case characters to identify fields within the response. 
-// -// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields, -// you could request just those fields like this: -// -// svc.Events.List().Fields("nextPageToken", "items/id").Do() -// -// or if you were also interested in each Item's "Updated" field, you can combine them like this: -// -// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() -// -// More information about field formatting can be found here: -// https://developers.google.com/+/api/#fields-syntax -// -// Another way to find field names is through the Google API explorer: -// https://developers.google.com/apis-explorer/#p/ -type Field string - -// CombineFields combines fields into a single string. -func CombineFields(s []Field) string { - r := make([]string, len(s)) - for i, v := range s { - r[i] = string(v) - } - return strings.Join(r, ",") -} - -// A CallOption is an optional argument to an API call. -// It should be treated as an opaque value by users of Google APIs. -// -// A CallOption is something that configures an API call in a way that is -// not specific to that API; for instance, controlling the quota user for -// an API call is common across many APIs, and is thus a CallOption. -type CallOption interface { - Get() (key, value string) -} - -// QuotaUser returns a CallOption that will set the quota user for a call. -// The quota user can be used by server-side applications to control accounting. -// It can be an arbitrary string up to 40 characters, and will override UserIP -// if both are provided. -func QuotaUser(u string) CallOption { return quotaUser(u) } - -type quotaUser string - -func (q quotaUser) Get() (string, string) { return "quotaUser", string(q) } - -// UserIP returns a CallOption that will set the "userIp" parameter of a call. -// This should be the IP address of the originating request. -func UserIP(ip string) CallOption { return userIP(ip) } - -type userIP string - -func (i userIP) Get() (string, string) { return "userIp", string(i) } - -// TODO: Fields too diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE deleted file mode 100644 index de9c88cb..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE +++ /dev/null @@ -1,18 +0,0 @@ -Copyright (c) 2013 Joshua Tacoma - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go deleted file mode 100644 index 7c103ba1..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2013 Joshua Tacoma. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package uritemplates is a level 3 implementation of RFC 6570 (URI -// Template, http://tools.ietf.org/html/rfc6570). -// uritemplates does not support composite values (in Go: slices or maps) -// and so does not qualify as a level 4 implementation. -package uritemplates - -import ( - "bytes" - "errors" - "regexp" - "strconv" - "strings" -) - -var ( - unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]") - reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]") - validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$") - hex = []byte("0123456789ABCDEF") -) - -func pctEncode(src []byte) []byte { - dst := make([]byte, len(src)*3) - for i, b := range src { - buf := dst[i*3 : i*3+3] - buf[0] = 0x25 - buf[1] = hex[b/16] - buf[2] = hex[b%16] - } - return dst -} - -func escape(s string, allowReserved bool) string { - if allowReserved { - return string(reserved.ReplaceAllFunc([]byte(s), pctEncode)) - } - return string(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) -} - -// A uriTemplate is a parsed representation of a URI template. -type uriTemplate struct { - raw string - parts []templatePart -} - -// parse parses a URI template string into a uriTemplate object. -func parse(rawTemplate string) (*uriTemplate, error) { - split := strings.Split(rawTemplate, "{") - parts := make([]templatePart, len(split)*2-1) - for i, s := range split { - if i == 0 { - if strings.Contains(s, "}") { - return nil, errors.New("unexpected }") - } - parts[i].raw = s - continue - } - subsplit := strings.Split(s, "}") - if len(subsplit) != 2 { - return nil, errors.New("malformed template") - } - expression := subsplit[0] - var err error - parts[i*2-1], err = parseExpression(expression) - if err != nil { - return nil, err - } - parts[i*2].raw = subsplit[1] - } - return &uriTemplate{ - raw: rawTemplate, - parts: parts, - }, nil -} - -type templatePart struct { - raw string - terms []templateTerm - first string - sep string - named bool - ifemp string - allowReserved bool -} - -type templateTerm struct { - name string - explode bool - truncate int -} - -func parseExpression(expression string) (result templatePart, err error) { - switch expression[0] { - case '+': - result.sep = "," - result.allowReserved = true - expression = expression[1:] - case '.': - result.first = "." - result.sep = "." - expression = expression[1:] - case '/': - result.first = "/" - result.sep = "/" - expression = expression[1:] - case ';': - result.first = ";" - result.sep = ";" - result.named = true - expression = expression[1:] - case '?': - result.first = "?" 
- result.sep = "&" - result.named = true - result.ifemp = "=" - expression = expression[1:] - case '&': - result.first = "&" - result.sep = "&" - result.named = true - result.ifemp = "=" - expression = expression[1:] - case '#': - result.first = "#" - result.sep = "," - result.allowReserved = true - expression = expression[1:] - default: - result.sep = "," - } - rawterms := strings.Split(expression, ",") - result.terms = make([]templateTerm, len(rawterms)) - for i, raw := range rawterms { - result.terms[i], err = parseTerm(raw) - if err != nil { - break - } - } - return result, err -} - -func parseTerm(term string) (result templateTerm, err error) { - // TODO(djd): Remove "*" suffix parsing once we check that no APIs have - // mistakenly used that attribute. - if strings.HasSuffix(term, "*") { - result.explode = true - term = term[:len(term)-1] - } - split := strings.Split(term, ":") - if len(split) == 1 { - result.name = term - } else if len(split) == 2 { - result.name = split[0] - var parsed int64 - parsed, err = strconv.ParseInt(split[1], 10, 0) - result.truncate = int(parsed) - } else { - err = errors.New("multiple colons in same term") - } - if !validname.MatchString(result.name) { - err = errors.New("not a valid name: " + result.name) - } - if result.explode && result.truncate > 0 { - err = errors.New("both explode and prefix modifers on same term") - } - return result, err -} - -// Expand expands a URI template with a set of values to produce a string. -func (t *uriTemplate) Expand(values map[string]string) string { - var buf bytes.Buffer - for _, p := range t.parts { - p.expand(&buf, values) - } - return buf.String() -} - -func (tp *templatePart) expand(buf *bytes.Buffer, values map[string]string) { - if len(tp.raw) > 0 { - buf.WriteString(tp.raw) - return - } - var first = true - for _, term := range tp.terms { - value, exists := values[term.name] - if !exists { - continue - } - if first { - buf.WriteString(tp.first) - first = false - } else { - buf.WriteString(tp.sep) - } - tp.expandString(buf, term, value) - } -} - -func (tp *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) { - if tp.named { - buf.WriteString(name) - if empty { - buf.WriteString(tp.ifemp) - } else { - buf.WriteString("=") - } - } -} - -func (tp *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) { - if len(s) > t.truncate && t.truncate > 0 { - s = s[:t.truncate] - } - tp.expandName(buf, t.name, len(s) == 0) - buf.WriteString(escape(s, tp.allowReserved)) -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go deleted file mode 100644 index eff260a6..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
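To make the template machinery above concrete: the package expands RFC 6570 expressions such as `{bucket}` by substituting values and percent-encoding anything outside the unreserved set. A deliberately simplified, self-contained imitation covering only plain `{name}` expressions (no operators or modifiers):

```go
package main

import (
	"fmt"
	"net/url"
	"regexp"
)

var expr = regexp.MustCompile(`\{([A-Za-z0-9_.]+)\}`)

// expand handles only the simplest RFC 6570 form; the deleted package
// also supports the +, #, ., /, ;, ? and & operators plus :n and *
// modifiers.
func expand(tmpl string, values map[string]string) string {
	return expr.ReplaceAllStringFunc(tmpl, func(m string) string {
		name := m[1 : len(m)-1]
		// The real code escapes with its own unreserved-set regexp;
		// url.PathEscape is a close stand-in for a sketch.
		return url.PathEscape(values[name])
	})
}

func main() {
	out := expand("b/{bucket}/o/{object}",
		map[string]string{"bucket": "my-bucket", "object": "a b"})
	fmt.Println(out) // b/my-bucket/o/a%20b
}
```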
- -package uritemplates - -func Expand(path string, values map[string]string) (string, error) { - template, err := parse(path) - if err != nil { - return "", err - } - return template.Expand(values), nil -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/types.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/types.go deleted file mode 100644 index a02b4b07..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/googleapi/types.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2013 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package googleapi - -import ( - "encoding/json" - "strconv" -) - -// Int64s is a slice of int64s that marshal as quoted strings in JSON. -type Int64s []int64 - -func (q *Int64s) UnmarshalJSON(raw []byte) error { - *q = (*q)[:0] - var ss []string - if err := json.Unmarshal(raw, &ss); err != nil { - return err - } - for _, s := range ss { - v, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return err - } - *q = append(*q, int64(v)) - } - return nil -} - -// Int32s is a slice of int32s that marshal as quoted strings in JSON. -type Int32s []int32 - -func (q *Int32s) UnmarshalJSON(raw []byte) error { - *q = (*q)[:0] - var ss []string - if err := json.Unmarshal(raw, &ss); err != nil { - return err - } - for _, s := range ss { - v, err := strconv.ParseInt(s, 10, 32) - if err != nil { - return err - } - *q = append(*q, int32(v)) - } - return nil -} - -// Uint64s is a slice of uint64s that marshal as quoted strings in JSON. -type Uint64s []uint64 - -func (q *Uint64s) UnmarshalJSON(raw []byte) error { - *q = (*q)[:0] - var ss []string - if err := json.Unmarshal(raw, &ss); err != nil { - return err - } - for _, s := range ss { - v, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return err - } - *q = append(*q, uint64(v)) - } - return nil -} - -// Uint32s is a slice of uint32s that marshal as quoted strings in JSON. -type Uint32s []uint32 - -func (q *Uint32s) UnmarshalJSON(raw []byte) error { - *q = (*q)[:0] - var ss []string - if err := json.Unmarshal(raw, &ss); err != nil { - return err - } - for _, s := range ss { - v, err := strconv.ParseUint(s, 10, 32) - if err != nil { - return err - } - *q = append(*q, uint32(v)) - } - return nil -} - -// Float64s is a slice of float64s that marshal as quoted strings in JSON. 
-type Float64s []float64 - -func (q *Float64s) UnmarshalJSON(raw []byte) error { - *q = (*q)[:0] - var ss []string - if err := json.Unmarshal(raw, &ss); err != nil { - return err - } - for _, s := range ss { - v, err := strconv.ParseFloat(s, 64) - if err != nil { - return err - } - *q = append(*q, float64(v)) - } - return nil -} - -func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) { - dst := make([]byte, 0, 2+n*10) // somewhat arbitrary - dst = append(dst, '[') - for i := 0; i < n; i++ { - if i > 0 { - dst = append(dst, ',') - } - dst = append(dst, '"') - dst = fn(dst, i) - dst = append(dst, '"') - } - dst = append(dst, ']') - return dst, nil -} - -func (s Int64s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendInt(dst, s[i], 10) - }) -} - -func (s Int32s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendInt(dst, int64(s[i]), 10) - }) -} - -func (s Uint64s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendUint(dst, s[i], 10) - }) -} - -func (s Uint32s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendUint(dst, uint64(s[i]), 10) - }) -} - -func (s Float64s) MarshalJSON() ([]byte, error) { - return quotedList(len(s), func(dst []byte, i int) []byte { - return strconv.AppendFloat(dst, s[i], 'g', -1, 64) - }) -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { return &v } - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { return &v } - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { return &v } - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { return &v } - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { return &v } - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { return &v } - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. 
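The quoted-string slice types above exist because JSON numbers are IEEE-754 doubles: integers beyond 2^53 lose precision in JavaScript clients, so the API transmits 64-bit values as quoted strings. A self-contained round-trip mirroring the deleted `Int64s`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// int64s mirrors the deleted googleapi.Int64s.
type int64s []int64

func (q *int64s) UnmarshalJSON(raw []byte) error {
	*q = (*q)[:0]
	var ss []string
	if err := json.Unmarshal(raw, &ss); err != nil {
		return err
	}
	for _, s := range ss {
		v, err := strconv.ParseInt(s, 10, 64)
		if err != nil {
			return err
		}
		*q = append(*q, v)
	}
	return nil
}

func main() {
	// 9007199254740993 == 2^53 + 1: not exactly representable as a
	// float64, which is why the wire format quotes it.
	var ids int64s
	if err := json.Unmarshal([]byte(`["9007199254740993","1"]`), &ids); err != nil {
		panic(err)
	}
	fmt.Println(ids) // [9007199254740993 1]
}
```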
-func String(v string) *string { return &v } diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-api.json deleted file mode 100644 index 3768b468..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-api.json +++ /dev/null @@ -1,2865 +0,0 @@ -{ - "kind": "discovery#restDescription", - "etag": "\"bRFOOrZKfO9LweMbPqu0kcu6De8/KVPQfwGxQTBtH0g1kuij0C9i4uc\"", - "discoveryVersion": "v1", - "id": "storage:v1", - "name": "storage", - "version": "v1", - "revision": "20160304", - "title": "Cloud Storage JSON API", - "description": "Stores and retrieves potentially large, immutable data objects.", - "ownerDomain": "google.com", - "ownerName": "Google", - "icons": { - "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", - "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" - }, - "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "labels": [ - "labs" - ], - "protocol": "rest", - "baseUrl": "https://www.googleapis.com/storage/v1/", - "basePath": "/storage/v1/", - "rootUrl": "https://www.googleapis.com/", - "servicePath": "storage/v1/", - "batchPath": "batch", - "parameters": { - "alt": { - "type": "string", - "description": "Data format for the response.", - "default": "json", - "enum": [ - "json" - ], - "enumDescriptions": [ - "Responses with Content-Type of application/json" - ], - "location": "query" - }, - "fields": { - "type": "string", - "description": "Selector specifying which fields to include in a partial response.", - "location": "query" - }, - "key": { - "type": "string", - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", - "location": "query" - }, - "oauth_token": { - "type": "string", - "description": "OAuth 2.0 token for the current user.", - "location": "query" - }, - "prettyPrint": { - "type": "boolean", - "description": "Returns response with indentations and line breaks.", - "default": "true", - "location": "query" - }, - "quotaUser": { - "type": "string", - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", - "location": "query" - }, - "userIp": { - "type": "string", - "description": "IP address of the site where the request originates. 
Use this if you want to enforce per-user limits.", - "location": "query" - } - }, - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/cloud-platform.read-only": { - "description": "View your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/devstorage.full_control": { - "description": "Manage your data and permissions in Google Cloud Storage" - }, - "https://www.googleapis.com/auth/devstorage.read_only": { - "description": "View your data in Google Cloud Storage" - }, - "https://www.googleapis.com/auth/devstorage.read_write": { - "description": "Manage your data in Google Cloud Storage" - } - } - } - }, - "schemas": { - "Bucket": { - "id": "Bucket", - "type": "object", - "description": "A bucket.", - "properties": { - "acl": { - "type": "array", - "description": "Access controls on the bucket.", - "items": { - "$ref": "BucketAccessControl" - }, - "annotations": { - "required": [ - "storage.buckets.update" - ] - } - }, - "cors": { - "type": "array", - "description": "The bucket's Cross-Origin Resource Sharing (CORS) configuration.", - "items": { - "type": "object", - "properties": { - "maxAgeSeconds": { - "type": "integer", - "description": "The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.", - "format": "int32" - }, - "method": { - "type": "array", - "description": "The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: \"*\" is permitted in the list of methods, and means \"any method\".", - "items": { - "type": "string" - } - }, - "origin": { - "type": "array", - "description": "The list of Origins eligible to receive CORS response headers. Note: \"*\" is permitted in the list of origins, and means \"any Origin\".", - "items": { - "type": "string" - } - }, - "responseHeader": { - "type": "array", - "description": "The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.", - "items": { - "type": "string" - } - } - } - } - }, - "defaultObjectAcl": { - "type": "array", - "description": "Default access controls to apply to new objects when no ACL is provided.", - "items": { - "$ref": "ObjectAccessControl" - } - }, - "etag": { - "type": "string", - "description": "HTTP 1.1 Entity tag for the bucket." - }, - "id": { - "type": "string", - "description": "The ID of the bucket." - }, - "kind": { - "type": "string", - "description": "The kind of item this is. For buckets, this is always storage#bucket.", - "default": "storage#bucket" - }, - "lifecycle": { - "type": "object", - "description": "The bucket's lifecycle configuration. See lifecycle management for more information.", - "properties": { - "rule": { - "type": "array", - "description": "A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.", - "items": { - "type": "object", - "properties": { - "action": { - "type": "object", - "description": "The action to take.", - "properties": { - "type": { - "type": "string", - "description": "Type of the action. Currently, only Delete is supported." - } - } - }, - "condition": { - "type": "object", - "description": "The condition(s) under which the action will be taken.", - "properties": { - "age": { - "type": "integer", - "description": "Age of an object (in days). 
This condition is satisfied when an object reaches the specified age.", - "format": "int32" - }, - "createdBefore": { - "type": "string", - "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when an object is created before midnight of the specified date in UTC.", - "format": "date" - }, - "isLive": { - "type": "boolean", - "description": "Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects." - }, - "numNewerVersions": { - "type": "integer", - "description": "Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.", - "format": "int32" - } - } - } - } - } - } - } - }, - "location": { - "type": "string", - "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list." - }, - "logging": { - "type": "object", - "description": "The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.", - "properties": { - "logBucket": { - "type": "string", - "description": "The destination bucket where the current bucket's logs should be placed." - }, - "logObjectPrefix": { - "type": "string", - "description": "A prefix for log object names." - } - } - }, - "metageneration": { - "type": "string", - "description": "The metadata generation of this bucket.", - "format": "int64" - }, - "name": { - "type": "string", - "description": "The name of the bucket.", - "annotations": { - "required": [ - "storage.buckets.insert" - ] - } - }, - "owner": { - "type": "object", - "description": "The owner of the bucket. This is always the project team's owner group.", - "properties": { - "entity": { - "type": "string", - "description": "The entity, in the form project-owner-projectId." - }, - "entityId": { - "type": "string", - "description": "The ID for the entity." - } - } - }, - "projectNumber": { - "type": "string", - "description": "The project number of the project the bucket belongs to.", - "format": "uint64" - }, - "selfLink": { - "type": "string", - "description": "The URI of this bucket." - }, - "storageClass": { - "type": "string", - "description": "The bucket's storage class. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY. Defaults to STANDARD. For more information, see storage classes." - }, - "timeCreated": { - "type": "string", - "description": "The creation time of the bucket in RFC 3339 format.", - "format": "date-time" - }, - "updated": { - "type": "string", - "description": "The modification time of the bucket in RFC 3339 format.", - "format": "date-time" - }, - "versioning": { - "type": "object", - "description": "The bucket's versioning configuration.", - "properties": { - "enabled": { - "type": "boolean", - "description": "While set to true, versioning is fully enabled for this bucket." - } - } - }, - "website": { - "type": "object", - "description": "The bucket's website configuration.", - "properties": { - "mainPageSuffix": { - "type": "string", - "description": "Behaves as the bucket's directory index where missing objects are treated as potential directories." 
- }, - "notFoundPage": { - "type": "string", - "description": "The custom object to return when a requested resource is not found." - } - } - } - } - }, - "BucketAccessControl": { - "id": "BucketAccessControl", - "type": "object", - "description": "An access-control entry.", - "properties": { - "bucket": { - "type": "string", - "description": "The name of the bucket." - }, - "domain": { - "type": "string", - "description": "The domain associated with the entity, if any." - }, - "email": { - "type": "string", - "description": "The email address associated with the entity, if any." - }, - "entity": { - "type": "string", - "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.", - "annotations": { - "required": [ - "storage.bucketAccessControls.insert" - ] - } - }, - "entityId": { - "type": "string", - "description": "The ID for the entity, if any." - }, - "etag": { - "type": "string", - "description": "HTTP 1.1 Entity tag for the access-control entry." - }, - "id": { - "type": "string", - "description": "The ID of the access-control entry." - }, - "kind": { - "type": "string", - "description": "The kind of item this is. For bucket access control entries, this is always storage#bucketAccessControl.", - "default": "storage#bucketAccessControl" - }, - "projectTeam": { - "type": "object", - "description": "The project team associated with the entity, if any.", - "properties": { - "projectNumber": { - "type": "string", - "description": "The project number." - }, - "team": { - "type": "string", - "description": "The team. Can be owners, editors, or viewers." - } - } - }, - "role": { - "type": "string", - "description": "The access permission for the entity. Can be READER, WRITER, or OWNER.", - "annotations": { - "required": [ - "storage.bucketAccessControls.insert" - ] - } - }, - "selfLink": { - "type": "string", - "description": "The link to this access-control entry." - } - } - }, - "BucketAccessControls": { - "id": "BucketAccessControls", - "type": "object", - "description": "An access-control list.", - "properties": { - "items": { - "type": "array", - "description": "The list of items.", - "items": { - "$ref": "BucketAccessControl" - } - }, - "kind": { - "type": "string", - "description": "The kind of item this is. For lists of bucket access control entries, this is always storage#bucketAccessControls.", - "default": "storage#bucketAccessControls" - } - } - }, - "Buckets": { - "id": "Buckets", - "type": "object", - "description": "A list of buckets.", - "properties": { - "items": { - "type": "array", - "description": "The list of items.", - "items": { - "$ref": "Bucket" - } - }, - "kind": { - "type": "string", - "description": "The kind of item this is. For lists of buckets, this is always storage#buckets.", - "default": "storage#buckets" - }, - "nextPageToken": { - "type": "string", - "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results." 
- } - } - }, - "Channel": { - "id": "Channel", - "type": "object", - "description": "An notification channel used to watch for resource changes.", - "properties": { - "address": { - "type": "string", - "description": "The address where notifications are delivered for this channel." - }, - "expiration": { - "type": "string", - "description": "Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.", - "format": "int64" - }, - "id": { - "type": "string", - "description": "A UUID or similar unique string that identifies this channel." - }, - "kind": { - "type": "string", - "description": "Identifies this as a notification channel used to watch for changes to a resource. Value: the fixed string \"api#channel\".", - "default": "api#channel" - }, - "params": { - "type": "object", - "description": "Additional parameters controlling delivery channel behavior. Optional.", - "additionalProperties": { - "type": "string", - "description": "Declares a new parameter by name." - } - }, - "payload": { - "type": "boolean", - "description": "A Boolean value to indicate whether payload is wanted. Optional." - }, - "resourceId": { - "type": "string", - "description": "An opaque ID that identifies the resource being watched on this channel. Stable across different API versions." - }, - "resourceUri": { - "type": "string", - "description": "A version-specific identifier for the watched resource." - }, - "token": { - "type": "string", - "description": "An arbitrary string delivered to the target address with each notification delivered over this channel. Optional." - }, - "type": { - "type": "string", - "description": "The type of delivery mechanism used for this channel." - } - } - }, - "ComposeRequest": { - "id": "ComposeRequest", - "type": "object", - "description": "A Compose request.", - "properties": { - "destination": { - "$ref": "Object", - "description": "Properties of the resulting object." - }, - "kind": { - "type": "string", - "description": "The kind of item this is.", - "default": "storage#composeRequest" - }, - "sourceObjects": { - "type": "array", - "description": "The list of source objects that will be concatenated into a single object.", - "items": { - "type": "object", - "properties": { - "generation": { - "type": "string", - "description": "The generation of this object to use as the source.", - "format": "int64" - }, - "name": { - "type": "string", - "description": "The source object's name. The source object's bucket is implicitly the destination bucket.", - "annotations": { - "required": [ - "storage.objects.compose" - ] - } - }, - "objectPreconditions": { - "type": "object", - "description": "Conditions that must be met for this operation to execute.", - "properties": { - "ifGenerationMatch": { - "type": "string", - "description": "Only perform the composition if the generation of the source object that would be used matches this value. 
If this value and a generation are both specified, they must be the same value or the call will fail.", - "format": "int64" - } - } - } - } - }, - "annotations": { - "required": [ - "storage.objects.compose" - ] - } - } - } - }, - "Object": { - "id": "Object", - "type": "object", - "description": "An object.", - "properties": { - "acl": { - "type": "array", - "description": "Access controls on the object.", - "items": { - "$ref": "ObjectAccessControl" - }, - "annotations": { - "required": [ - "storage.objects.update" - ] - } - }, - "bucket": { - "type": "string", - "description": "The name of the bucket containing this object." - }, - "cacheControl": { - "type": "string", - "description": "Cache-Control directive for the object data." - }, - "componentCount": { - "type": "integer", - "description": "Number of underlying components that make up this object. Components are accumulated by compose operations.", - "format": "int32" - }, - "contentDisposition": { - "type": "string", - "description": "Content-Disposition of the object data." - }, - "contentEncoding": { - "type": "string", - "description": "Content-Encoding of the object data." - }, - "contentLanguage": { - "type": "string", - "description": "Content-Language of the object data." - }, - "contentType": { - "type": "string", - "description": "Content-Type of the object data." - }, - "crc32c": { - "type": "string", - "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices." - }, - "customerEncryption": { - "type": "object", - "description": "Metadata of customer-supplied encryption key, if the object is encrypted by such a key.", - "properties": { - "encryptionAlgorithm": { - "type": "string", - "description": "The encryption algorithm." - }, - "keySha256": { - "type": "string", - "description": "SHA256 hash value of the encryption key." - } - } - }, - "etag": { - "type": "string", - "description": "HTTP 1.1 Entity tag for the object." - }, - "generation": { - "type": "string", - "description": "The content generation of this object. Used for object versioning.", - "format": "int64" - }, - "id": { - "type": "string", - "description": "The ID of the object." - }, - "kind": { - "type": "string", - "description": "The kind of item this is. For objects, this is always storage#object.", - "default": "storage#object" - }, - "md5Hash": { - "type": "string", - "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see Hashes and ETags: Best Practices." - }, - "mediaLink": { - "type": "string", - "description": "Media download link." - }, - "metadata": { - "type": "object", - "description": "User-provided metadata, in key/value pairs.", - "additionalProperties": { - "type": "string", - "description": "An individual metadata entry." - } - }, - "metageneration": { - "type": "string", - "description": "The version of the metadata for this object at this generation. Used for preconditions and for detecting changes in metadata. A metageneration number is only meaningful in the context of a particular generation of a particular object.", - "format": "int64" - }, - "name": { - "type": "string", - "description": "The name of this object. Required if not specified by URL parameter." - }, - "owner": { - "type": "object", - "description": "The owner of the object. 
This will always be the uploader of the object.", - "properties": { - "entity": { - "type": "string", - "description": "The entity, in the form user-userId." - }, - "entityId": { - "type": "string", - "description": "The ID for the entity." - } - } - }, - "selfLink": { - "type": "string", - "description": "The link to this object." - }, - "size": { - "type": "string", - "description": "Content-Length of the data in bytes.", - "format": "uint64" - }, - "storageClass": { - "type": "string", - "description": "Storage class of the object." - }, - "timeCreated": { - "type": "string", - "description": "The creation time of the object in RFC 3339 format.", - "format": "date-time" - }, - "timeDeleted": { - "type": "string", - "description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.", - "format": "date-time" - }, - "updated": { - "type": "string", - "description": "The modification time of the object metadata in RFC 3339 format.", - "format": "date-time" - } - } - }, - "ObjectAccessControl": { - "id": "ObjectAccessControl", - "type": "object", - "description": "An access-control entry.", - "properties": { - "bucket": { - "type": "string", - "description": "The name of the bucket." - }, - "domain": { - "type": "string", - "description": "The domain associated with the entity, if any." - }, - "email": { - "type": "string", - "description": "The email address associated with the entity, if any." - }, - "entity": { - "type": "string", - "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com." - }, - "entityId": { - "type": "string", - "description": "The ID for the entity, if any." - }, - "etag": { - "type": "string", - "description": "HTTP 1.1 Entity tag for the access-control entry." - }, - "generation": { - "type": "string", - "description": "The content generation of the object.", - "format": "int64" - }, - "id": { - "type": "string", - "description": "The ID of the access-control entry." - }, - "kind": { - "type": "string", - "description": "The kind of item this is. For object access control entries, this is always storage#objectAccessControl.", - "default": "storage#objectAccessControl" - }, - "object": { - "type": "string", - "description": "The name of the object." - }, - "projectTeam": { - "type": "object", - "description": "The project team associated with the entity, if any.", - "properties": { - "projectNumber": { - "type": "string", - "description": "The project number." - }, - "team": { - "type": "string", - "description": "The team. Can be owners, editors, or viewers." - } - } - }, - "role": { - "type": "string", - "description": "The access permission for the entity. Can be READER or OWNER." - }, - "selfLink": { - "type": "string", - "description": "The link to this access-control entry." 
- } - } - }, - "ObjectAccessControls": { - "id": "ObjectAccessControls", - "type": "object", - "description": "An access-control list.", - "properties": { - "items": { - "type": "array", - "description": "The list of items.", - "items": { - "type": "any" - } - }, - "kind": { - "type": "string", - "description": "The kind of item this is. For lists of object access control entries, this is always storage#objectAccessControls.", - "default": "storage#objectAccessControls" - } - } - }, - "Objects": { - "id": "Objects", - "type": "object", - "description": "A list of objects.", - "properties": { - "items": { - "type": "array", - "description": "The list of items.", - "items": { - "$ref": "Object" - } - }, - "kind": { - "type": "string", - "description": "The kind of item this is. For lists of objects, this is always storage#objects.", - "default": "storage#objects" - }, - "nextPageToken": { - "type": "string", - "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results." - }, - "prefixes": { - "type": "array", - "description": "The list of prefixes of objects matching-but-not-listed up to and including the requested delimiter.", - "items": { - "type": "string" - } - } - } - }, - "RewriteResponse": { - "id": "RewriteResponse", - "type": "object", - "description": "A rewrite response.", - "properties": { - "done": { - "type": "boolean", - "description": "true if the copy is finished; otherwise, false if the copy is in progress. This property is always present in the response." - }, - "kind": { - "type": "string", - "description": "The kind of item this is.", - "default": "storage#rewriteResponse" - }, - "objectSize": { - "type": "string", - "description": "The total size of the object being copied in bytes. This property is always present in the response.", - "format": "uint64" - }, - "resource": { - "$ref": "Object", - "description": "A resource containing the metadata for the copied-to object. This property is present in the response only when copying completes." - }, - "rewriteToken": { - "type": "string", - "description": "A token to use in subsequent requests to continue copying data. This token is present in the response only when there is more data to copy." - }, - "totalBytesRewritten": { - "type": "string", - "description": "The total bytes written so far, which can be used to provide a waiting user with a progress indicator. This property is always present in the response.", - "format": "uint64" - } - } - } - }, - "resources": { - "bucketAccessControls": { - "methods": { - "delete": { - "id": "storage.bucketAccessControls.delete", - "path": "b/{bucket}/acl/{entity}", - "httpMethod": "DELETE", - "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "bucket", - "entity" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "get": { - "id": "storage.bucketAccessControls.get", - "path": "b/{bucket}/acl/{entity}", - "httpMethod": "GET", - "description": "Returns the ACL entry for the specified entity on the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "bucket", - "entity" - ], - "response": { - "$ref": "BucketAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "insert": { - "id": "storage.bucketAccessControls.insert", - "path": "b/{bucket}/acl", - "httpMethod": "POST", - "description": "Creates a new ACL entry on the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "bucket" - ], - "request": { - "$ref": "BucketAccessControl" - }, - "response": { - "$ref": "BucketAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "list": { - "id": "storage.bucketAccessControls.list", - "path": "b/{bucket}/acl", - "httpMethod": "GET", - "description": "Retrieves ACL entries on the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "bucket" - ], - "response": { - "$ref": "BucketAccessControls" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "patch": { - "id": "storage.bucketAccessControls.patch", - "path": "b/{bucket}/acl/{entity}", - "httpMethod": "PATCH", - "description": "Updates an ACL entry on the specified bucket. This method supports patch semantics.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "bucket", - "entity" - ], - "request": { - "$ref": "BucketAccessControl" - }, - "response": { - "$ref": "BucketAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "update": { - "id": "storage.bucketAccessControls.update", - "path": "b/{bucket}/acl/{entity}", - "httpMethod": "PUT", - "description": "Updates an ACL entry on the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "bucket", - "entity" - ], - "request": { - "$ref": "BucketAccessControl" - }, - "response": { - "$ref": "BucketAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - } - } - }, - "buckets": { - "methods": { - "delete": { - "id": "storage.buckets.delete", - "path": "b/{bucket}", - "httpMethod": "DELETE", - "description": "Permanently deletes an empty bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "If set, only deletes the bucket if its metageneration matches this value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "If set, only deletes the bucket if its metageneration does not match this value.", - "format": "int64", - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "get": { - "id": "storage.buckets.get", - "path": "b/{bucket}", - "httpMethod": "GET", - "description": "Returns metadata for the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to noAcl.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit acl and defaultObjectAcl properties." 
- ], - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "response": { - "$ref": "Bucket" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "insert": { - "id": "storage.buckets.insert", - "path": "b", - "httpMethod": "POST", - "description": "Creates a new bucket.", - "parameters": { - "predefinedAcl": { - "type": "string", - "description": "Apply a predefined set of access controls to this bucket.", - "enum": [ - "authenticatedRead", - "private", - "projectPrivate", - "publicRead", - "publicReadWrite" - ], - "enumDescriptions": [ - "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - "Project team owners get OWNER access.", - "Project team members get access according to their roles.", - "Project team owners get OWNER access, and allUsers get READER access.", - "Project team owners get OWNER access, and allUsers get WRITER access." - ], - "location": "query" - }, - "predefinedDefaultObjectAcl": { - "type": "string", - "description": "Apply a predefined set of default object access controls to this bucket.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." - ], - "location": "query" - }, - "project": { - "type": "string", - "description": "A valid API project identifier.", - "required": true, - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit acl and defaultObjectAcl properties." 
- ], - "location": "query" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "Bucket" - }, - "response": { - "$ref": "Bucket" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "list": { - "id": "storage.buckets.list", - "path": "b", - "httpMethod": "GET", - "description": "Retrieves a list of buckets for a given project.", - "parameters": { - "maxResults": { - "type": "integer", - "description": "Maximum number of buckets to return.", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "A previously-returned page token representing part of the larger set of results to view.", - "location": "query" - }, - "prefix": { - "type": "string", - "description": "Filter results to buckets whose names begin with this prefix.", - "location": "query" - }, - "project": { - "type": "string", - "description": "A valid API project identifier.", - "required": true, - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to noAcl.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit acl and defaultObjectAcl properties." - ], - "location": "query" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "Buckets" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "patch": { - "id": "storage.buckets.patch", - "path": "b/{bucket}", - "httpMethod": "PATCH", - "description": "Updates a bucket. This method supports patch semantics.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "predefinedAcl": { - "type": "string", - "description": "Apply a predefined set of access controls to this bucket.", - "enum": [ - "authenticatedRead", - "private", - "projectPrivate", - "publicRead", - "publicReadWrite" - ], - "enumDescriptions": [ - "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - "Project team owners get OWNER access.", - "Project team members get access according to their roles.", - "Project team owners get OWNER access, and allUsers get READER access.", - "Project team owners get OWNER access, and allUsers get WRITER access." 
- ], - "location": "query" - }, - "predefinedDefaultObjectAcl": { - "type": "string", - "description": "Apply a predefined set of default object access controls to this bucket.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." - ], - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit acl and defaultObjectAcl properties." - ], - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "request": { - "$ref": "Bucket" - }, - "response": { - "$ref": "Bucket" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "update": { - "id": "storage.buckets.update", - "path": "b/{bucket}", - "httpMethod": "PUT", - "description": "Updates a bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "predefinedAcl": { - "type": "string", - "description": "Apply a predefined set of access controls to this bucket.", - "enum": [ - "authenticatedRead", - "private", - "projectPrivate", - "publicRead", - "publicReadWrite" - ], - "enumDescriptions": [ - "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - "Project team owners get OWNER access.", - "Project team members get access according to their roles.", - "Project team owners get OWNER access, and allUsers get READER access.", - "Project team owners get OWNER access, and allUsers get WRITER access." - ], - "location": "query" - }, - "predefinedDefaultObjectAcl": { - "type": "string", - "description": "Apply a predefined set of default object access controls to this bucket.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." 
- ], - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit acl and defaultObjectAcl properties." - ], - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "request": { - "$ref": "Bucket" - }, - "response": { - "$ref": "Bucket" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - } - } - }, - "channels": { - "methods": { - "stop": { - "id": "storage.channels.stop", - "path": "channels/stop", - "httpMethod": "POST", - "description": "Stop watching resources through this channel", - "request": { - "$ref": "Channel", - "parameterName": "resource" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - } - } - }, - "defaultObjectAccessControls": { - "methods": { - "delete": { - "id": "storage.defaultObjectAccessControls.delete", - "path": "b/{bucket}/defaultObjectAcl/{entity}", - "httpMethod": "DELETE", - "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "bucket", - "entity" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "get": { - "id": "storage.defaultObjectAccessControls.get", - "path": "b/{bucket}/defaultObjectAcl/{entity}", - "httpMethod": "GET", - "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "bucket", - "entity" - ], - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "insert": { - "id": "storage.defaultObjectAccessControls.insert", - "path": "b/{bucket}/defaultObjectAcl", - "httpMethod": "POST", - "description": "Creates a new default object ACL entry on the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "bucket" - ], - "request": { - "$ref": "ObjectAccessControl" - }, - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "list": { - "id": "storage.defaultObjectAccessControls.list", - "path": "b/{bucket}/defaultObjectAcl", - "httpMethod": "GET", - "description": "Retrieves default object ACL entries on the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "response": { - "$ref": "ObjectAccessControls" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "patch": { - "id": "storage.defaultObjectAccessControls.patch", - "path": "b/{bucket}/defaultObjectAcl/{entity}", - "httpMethod": "PATCH", - "description": "Updates a default object ACL entry on the specified bucket. This method supports patch semantics.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "bucket", - "entity" - ], - "request": { - "$ref": "ObjectAccessControl" - }, - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "update": { - "id": "storage.defaultObjectAccessControls.update", - "path": "b/{bucket}/defaultObjectAcl/{entity}", - "httpMethod": "PUT", - "description": "Updates a default object ACL entry on the specified bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "bucket", - "entity" - ], - "request": { - "$ref": "ObjectAccessControl" - }, - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - } - } - }, - "objectAccessControls": { - "methods": { - "delete": { - "id": "storage.objectAccessControls.delete", - "path": "b/{bucket}/o/{object}/acl/{entity}", - "httpMethod": "DELETE", - "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "bucket", - "object", - "entity" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "get": { - "id": "storage.objectAccessControls.get", - "path": "b/{bucket}/o/{object}/acl/{entity}", - "httpMethod": "GET", - "description": "Returns the ACL entry for the specified entity on the specified object.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "bucket", - "object", - "entity" - ], - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "insert": { - "id": "storage.objectAccessControls.insert", - "path": "b/{bucket}/o/{object}/acl", - "httpMethod": "POST", - "description": "Creates a new ACL entry on the specified object.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "bucket", - "object" - ], - "request": { - "$ref": "ObjectAccessControl" - }, - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "list": { - "id": "storage.objectAccessControls.list", - "path": "b/{bucket}/o/{object}/acl", - "httpMethod": "GET", - "description": "Retrieves ACL entries on the specified object.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "bucket", - "object" - ], - "response": { - "$ref": "ObjectAccessControls" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "patch": { - "id": "storage.objectAccessControls.patch", - "path": "b/{bucket}/o/{object}/acl/{entity}", - "httpMethod": "PATCH", - "description": "Updates an ACL entry on the specified object. This method supports patch semantics.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "bucket", - "object", - "entity" - ], - "request": { - "$ref": "ObjectAccessControl" - }, - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - }, - "update": { - "id": "storage.objectAccessControls.update", - "path": "b/{bucket}/o/{object}/acl/{entity}", - "httpMethod": "PUT", - "description": "Updates an ACL entry on the specified object.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of a bucket.", - "required": true, - "location": "path" - }, - "entity": { - "type": "string", - "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "bucket", - "object", - "entity" - ], - "request": { - "$ref": "ObjectAccessControl" - }, - "response": { - "$ref": "ObjectAccessControl" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" - ] - } - } - }, - "objects": { - "methods": { - "compose": { - "id": "storage.objects.compose", - "path": "b/{destinationBucket}/o/{destinationObject}/compose", - "httpMethod": "POST", - "description": "Concatenates a list of existing objects into a new object in the same bucket.", - "parameters": { - "destinationBucket": { - "type": "string", - "description": "Name of the bucket in which to store the new object.", - "required": true, - "location": "path" - }, - "destinationObject": { - "type": "string", - "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "destinationPredefinedAcl": { - "type": "string", - "description": "Apply a predefined set of access controls to the destination object.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." 
- ], - "location": "query" - }, - "ifGenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current generation matches the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - "format": "int64", - "location": "query" - } - }, - "parameterOrder": [ - "destinationBucket", - "destinationObject" - ], - "request": { - "$ref": "ComposeRequest" - }, - "response": { - "$ref": "Object" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ], - "supportsMediaDownload": true, - "useMediaDownloadService": true - }, - "copy": { - "id": "storage.objects.copy", - "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", - "httpMethod": "POST", - "description": "Copies a source object to a destination object. Optionally overrides metadata.", - "parameters": { - "destinationBucket": { - "type": "string", - "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "destinationObject": { - "type": "string", - "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", - "required": true, - "location": "path" - }, - "destinationPredefinedAcl": { - "type": "string", - "description": "Apply a predefined set of access controls to the destination object.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." 
- ], - "location": "query" - }, - "ifGenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.", - "format": "int64", - "location": "query" - }, - "ifGenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "ifSourceGenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the source object's generation matches the given value.", - "format": "int64", - "location": "query" - }, - "ifSourceGenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the source object's generation does not match the given value.", - "format": "int64", - "location": "query" - }, - "ifSourceMetagenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifSourceMetagenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the acl property." - ], - "location": "query" - }, - "sourceBucket": { - "type": "string", - "description": "Name of the bucket in which to find the source object.", - "required": true, - "location": "path" - }, - "sourceGeneration": { - "type": "string", - "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "sourceObject": { - "type": "string", - "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "sourceBucket", - "sourceObject", - "destinationBucket", - "destinationObject" - ], - "request": { - "$ref": "Object" - }, - "response": { - "$ref": "Object" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ], - "supportsMediaDownload": true, - "useMediaDownloadService": true - }, - "delete": { - "id": "storage.objects.delete", - "path": "b/{bucket}/o/{object}", - "httpMethod": "DELETE", - "description": "Deletes an object and its metadata. 
Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of the bucket in which the object resides.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "ifGenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current generation matches the given value.", - "format": "int64", - "location": "query" - }, - "ifGenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "bucket", - "object" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "get": { - "id": "storage.objects.get", - "path": "b/{bucket}/o/{object}", - "httpMethod": "GET", - "description": "Retrieves an object or its metadata.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of the bucket in which the object resides.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "ifGenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's generation matches the given value.", - "format": "int64", - "location": "query" - }, - "ifGenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's generation does not match the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to noAcl.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the acl property." - ], - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "object" - ], - "response": { - "$ref": "Object" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ], - "supportsMediaDownload": true, - "useMediaDownloadService": true - }, - "insert": { - "id": "storage.objects.insert", - "path": "b/{bucket}/o", - "httpMethod": "POST", - "description": "Stores a new object and metadata.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", - "required": true, - "location": "path" - }, - "contentEncoding": { - "type": "string", - "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", - "location": "query" - }, - "ifGenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current generation matches the given value.", - "format": "int64", - "location": "query" - }, - "ifGenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "name": { - "type": "string", - "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "location": "query" - }, - "predefinedAcl": { - "type": "string", - "description": "Apply a predefined set of access controls to this object.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." - ], - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the acl property." - ], - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "request": { - "$ref": "Object" - }, - "response": { - "$ref": "Object" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ], - "supportsMediaDownload": true, - "useMediaDownloadService": true, - "supportsMediaUpload": true, - "mediaUpload": { - "accept": [ - "*/*" - ], - "protocols": { - "simple": { - "multipart": true, - "path": "/upload/storage/v1/b/{bucket}/o" - }, - "resumable": { - "multipart": true, - "path": "/resumable/upload/storage/v1/b/{bucket}/o" - } - } - } - }, - "list": { - "id": "storage.objects.list", - "path": "b/{bucket}/o", - "httpMethod": "GET", - "description": "Retrieves a list of objects matching the criteria.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of the bucket in which to look for objects.", - "required": true, - "location": "path" - }, - "delimiter": { - "type": "string", - "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "A previously-returned page token representing part of the larger set of results to view.", - "location": "query" - }, - "prefix": { - "type": "string", - "description": "Filter results to objects whose names begin with this prefix.", - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to noAcl.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the acl property." 
- ], - "location": "query" - }, - "versions": { - "type": "boolean", - "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "response": { - "$ref": "Objects" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ], - "supportsSubscription": true - }, - "patch": { - "id": "storage.objects.patch", - "path": "b/{bucket}/o/{object}", - "httpMethod": "PATCH", - "description": "Updates an object's metadata. This method supports patch semantics.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of the bucket in which the object resides.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "ifGenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current generation matches the given value.", - "format": "int64", - "location": "query" - }, - "ifGenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "predefinedAcl": { - "type": "string", - "description": "Apply a predefined set of access controls to this object.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." - ], - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the acl property." 
- ], - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "object" - ], - "request": { - "$ref": "Object" - }, - "response": { - "$ref": "Object" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "rewrite": { - "id": "storage.objects.rewrite", - "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", - "httpMethod": "POST", - "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", - "parameters": { - "destinationBucket": { - "type": "string", - "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", - "required": true, - "location": "path" - }, - "destinationObject": { - "type": "string", - "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "destinationPredefinedAcl": { - "type": "string", - "description": "Apply a predefined set of access controls to the destination object.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." 
- ], - "location": "query" - }, - "ifGenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.", - "format": "int64", - "location": "query" - }, - "ifGenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "ifSourceGenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the source object's generation matches the given value.", - "format": "int64", - "location": "query" - }, - "ifSourceGenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the source object's generation does not match the given value.", - "format": "int64", - "location": "query" - }, - "ifSourceMetagenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifSourceMetagenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "maxBytesRewrittenPerCall": { - "type": "string", - "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", - "format": "int64", - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the acl property." - ], - "location": "query" - }, - "rewriteToken": { - "type": "string", - "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. 
Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", - "location": "query" - }, - "sourceBucket": { - "type": "string", - "description": "Name of the bucket in which to find the source object.", - "required": true, - "location": "path" - }, - "sourceGeneration": { - "type": "string", - "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "sourceObject": { - "type": "string", - "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - } - }, - "parameterOrder": [ - "sourceBucket", - "sourceObject", - "destinationBucket", - "destinationObject" - ], - "request": { - "$ref": "Object" - }, - "response": { - "$ref": "RewriteResponse" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ] - }, - "update": { - "id": "storage.objects.update", - "path": "b/{bucket}/o/{object}", - "httpMethod": "PUT", - "description": "Updates an object's metadata.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of the bucket in which the object resides.", - "required": true, - "location": "path" - }, - "generation": { - "type": "string", - "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - "format": "int64", - "location": "query" - }, - "ifGenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current generation matches the given value.", - "format": "int64", - "location": "query" - }, - "ifGenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - "format": "int64", - "location": "query" - }, - "ifMetagenerationNotMatch": { - "type": "string", - "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - "format": "int64", - "location": "query" - }, - "object": { - "type": "string", - "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - "required": true, - "location": "path" - }, - "predefinedAcl": { - "type": "string", - "description": "Apply a predefined set of access controls to this object.", - "enum": [ - "authenticatedRead", - "bucketOwnerFullControl", - "bucketOwnerRead", - "private", - "projectPrivate", - "publicRead" - ], - "enumDescriptions": [ - "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - "Object owner gets OWNER access, and project team owners get OWNER access.", - "Object owner gets OWNER access, and project team owners get READER access.", - "Object owner gets OWNER access.", - "Object owner gets OWNER access, and project team members get access according to their roles.", - "Object owner gets OWNER access, and allUsers get READER access." - ], - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to full.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the acl property." - ], - "location": "query" - } - }, - "parameterOrder": [ - "bucket", - "object" - ], - "request": { - "$ref": "Object" - }, - "response": { - "$ref": "Object" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" - ], - "supportsMediaDownload": true, - "useMediaDownloadService": true - }, - "watchAll": { - "id": "storage.objects.watchAll", - "path": "b/{bucket}/o/watch", - "httpMethod": "POST", - "description": "Watch for changes on all objects in a bucket.", - "parameters": { - "bucket": { - "type": "string", - "description": "Name of the bucket in which to look for objects.", - "required": true, - "location": "path" - }, - "delimiter": { - "type": "string", - "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.", - "format": "uint32", - "minimum": "0", - "location": "query" - }, - "pageToken": { - "type": "string", - "description": "A previously-returned page token representing part of the larger set of results to view.", - "location": "query" - }, - "prefix": { - "type": "string", - "description": "Filter results to objects whose names begin with this prefix.", - "location": "query" - }, - "projection": { - "type": "string", - "description": "Set of properties to return. Defaults to noAcl.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the acl property." - ], - "location": "query" - }, - "versions": { - "type": "boolean", - "description": "If true, lists all versions of an object as distinct results. The default is false. 
For more information, see Object Versioning.", - "location": "query" - } - }, - "parameterOrder": [ - "bucket" - ], - "request": { - "$ref": "Channel", - "parameterName": "resource" - }, - "response": { - "$ref": "Channel" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" - ], - "supportsSubscription": true - } - } - } - } -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-gen.go deleted file mode 100644 index a2990443..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ /dev/null @@ -1,7690 +0,0 @@ -// Package storage provides access to the Cloud Storage JSON API. -// -// See https://developers.google.com/storage/docs/json_api/ -// -// Usage example: -// -// import "google.golang.org/api/storage/v1" -// ... -// storageService, err := storage.New(oauthHttpClient) -package storage - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - context "golang.org/x/net/context" - ctxhttp "golang.org/x/net/context/ctxhttp" - gensupport "google.golang.org/api/gensupport" - googleapi "google.golang.org/api/googleapi" - "io" - "net/http" - "net/url" - "strconv" - "strings" -) - -// Always reference these packages, just in case the auto-generated code -// below doesn't. -var _ = bytes.NewBuffer -var _ = strconv.Itoa -var _ = fmt.Sprintf -var _ = json.NewDecoder -var _ = io.Copy -var _ = url.Parse -var _ = gensupport.MarshalJSON -var _ = googleapi.Version -var _ = errors.New -var _ = strings.Replace -var _ = context.Canceled -var _ = ctxhttp.Do - -const apiId = "storage:v1" -const apiName = "storage" -const apiVersion = "v1" -const basePath = "https://www.googleapis.com/storage/v1/" - -// OAuth2 scopes used by this API. 
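The constants and constructor that follow are the entry points to this generated client. As a minimal sketch of how a caller might wire them together (the golang.org/x/oauth2/google dependency and the main function are assumptions of this sketch, not part of the deleted file):

```go
package main

import (
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	// Resolve Application Default Credentials into an *http.Client that
	// injects OAuth2 tokens carrying one of the scopes declared below.
	hc, err := google.DefaultClient(ctx, storage.DevstorageReadWriteScope)
	if err != nil {
		log.Fatalf("creating authorized client: %v", err)
	}
	// New (defined below) attaches the per-resource services to the client.
	svc, err := storage.New(hc)
	if err != nil {
		log.Fatalf("creating storage service: %v", err)
	}
	_ = svc // svc.Buckets, svc.Objects, etc. are now usable
}
```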
-const (
-	// View and manage your data across Google Cloud Platform services
-	CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
-
-	// View your data across Google Cloud Platform services
-	CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only"
-
-	// Manage your data and permissions in Google Cloud Storage
-	DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control"
-
-	// View your data in Google Cloud Storage
-	DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only"
-
-	// Manage your data in Google Cloud Storage
-	DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write"
-)
-
-func New(client *http.Client) (*Service, error) {
-	if client == nil {
-		return nil, errors.New("client is nil")
-	}
-	s := &Service{client: client, BasePath: basePath}
-	s.BucketAccessControls = NewBucketAccessControlsService(s)
-	s.Buckets = NewBucketsService(s)
-	s.Channels = NewChannelsService(s)
-	s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s)
-	s.ObjectAccessControls = NewObjectAccessControlsService(s)
-	s.Objects = NewObjectsService(s)
-	return s, nil
-}
-
-type Service struct {
-	client    *http.Client
-	BasePath  string // API endpoint base URL
-	UserAgent string // optional additional User-Agent fragment
-
-	BucketAccessControls *BucketAccessControlsService
-
-	Buckets *BucketsService
-
-	Channels *ChannelsService
-
-	DefaultObjectAccessControls *DefaultObjectAccessControlsService
-
-	ObjectAccessControls *ObjectAccessControlsService
-
-	Objects *ObjectsService
-}
-
-func (s *Service) userAgent() string {
-	if s.UserAgent == "" {
-		return googleapi.UserAgent
-	}
-	return googleapi.UserAgent + " " + s.UserAgent
-}
-
-func NewBucketAccessControlsService(s *Service) *BucketAccessControlsService {
-	rs := &BucketAccessControlsService{s: s}
-	return rs
-}
-
-type BucketAccessControlsService struct {
-	s *Service
-}
-
-func NewBucketsService(s *Service) *BucketsService {
-	rs := &BucketsService{s: s}
-	return rs
-}
-
-type BucketsService struct {
-	s *Service
-}
-
-func NewChannelsService(s *Service) *ChannelsService {
-	rs := &ChannelsService{s: s}
-	return rs
-}
-
-type ChannelsService struct {
-	s *Service
-}
-
-func NewDefaultObjectAccessControlsService(s *Service) *DefaultObjectAccessControlsService {
-	rs := &DefaultObjectAccessControlsService{s: s}
-	return rs
-}
-
-type DefaultObjectAccessControlsService struct {
-	s *Service
-}
-
-func NewObjectAccessControlsService(s *Service) *ObjectAccessControlsService {
-	rs := &ObjectAccessControlsService{s: s}
-	return rs
-}
-
-type ObjectAccessControlsService struct {
-	s *Service
-}
-
-func NewObjectsService(s *Service) *ObjectsService {
-	rs := &ObjectsService{s: s}
-	return rs
-}
-
-type ObjectsService struct {
-	s *Service
-}
-
-// Bucket: A bucket.
-type Bucket struct {
-	// Acl: Access controls on the bucket.
-	Acl []*BucketAccessControl `json:"acl,omitempty"`
-
-	// Cors: The bucket's Cross-Origin Resource Sharing (CORS)
-	// configuration.
-	Cors []*BucketCors `json:"cors,omitempty"`
-
-	// DefaultObjectAcl: Default access controls to apply to new objects
-	// when no ACL is provided.
-	DefaultObjectAcl []*ObjectAccessControl `json:"defaultObjectAcl,omitempty"`
-
-	// Etag: HTTP 1.1 Entity tag for the bucket.
-	Etag string `json:"etag,omitempty"`
-
-	// Id: The ID of the bucket.
-	Id string `json:"id,omitempty"`
-
-	// Kind: The kind of item this is. For buckets, this is always
-	// storage#bucket.
- Kind string `json:"kind,omitempty"` - - // Lifecycle: The bucket's lifecycle configuration. See lifecycle - // management for more information. - Lifecycle *BucketLifecycle `json:"lifecycle,omitempty"` - - // Location: The location of the bucket. Object data for objects in the - // bucket resides in physical storage within this region. Defaults to - // US. See the developer's guide for the authoritative list. - Location string `json:"location,omitempty"` - - // Logging: The bucket's logging configuration, which defines the - // destination bucket and optional name prefix for the current bucket's - // logs. - Logging *BucketLogging `json:"logging,omitempty"` - - // Metageneration: The metadata generation of this bucket. - Metageneration int64 `json:"metageneration,omitempty,string"` - - // Name: The name of the bucket. - Name string `json:"name,omitempty"` - - // Owner: The owner of the bucket. This is always the project team's - // owner group. - Owner *BucketOwner `json:"owner,omitempty"` - - // ProjectNumber: The project number of the project the bucket belongs - // to. - ProjectNumber uint64 `json:"projectNumber,omitempty,string"` - - // SelfLink: The URI of this bucket. - SelfLink string `json:"selfLink,omitempty"` - - // StorageClass: The bucket's storage class. This defines how objects in - // the bucket are stored and determines the SLA and the cost of storage. - // Values include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY. - // Defaults to STANDARD. For more information, see storage classes. - StorageClass string `json:"storageClass,omitempty"` - - // TimeCreated: The creation time of the bucket in RFC 3339 format. - TimeCreated string `json:"timeCreated,omitempty"` - - // Updated: The modification time of the bucket in RFC 3339 format. - Updated string `json:"updated,omitempty"` - - // Versioning: The bucket's versioning configuration. - Versioning *BucketVersioning `json:"versioning,omitempty"` - - // Website: The bucket's website configuration. - Website *BucketWebsite `json:"website,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Acl") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *Bucket) MarshalJSON() ([]byte, error) { - type noMethod Bucket - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -type BucketCors struct { - // MaxAgeSeconds: The value, in seconds, to return in the - // Access-Control-Max-Age header used in preflight responses. - MaxAgeSeconds int64 `json:"maxAgeSeconds,omitempty"` - - // Method: The list of HTTP methods on which to include CORS response - // headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list - // of methods, and means "any method". - Method []string `json:"method,omitempty"` - - // Origin: The list of Origins eligible to receive CORS response - // headers. Note: "*" is permitted in the list of origins, and means - // "any Origin". 
- Origin []string `json:"origin,omitempty"` - - // ResponseHeader: The list of HTTP headers other than the simple - // response headers to give permission for the user-agent to share - // across domains. - ResponseHeader []string `json:"responseHeader,omitempty"` - - // ForceSendFields is a list of field names (e.g. "MaxAgeSeconds") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *BucketCors) MarshalJSON() ([]byte, error) { - type noMethod BucketCors - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// BucketLifecycle: The bucket's lifecycle configuration. See lifecycle -// management for more information. -type BucketLifecycle struct { - // Rule: A lifecycle management rule, which is made of an action to take - // and the condition(s) under which the action will be taken. - Rule []*BucketLifecycleRule `json:"rule,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Rule") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *BucketLifecycle) MarshalJSON() ([]byte, error) { - type noMethod BucketLifecycle - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -type BucketLifecycleRule struct { - // Action: The action to take. - Action *BucketLifecycleRuleAction `json:"action,omitempty"` - - // Condition: The condition(s) under which the action will be taken. - Condition *BucketLifecycleRuleCondition `json:"condition,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Action") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *BucketLifecycleRule) MarshalJSON() ([]byte, error) { - type noMethod BucketLifecycleRule - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// BucketLifecycleRuleAction: The action to take. -type BucketLifecycleRuleAction struct { - // Type: Type of the action. Currently, only Delete is supported. - Type string `json:"type,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Type") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. 
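The lifecycle types being deleted here nest as Bucket.Lifecycle -> Rule -> Action/Condition. A minimal sketch that attaches a 30-day delete rule to a bucket, assuming the generated Buckets.Patch call defined later in storage-gen.go; the package and function names are illustrative:

```go
package storageutil

import storage "google.golang.org/api/storage/v1"

// addThirtyDayDelete patches bucket with a lifecycle rule built from the
// types above: objects are deleted once the Age condition (30 days) is met.
func addThirtyDayDelete(svc *storage.Service, bucket string) (*storage.Bucket, error) {
	lc := &storage.BucketLifecycle{
		Rule: []*storage.BucketLifecycleRule{{
			Action:    &storage.BucketLifecycleRuleAction{Type: "Delete"},
			Condition: &storage.BucketLifecycleRuleCondition{Age: 30},
		}},
	}
	return svc.Buckets.Patch(bucket, &storage.Bucket{Lifecycle: lc}).Do()
}
```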
- ForceSendFields []string `json:"-"` -} - -func (s *BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) { - type noMethod BucketLifecycleRuleAction - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// BucketLifecycleRuleCondition: The condition(s) under which the action -// will be taken. -type BucketLifecycleRuleCondition struct { - // Age: Age of an object (in days). This condition is satisfied when an - // object reaches the specified age. - Age int64 `json:"age,omitempty"` - - // CreatedBefore: A date in RFC 3339 format with only the date part (for - // instance, "2013-01-15"). This condition is satisfied when an object - // is created before midnight of the specified date in UTC. - CreatedBefore string `json:"createdBefore,omitempty"` - - // IsLive: Relevant only for versioned objects. If the value is true, - // this condition matches live objects; if the value is false, it - // matches archived objects. - IsLive bool `json:"isLive,omitempty"` - - // NumNewerVersions: Relevant only for versioned objects. If the value - // is N, this condition is satisfied when there are at least N versions - // (including the live version) newer than this version of the object. - NumNewerVersions int64 `json:"numNewerVersions,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Age") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) { - type noMethod BucketLifecycleRuleCondition - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// BucketLogging: The bucket's logging configuration, which defines the -// destination bucket and optional name prefix for the current bucket's -// logs. -type BucketLogging struct { - // LogBucket: The destination bucket where the current bucket's logs - // should be placed. - LogBucket string `json:"logBucket,omitempty"` - - // LogObjectPrefix: A prefix for log object names. - LogObjectPrefix string `json:"logObjectPrefix,omitempty"` - - // ForceSendFields is a list of field names (e.g. "LogBucket") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *BucketLogging) MarshalJSON() ([]byte, error) { - type noMethod BucketLogging - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// BucketOwner: The owner of the bucket. This is always the project -// team's owner group. -type BucketOwner struct { - // Entity: The entity, in the form project-owner-projectId. - Entity string `json:"entity,omitempty"` - - // EntityId: The ID for the entity. - EntityId string `json:"entityId,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Entity") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. 
However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *BucketOwner) MarshalJSON() ([]byte, error) { - type noMethod BucketOwner - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// BucketVersioning: The bucket's versioning configuration. -type BucketVersioning struct { - // Enabled: While set to true, versioning is fully enabled for this - // bucket. - Enabled bool `json:"enabled,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Enabled") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *BucketVersioning) MarshalJSON() ([]byte, error) { - type noMethod BucketVersioning - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// BucketWebsite: The bucket's website configuration. -type BucketWebsite struct { - // MainPageSuffix: Behaves as the bucket's directory index where missing - // objects are treated as potential directories. - MainPageSuffix string `json:"mainPageSuffix,omitempty"` - - // NotFoundPage: The custom object to return when a requested resource - // is not found. - NotFoundPage string `json:"notFoundPage,omitempty"` - - // ForceSendFields is a list of field names (e.g. "MainPageSuffix") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *BucketWebsite) MarshalJSON() ([]byte, error) { - type noMethod BucketWebsite - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// BucketAccessControl: An access-control entry. -type BucketAccessControl struct { - // Bucket: The name of the bucket. - Bucket string `json:"bucket,omitempty"` - - // Domain: The domain associated with the entity, if any. - Domain string `json:"domain,omitempty"` - - // Email: The email address associated with the entity, if any. - Email string `json:"email,omitempty"` - - // Entity: The entity holding the permission, in one of the following - // forms: - // - user-userId - // - user-email - // - group-groupId - // - group-email - // - domain-domain - // - project-team-projectId - // - allUsers - // - allAuthenticatedUsers Examples: - // - The user liz@example.com would be user-liz@example.com. - // - The group example@googlegroups.com would be - // group-example@googlegroups.com. - // - To refer to all members of the Google Apps for Business domain - // example.com, the entity would be domain-example.com. - Entity string `json:"entity,omitempty"` - - // EntityId: The ID for the entity, if any. - EntityId string `json:"entityId,omitempty"` - - // Etag: HTTP 1.1 Entity tag for the access-control entry. - Etag string `json:"etag,omitempty"` - - // Id: The ID of the access-control entry. 
- Id string `json:"id,omitempty"` - - // Kind: The kind of item this is. For bucket access control entries, - // this is always storage#bucketAccessControl. - Kind string `json:"kind,omitempty"` - - // ProjectTeam: The project team associated with the entity, if any. - ProjectTeam *BucketAccessControlProjectTeam `json:"projectTeam,omitempty"` - - // Role: The access permission for the entity. Can be READER, WRITER, or - // OWNER. - Role string `json:"role,omitempty"` - - // SelfLink: The link to this access-control entry. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Bucket") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *BucketAccessControl) MarshalJSON() ([]byte, error) { - type noMethod BucketAccessControl - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// BucketAccessControlProjectTeam: The project team associated with the -// entity, if any. -type BucketAccessControlProjectTeam struct { - // ProjectNumber: The project number. - ProjectNumber string `json:"projectNumber,omitempty"` - - // Team: The team. Can be owners, editors, or viewers. - Team string `json:"team,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ProjectNumber") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) { - type noMethod BucketAccessControlProjectTeam - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// BucketAccessControls: An access-control list. -type BucketAccessControls struct { - // Items: The list of items. - Items []*BucketAccessControl `json:"items,omitempty"` - - // Kind: The kind of item this is. For lists of bucket access control - // entries, this is always storage#bucketAccessControls. - Kind string `json:"kind,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *BucketAccessControls) MarshalJSON() ([]byte, error) { - type noMethod BucketAccessControls - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// Buckets: A list of buckets. -type Buckets struct { - // Items: The list of items. 
- Items []*Bucket `json:"items,omitempty"` - - // Kind: The kind of item this is. For lists of buckets, this is always - // storage#buckets. - Kind string `json:"kind,omitempty"` - - // NextPageToken: The continuation token, used to page through large - // result sets. Provide this value in a subsequent request to return the - // next page of results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *Buckets) MarshalJSON() ([]byte, error) { - type noMethod Buckets - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// Channel: An notification channel used to watch for resource changes. -type Channel struct { - // Address: The address where notifications are delivered for this - // channel. - Address string `json:"address,omitempty"` - - // Expiration: Date and time of notification channel expiration, - // expressed as a Unix timestamp, in milliseconds. Optional. - Expiration int64 `json:"expiration,omitempty,string"` - - // Id: A UUID or similar unique string that identifies this channel. - Id string `json:"id,omitempty"` - - // Kind: Identifies this as a notification channel used to watch for - // changes to a resource. Value: the fixed string "api#channel". - Kind string `json:"kind,omitempty"` - - // Params: Additional parameters controlling delivery channel behavior. - // Optional. - Params map[string]string `json:"params,omitempty"` - - // Payload: A Boolean value to indicate whether payload is wanted. - // Optional. - Payload bool `json:"payload,omitempty"` - - // ResourceId: An opaque ID that identifies the resource being watched - // on this channel. Stable across different API versions. - ResourceId string `json:"resourceId,omitempty"` - - // ResourceUri: A version-specific identifier for the watched resource. - ResourceUri string `json:"resourceUri,omitempty"` - - // Token: An arbitrary string delivered to the target address with each - // notification delivered over this channel. Optional. - Token string `json:"token,omitempty"` - - // Type: The type of delivery mechanism used for this channel. - Type string `json:"type,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Address") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *Channel) MarshalJSON() ([]byte, error) { - type noMethod Channel - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// ComposeRequest: A Compose request. 
-type ComposeRequest struct { - // Destination: Properties of the resulting object. - Destination *Object `json:"destination,omitempty"` - - // Kind: The kind of item this is. - Kind string `json:"kind,omitempty"` - - // SourceObjects: The list of source objects that will be concatenated - // into a single object. - SourceObjects []*ComposeRequestSourceObjects `json:"sourceObjects,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Destination") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *ComposeRequest) MarshalJSON() ([]byte, error) { - type noMethod ComposeRequest - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -type ComposeRequestSourceObjects struct { - // Generation: The generation of this object to use as the source. - Generation int64 `json:"generation,omitempty,string"` - - // Name: The source object's name. The source object's bucket is - // implicitly the destination bucket. - Name string `json:"name,omitempty"` - - // ObjectPreconditions: Conditions that must be met for this operation - // to execute. - ObjectPreconditions *ComposeRequestSourceObjectsObjectPreconditions `json:"objectPreconditions,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Generation") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) { - type noMethod ComposeRequestSourceObjects - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// ComposeRequestSourceObjectsObjectPreconditions: Conditions that must -// be met for this operation to execute. -type ComposeRequestSourceObjectsObjectPreconditions struct { - // IfGenerationMatch: Only perform the composition if the generation of - // the source object that would be used matches this value. If this - // value and a generation are both specified, they must be the same - // value or the call will fail. - IfGenerationMatch int64 `json:"ifGenerationMatch,omitempty,string"` - - // ForceSendFields is a list of field names (e.g. "IfGenerationMatch") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) { - type noMethod ComposeRequestSourceObjectsObjectPreconditions - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// Object: An object. -type Object struct { - // Acl: Access controls on the object. 
- Acl []*ObjectAccessControl `json:"acl,omitempty"` - - // Bucket: The name of the bucket containing this object. - Bucket string `json:"bucket,omitempty"` - - // CacheControl: Cache-Control directive for the object data. - CacheControl string `json:"cacheControl,omitempty"` - - // ComponentCount: Number of underlying components that make up this - // object. Components are accumulated by compose operations. - ComponentCount int64 `json:"componentCount,omitempty"` - - // ContentDisposition: Content-Disposition of the object data. - ContentDisposition string `json:"contentDisposition,omitempty"` - - // ContentEncoding: Content-Encoding of the object data. - ContentEncoding string `json:"contentEncoding,omitempty"` - - // ContentLanguage: Content-Language of the object data. - ContentLanguage string `json:"contentLanguage,omitempty"` - - // ContentType: Content-Type of the object data. - ContentType string `json:"contentType,omitempty"` - - // Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; - // encoded using base64 in big-endian byte order. For more information - // about using the CRC32c checksum, see Hashes and ETags: Best - // Practices. - Crc32c string `json:"crc32c,omitempty"` - - // CustomerEncryption: Metadata of customer-supplied encryption key, if - // the object is encrypted by such a key. - CustomerEncryption *ObjectCustomerEncryption `json:"customerEncryption,omitempty"` - - // Etag: HTTP 1.1 Entity tag for the object. - Etag string `json:"etag,omitempty"` - - // Generation: The content generation of this object. Used for object - // versioning. - Generation int64 `json:"generation,omitempty,string"` - - // Id: The ID of the object. - Id string `json:"id,omitempty"` - - // Kind: The kind of item this is. For objects, this is always - // storage#object. - Kind string `json:"kind,omitempty"` - - // Md5Hash: MD5 hash of the data; encoded using base64. For more - // information about using the MD5 hash, see Hashes and ETags: Best - // Practices. - Md5Hash string `json:"md5Hash,omitempty"` - - // MediaLink: Media download link. - MediaLink string `json:"mediaLink,omitempty"` - - // Metadata: User-provided metadata, in key/value pairs. - Metadata map[string]string `json:"metadata,omitempty"` - - // Metageneration: The version of the metadata for this object at this - // generation. Used for preconditions and for detecting changes in - // metadata. A metageneration number is only meaningful in the context - // of a particular generation of a particular object. - Metageneration int64 `json:"metageneration,omitempty,string"` - - // Name: The name of this object. Required if not specified by URL - // parameter. - Name string `json:"name,omitempty"` - - // Owner: The owner of the object. This will always be the uploader of - // the object. - Owner *ObjectOwner `json:"owner,omitempty"` - - // SelfLink: The link to this object. - SelfLink string `json:"selfLink,omitempty"` - - // Size: Content-Length of the data in bytes. - Size uint64 `json:"size,omitempty,string"` - - // StorageClass: Storage class of the object. - StorageClass string `json:"storageClass,omitempty"` - - // TimeCreated: The creation time of the object in RFC 3339 format. - TimeCreated string `json:"timeCreated,omitempty"` - - // TimeDeleted: The deletion time of the object in RFC 3339 format. Will - // be returned if and only if this version of the object has been - // deleted. 
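Generation and Metageneration above are the values that the if*Match query parameters in the discovery document compare against. A minimal sketch of a guarded metadata update, assuming the generated Objects.Get and Objects.Patch calls defined later in storage-gen.go; the names here are illustrative:

```go
package storageutil

import storage "google.golang.org/api/storage/v1"

// setContentType rewrites an object's ContentType only if its metadata has
// not changed since it was read, by sending the observed metageneration as
// an ifMetagenerationMatch precondition.
func setContentType(svc *storage.Service, bucket, name, contentType string) (*storage.Object, error) {
	cur, err := svc.Objects.Get(bucket, name).Do()
	if err != nil {
		return nil, err
	}
	return svc.Objects.Patch(bucket, name, &storage.Object{ContentType: contentType}).
		IfMetagenerationMatch(cur.Metageneration).
		Do()
}
```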
- TimeDeleted string `json:"timeDeleted,omitempty"` - - // Updated: The modification time of the object metadata in RFC 3339 - // format. - Updated string `json:"updated,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Acl") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *Object) MarshalJSON() ([]byte, error) { - type noMethod Object - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// ObjectCustomerEncryption: Metadata of customer-supplied encryption -// key, if the object is encrypted by such a key. -type ObjectCustomerEncryption struct { - // EncryptionAlgorithm: The encryption algorithm. - EncryptionAlgorithm string `json:"encryptionAlgorithm,omitempty"` - - // KeySha256: SHA256 hash value of the encryption key. - KeySha256 string `json:"keySha256,omitempty"` - - // ForceSendFields is a list of field names (e.g. "EncryptionAlgorithm") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *ObjectCustomerEncryption) MarshalJSON() ([]byte, error) { - type noMethod ObjectCustomerEncryption - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// ObjectOwner: The owner of the object. This will always be the -// uploader of the object. -type ObjectOwner struct { - // Entity: The entity, in the form user-userId. - Entity string `json:"entity,omitempty"` - - // EntityId: The ID for the entity. - EntityId string `json:"entityId,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Entity") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *ObjectOwner) MarshalJSON() ([]byte, error) { - type noMethod ObjectOwner - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// ObjectAccessControl: An access-control entry. -type ObjectAccessControl struct { - // Bucket: The name of the bucket. - Bucket string `json:"bucket,omitempty"` - - // Domain: The domain associated with the entity, if any. - Domain string `json:"domain,omitempty"` - - // Email: The email address associated with the entity, if any. 
- Email string `json:"email,omitempty"` - - // Entity: The entity holding the permission, in one of the following - // forms: - // - user-userId - // - user-email - // - group-groupId - // - group-email - // - domain-domain - // - project-team-projectId - // - allUsers - // - allAuthenticatedUsers Examples: - // - The user liz@example.com would be user-liz@example.com. - // - The group example@googlegroups.com would be - // group-example@googlegroups.com. - // - To refer to all members of the Google Apps for Business domain - // example.com, the entity would be domain-example.com. - Entity string `json:"entity,omitempty"` - - // EntityId: The ID for the entity, if any. - EntityId string `json:"entityId,omitempty"` - - // Etag: HTTP 1.1 Entity tag for the access-control entry. - Etag string `json:"etag,omitempty"` - - // Generation: The content generation of the object. - Generation int64 `json:"generation,omitempty,string"` - - // Id: The ID of the access-control entry. - Id string `json:"id,omitempty"` - - // Kind: The kind of item this is. For object access control entries, - // this is always storage#objectAccessControl. - Kind string `json:"kind,omitempty"` - - // Object: The name of the object. - Object string `json:"object,omitempty"` - - // ProjectTeam: The project team associated with the entity, if any. - ProjectTeam *ObjectAccessControlProjectTeam `json:"projectTeam,omitempty"` - - // Role: The access permission for the entity. Can be READER or OWNER. - Role string `json:"role,omitempty"` - - // SelfLink: The link to this access-control entry. - SelfLink string `json:"selfLink,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Bucket") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *ObjectAccessControl) MarshalJSON() ([]byte, error) { - type noMethod ObjectAccessControl - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// ObjectAccessControlProjectTeam: The project team associated with the -// entity, if any. -type ObjectAccessControlProjectTeam struct { - // ProjectNumber: The project number. - ProjectNumber string `json:"projectNumber,omitempty"` - - // Team: The team. Can be owners, editors, or viewers. - Team string `json:"team,omitempty"` - - // ForceSendFields is a list of field names (e.g. "ProjectNumber") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) { - type noMethod ObjectAccessControlProjectTeam - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// ObjectAccessControls: An access-control list. -type ObjectAccessControls struct { - // Items: The list of items. 
- Items []interface{} `json:"items,omitempty"` - - // Kind: The kind of item this is. For lists of object access control - // entries, this is always storage#objectAccessControls. - Kind string `json:"kind,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *ObjectAccessControls) MarshalJSON() ([]byte, error) { - type noMethod ObjectAccessControls - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// Objects: A list of objects. -type Objects struct { - // Items: The list of items. - Items []*Object `json:"items,omitempty"` - - // Kind: The kind of item this is. For lists of objects, this is always - // storage#objects. - Kind string `json:"kind,omitempty"` - - // NextPageToken: The continuation token, used to page through large - // result sets. Provide this value in a subsequent request to return the - // next page of results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // Prefixes: The list of prefixes of objects matching-but-not-listed up - // to and including the requested delimiter. - Prefixes []string `json:"prefixes,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Items") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *Objects) MarshalJSON() ([]byte, error) { - type noMethod Objects - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// RewriteResponse: A rewrite response. -type RewriteResponse struct { - // Done: true if the copy is finished; otherwise, false if the copy is - // in progress. This property is always present in the response. - Done bool `json:"done,omitempty"` - - // Kind: The kind of item this is. - Kind string `json:"kind,omitempty"` - - // ObjectSize: The total size of the object being copied in bytes. This - // property is always present in the response. - ObjectSize uint64 `json:"objectSize,omitempty,string"` - - // Resource: A resource containing the metadata for the copied-to - // object. This property is present in the response only when copying - // completes. - Resource *Object `json:"resource,omitempty"` - - // RewriteToken: A token to use in subsequent requests to continue - // copying data. This token is present in the response only when there - // is more data to copy. - RewriteToken string `json:"rewriteToken,omitempty"` - - // TotalBytesRewritten: The total bytes written so far, which can be - // used to provide a waiting user with a progress indicator. This - // property is always present in the response. 
- TotalBytesRewritten uint64 `json:"totalBytesRewritten,omitempty,string"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Done") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *RewriteResponse) MarshalJSON() ([]byte, error) { - type noMethod RewriteResponse - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - -// method id "storage.bucketAccessControls.delete": - -type BucketAccessControlsDeleteCall struct { - s *Service - bucket string - entity string - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Delete: Permanently deletes the ACL entry for the specified entity on -// the specified bucket. -func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall { - c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAccessControlsDeleteCall { - c.ctx_ = ctx - return c -} - -func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.bucketAccessControls.delete" call. -func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err - } - return nil - // { - // "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", - // "httpMethod": "DELETE", - // "id": "storage.bucketAccessControls.delete", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl/{entity}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.bucketAccessControls.get": - -type BucketAccessControlsGetCall struct { - s *Service - bucket string - entity string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context -} - -// Get: Returns the ACL entry for the specified entity on the specified -// bucket. -func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall { - c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAccessControlsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccessControlsGetCall { - c.ctx_ = ctx - return c -} - -func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.bucketAccessControls.get" call. -// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BucketAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the ACL entry for the specified entity on the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.bucketAccessControls.get", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl/{entity}", - // "response": { - // "$ref": "BucketAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.bucketAccessControls.insert": - -type BucketAccessControlsInsertCall struct { - s *Service - bucket string - bucketaccesscontrol *BucketAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Insert: Creates a new ACL entry on the specified bucket. -func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall { - c := &BucketAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.bucketaccesscontrol = bucketaccesscontrol - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAccessControlsInsertCall { - c.ctx_ = ctx - return c -} - -func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) - if err != nil { - return nil, err - } - ctype := "application/json" - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.bucketAccessControls.insert" call. 
-// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BucketAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a new ACL entry on the specified bucket.", - // "httpMethod": "POST", - // "id": "storage.bucketAccessControls.insert", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl", - // "request": { - // "$ref": "BucketAccessControl" - // }, - // "response": { - // "$ref": "BucketAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.bucketAccessControls.list": - -type BucketAccessControlsListCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context -} - -// List: Retrieves ACL entries on the specified bucket. -func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall { - c := &BucketAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAccessControlsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
-func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAccessControlsListCall { - c.ctx_ = ctx - return c -} - -func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.bucketAccessControls.list" call. -// Exactly one of *BucketAccessControls or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControls.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*BucketAccessControls, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BucketAccessControls{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves ACL entries on the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.bucketAccessControls.list", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl", - // "response": { - // "$ref": "BucketAccessControls" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.bucketAccessControls.patch": - -type BucketAccessControlsPatchCall struct { - s *Service - bucket string - entity string - bucketaccesscontrol *BucketAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Patch: Updates an ACL entry on the specified bucket. This method -// supports patch semantics. -func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall { - c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - c.bucketaccesscontrol = bucketaccesscontrol - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAccessControlsPatchCall { - c.ctx_ = ctx - return c -} - -func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) - if err != nil { - return nil, err - } - ctype := "application/json" - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.bucketAccessControls.patch" call. -// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BucketAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an ACL entry on the specified bucket. This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "storage.bucketAccessControls.patch", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl/{entity}", - // "request": { - // "$ref": "BucketAccessControl" - // }, - // "response": { - // "$ref": "BucketAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.bucketAccessControls.update": - -type BucketAccessControlsUpdateCall struct { - s *Service - bucket string - entity string - bucketaccesscontrol *BucketAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Update: Updates an ACL entry on the specified bucket. -func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall { - c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - c.bucketaccesscontrol = bucketaccesscontrol - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAccessControlsUpdateCall { - c.ctx_ = ctx - return c -} - -func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) - if err != nil { - return nil, err - } - ctype := "application/json" - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.bucketAccessControls.update" call. -// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &BucketAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an ACL entry on the specified bucket.", - // "httpMethod": "PUT", - // "id": "storage.bucketAccessControls.update", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/acl/{entity}", - // "request": { - // "$ref": "BucketAccessControl" - // }, - // "response": { - // "$ref": "BucketAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.buckets.delete": - -type BucketsDeleteCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Delete: Permanently deletes an empty bucket. -func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { - c := &BucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": If set, only deletes the bucket if its -// metageneration matches this value. -func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": If set, only deletes the bucket if its -// metageneration does not match this value. -func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsDeleteCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall { - c.ctx_ = ctx - return c -} - -func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") - urls += "?" 
+ c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.buckets.delete" call. -func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err - } - return nil - // { - // "description": "Permanently deletes an empty bucket.", - // "httpMethod": "DELETE", - // "id": "storage.buckets.delete", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "If set, only deletes the bucket if its metageneration matches this value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "If set, only deletes the bucket if its metageneration does not match this value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.buckets.get": - -type BucketsGetCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context -} - -// Get: Returns metadata for the specified bucket. -func (r *BucketsService) Get(bucket string) *BucketsGetCall { - c := &BucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration matches -// the given value. -func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration does not -// match the given value. -func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit acl and defaultObjectAcl properties. -func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { - c.urlParams_.Set("projection", projection) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BucketsGetCall) IfNoneMatch(entityTag string) *BucketsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall { - c.ctx_ = ctx - return c -} - -func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.buckets.get" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Bucket{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns metadata for the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.buckets.get", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. 
Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit acl and defaultObjectAcl properties." - // ], - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}", - // "response": { - // "$ref": "Bucket" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.buckets.insert": - -type BucketsInsertCall struct { - s *Service - bucket *Bucket - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Insert: Creates a new bucket. -func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsertCall { - c := &BucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.urlParams_.Set("project", projectid) - c.bucket = bucket - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this bucket. -// -// Possible values: -// "authenticatedRead" - Project team owners get OWNER access, and -// allAuthenticatedUsers get READER access. -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to -// their roles. -// "publicRead" - Project team owners get OWNER access, and allUsers -// get READER access. -// "publicReadWrite" - Project team owners get OWNER access, and -// allUsers get WRITER access. -func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// PredefinedDefaultObjectAcl sets the optional parameter -// "predefinedDefaultObjectAcl": Apply a predefined set of default -// object access controls to this bucket. -// -// Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and -// allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get -// READER access. -func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall { - c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the bucket resource -// specifies acl or defaultObjectAcl properties, when it defaults to -// full. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit acl and defaultObjectAcl properties. -func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { - c.urlParams_.Set("projection", projection) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall { - c.ctx_ = ctx - return c -} - -func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) - if err != nil { - return nil, err - } - ctype := "application/json" - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - googleapi.SetOpaque(req.URL) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.buckets.insert" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Bucket{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a new bucket.", - // "httpMethod": "POST", - // "id": "storage.buckets.insert", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "private", - // "projectPrivate", - // "publicRead", - // "publicReadWrite" - // ], - // "enumDescriptions": [ - // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - // "Project team owners get OWNER access.", - // "Project team members get access according to their roles.", - // "Project team owners get OWNER access, and allUsers get READER access.", - // "Project team owners get OWNER access, and allUsers get WRITER access." 
- // ], - // "location": "query", - // "type": "string" - // }, - // "predefinedDefaultObjectAcl": { - // "description": "Apply a predefined set of default object access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "A valid API project identifier.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit acl and defaultObjectAcl properties." - // ], - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b", - // "request": { - // "$ref": "Bucket" - // }, - // "response": { - // "$ref": "Bucket" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.buckets.list": - -type BucketsListCall struct { - s *Service - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context -} - -// List: Retrieves a list of buckets for a given project. -func (r *BucketsService) List(projectid string) *BucketsListCall { - c := &BucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.urlParams_.Set("project", projectid) - return c -} - -// MaxResults sets the optional parameter "maxResults": Maximum number -// of buckets to return. -func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set of -// results to view. -func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Prefix sets the optional parameter "prefix": Filter results to -// buckets whose names begin with this prefix. -func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { - c.urlParams_.Set("prefix", prefix) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit acl and defaultObjectAcl properties. -func (c *BucketsListCall) Projection(projection string) *BucketsListCall { - c.urlParams_.Set("projection", projection) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall { - c.ctx_ = ctx - return c -} - -func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - googleapi.SetOpaque(req.URL) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.buckets.list" call. -// Exactly one of *Buckets or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Buckets.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Buckets{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of buckets for a given project.", - // "httpMethod": "GET", - // "id": "storage.buckets.list", - // "parameterOrder": [ - // "project" - // ], - // "parameters": { - // "maxResults": { - // "description": "Maximum number of buckets to return.", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of results to view.", - // "location": "query", - // "type": "string" - // }, - // "prefix": { - // "description": "Filter results to buckets whose names begin with this prefix.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "A valid API project identifier.", - // "location": "query", - // "required": true, - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. 
Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit acl and defaultObjectAcl properties." - // ], - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b", - // "response": { - // "$ref": "Buckets" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *BucketsListCall) Pages(ctx context.Context, f func(*Buckets) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "storage.buckets.patch": - -type BucketsPatchCall struct { - s *Service - bucket string - bucket2 *Bucket - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Patch: Updates a bucket. This method supports patch semantics. -func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall { - c := &BucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.bucket2 = bucket2 - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration matches -// the given value. -func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration does not -// match the given value. -func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this bucket. -// -// Possible values: -// "authenticatedRead" - Project team owners get OWNER access, and -// allAuthenticatedUsers get READER access. -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to -// their roles. -// "publicRead" - Project team owners get OWNER access, and allUsers -// get READER access. -// "publicReadWrite" - Project team owners get OWNER access, and -// allUsers get WRITER access. -func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// PredefinedDefaultObjectAcl sets the optional parameter -// "predefinedDefaultObjectAcl": Apply a predefined set of default -// object access controls to this bucket. 
-// -// Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and -// allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get -// READER access. -func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall { - c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit acl and defaultObjectAcl properties. -func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { - c.urlParams_.Set("projection", projection) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall { - c.ctx_ = ctx - return c -} - -func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) - if err != nil { - return nil, err - } - ctype := "application/json" - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.buckets.patch" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Bucket{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a bucket. This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "storage.buckets.patch", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "private", - // "projectPrivate", - // "publicRead", - // "publicReadWrite" - // ], - // "enumDescriptions": [ - // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - // "Project team owners get OWNER access.", - // "Project team members get access according to their roles.", - // "Project team owners get OWNER access, and allUsers get READER access.", - // "Project team owners get OWNER access, and allUsers get WRITER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "predefinedDefaultObjectAcl": { - // "description": "Apply a predefined set of default object access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit acl and defaultObjectAcl properties." 
- // ], - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}", - // "request": { - // "$ref": "Bucket" - // }, - // "response": { - // "$ref": "Bucket" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.buckets.update": - -type BucketsUpdateCall struct { - s *Service - bucket string - bucket2 *Bucket - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Update: Updates a bucket. -func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall { - c := &BucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.bucket2 = bucket2 - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration matches -// the given value. -func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration does not -// match the given value. -func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this bucket. -// -// Possible values: -// "authenticatedRead" - Project team owners get OWNER access, and -// allAuthenticatedUsers get READER access. -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to -// their roles. -// "publicRead" - Project team owners get OWNER access, and allUsers -// get READER access. -// "publicReadWrite" - Project team owners get OWNER access, and -// allUsers get WRITER access. -func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// PredefinedDefaultObjectAcl sets the optional parameter -// "predefinedDefaultObjectAcl": Apply a predefined set of default -// object access controls to this bucket. -// -// Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and -// allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get -// READER access. -func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall { - c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. 
-// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit acl and defaultObjectAcl properties. -func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { - c.urlParams_.Set("projection", projection) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall { - c.ctx_ = ctx - return c -} - -func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) - if err != nil { - return nil, err - } - ctype := "application/json" - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.buckets.update" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Bucket{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a bucket.", - // "httpMethod": "PUT", - // "id": "storage.buckets.update", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "private", - // "projectPrivate", - // "publicRead", - // "publicReadWrite" - // ], - // "enumDescriptions": [ - // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - // "Project team owners get OWNER access.", - // "Project team members get access according to their roles.", - // "Project team owners get OWNER access, and allUsers get READER access.", - // "Project team owners get OWNER access, and allUsers get WRITER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "predefinedDefaultObjectAcl": { - // "description": "Apply a predefined set of default object access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit acl and defaultObjectAcl properties." 
- // ], - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}", - // "request": { - // "$ref": "Bucket" - // }, - // "response": { - // "$ref": "Bucket" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.channels.stop": - -type ChannelsStopCall struct { - s *Service - channel *Channel - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Stop: Stop watching resources through this channel -func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall { - c := &ChannelsStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.channel = channel - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall { - c.ctx_ = ctx - return c -} - -func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) - if err != nil { - return nil, err - } - ctype := "application/json" - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - googleapi.SetOpaque(req.URL) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.channels.stop" call. -func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err - } - return nil - // { - // "description": "Stop watching resources through this channel", - // "httpMethod": "POST", - // "id": "storage.channels.stop", - // "path": "channels/stop", - // "request": { - // "$ref": "Channel", - // "parameterName": "resource" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.defaultObjectAccessControls.delete": - -type DefaultObjectAccessControlsDeleteCall struct { - s *Service - bucket string - entity string - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Delete: Permanently deletes the default object ACL entry for the -// specified entity on the specified bucket. 
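[Editor's note: for context on the generated client being removed in this hunk, a minimal sketch of how its builder-style calls are typically driven may help. It is a sketch, not the project's code: it assumes the package is imported from the vendored path google.golang.org/api/storage/v1, that Application Default Credentials are available via golang.org/x/oauth2/google, and that the bucket name "my-example-bucket" is hypothetical. The scope constant mirrors the devstorage.full_control scope URL listed in the method metadata above.

package main

import (
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	// Resolve Application Default Credentials for the full-control scope.
	httpClient, err := google.DefaultClient(ctx, storage.DevstorageFullControlScope)
	if err != nil {
		log.Fatalf("create HTTP client: %v", err)
	}
	svc, err := storage.New(httpClient)
	if err != nil {
		log.Fatalf("create storage service: %v", err)
	}

	// Update issues a full PUT of the bucket resource (unlike Patch,
	// shown earlier in this hunk, which sends only changed fields),
	// so read-modify-write is the safe pattern.
	bucket, err := svc.Buckets.Get("my-example-bucket").Context(ctx).Do()
	if err != nil {
		log.Fatalf("get bucket: %v", err)
	}
	bucket.Versioning = &storage.BucketVersioning{Enabled: true}
	if _, err := svc.Buckets.Update("my-example-bucket", bucket).Context(ctx).Do(); err != nil {
		log.Fatalf("update bucket: %v", err)
	}
}

The later sketches in this section reuse svc as constructed here.]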
-func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall { - c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *DefaultObjectAccessControlsDeleteCall { - c.ctx_ = ctx - return c -} - -func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.defaultObjectAccessControls.delete" call. -func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err - } - return nil - // { - // "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", - // "httpMethod": "DELETE", - // "id": "storage.defaultObjectAccessControls.delete", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.defaultObjectAccessControls.get": - -type DefaultObjectAccessControlsGetCall struct { - s *Service - bucket string - entity string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context -} - -// Get: Returns the default object ACL entry for the specified entity on -// the specified bucket. -func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall { - c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DefaultObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *DefaultObjectAccessControlsGetCall { - c.ctx_ = ctx - return c -} - -func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.defaultObjectAccessControls.get" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.defaultObjectAccessControls.get", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.defaultObjectAccessControls.insert": - -type DefaultObjectAccessControlsInsertCall struct { - s *Service - bucket string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Insert: Creates a new default object ACL entry on the specified -// bucket. -func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall { - c := &DefaultObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.objectaccesscontrol = objectaccesscontrol - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *DefaultObjectAccessControlsInsertCall { - c.ctx_ = ctx - return c -} - -func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err - } - ctype := "application/json" - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.defaultObjectAccessControls.insert" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a new default object ACL entry on the specified bucket.", - // "httpMethod": "POST", - // "id": "storage.defaultObjectAccessControls.insert", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.defaultObjectAccessControls.list": - -type DefaultObjectAccessControlsListCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context -} - -// List: Retrieves default object ACL entries on the specified bucket. -func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall { - c := &DefaultObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": If present, only return default ACL listing -// if the bucket's current metageneration matches this value. -func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": If present, only return default ACL -// listing if the bucket's current metageneration does not match the -// given value. -func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. 
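[Editor's note: the IfNoneMatch option documented just above enables conditional reads: send the ETag saved from a previous response and treat a 304 as "still current" via googleapi.IsNotModified, exactly as the Do doc comments in this file describe. A sketch continuing the earlier example (svc as constructed there; googleapi is google.golang.org/api/googleapi; the ETag literal is hypothetical):

// cachedEtag came from the ServerResponse.Header of an earlier call
// (hypothetical value here).
cachedEtag := `"etag-from-last-response"`

acls, err := svc.DefaultObjectAccessControls.List("my-example-bucket").
	IfNoneMatch(cachedEtag).
	Do()
switch {
case googleapi.IsNotModified(err):
	log.Println("default object ACLs unchanged; reuse the cached copy")
case err != nil:
	log.Fatalf("list default object ACLs: %v", err)
default:
	log.Printf("fetched %d default object ACL entries", len(acls.Items))
}]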
-func (c *DefaultObjectAccessControlsListCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *DefaultObjectAccessControlsListCall { - c.ctx_ = ctx - return c -} - -func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.defaultObjectAccessControls.list" call. -// Exactly one of *ObjectAccessControls or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControls.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ObjectAccessControls{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves default object ACL entries on the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.defaultObjectAccessControls.list", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl", - // "response": { - // "$ref": "ObjectAccessControls" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.defaultObjectAccessControls.patch": - -type 
DefaultObjectAccessControlsPatchCall struct { - s *Service - bucket string - entity string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Patch: Updates a default object ACL entry on the specified bucket. -// This method supports patch semantics. -func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall { - c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - c.objectaccesscontrol = objectaccesscontrol - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *DefaultObjectAccessControlsPatchCall { - c.ctx_ = ctx - return c -} - -func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err - } - ctype := "application/json" - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.defaultObjectAccessControls.patch" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a default object ACL entry on the specified bucket. 
This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "storage.defaultObjectAccessControls.patch", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.defaultObjectAccessControls.update": - -type DefaultObjectAccessControlsUpdateCall struct { - s *Service - bucket string - entity string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Update: Updates a default object ACL entry on the specified bucket. -func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall { - c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity - c.objectaccesscontrol = objectaccesscontrol - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *DefaultObjectAccessControlsUpdateCall { - c.ctx_ = ctx - return c -} - -func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err - } - ctype := "application/json" - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.defaultObjectAccessControls.update" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
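[Editor's note: a sketch of the update call whose plumbing appears above, again assuming the svc from the first sketch; making future objects world-readable is shown purely as an illustration, and the bucket name is hypothetical.

// Grant READER on newly created objects in the bucket to allUsers by
// replacing the bucket's default object ACL entry for that entity.
updated, err := svc.DefaultObjectAccessControls.Update(
	"my-example-bucket",
	"allUsers",
	&storage.ObjectAccessControl{Entity: "allUsers", Role: "READER"},
).Do()
if err != nil {
	log.Fatalf("update default object ACL: %v", err)
}
log.Printf("default ACL now: entity=%s role=%s", updated.Entity, updated.Role)]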
-func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates a default object ACL entry on the specified bucket.", - // "httpMethod": "PUT", - // "id": "storage.defaultObjectAccessControls.update", - // "parameterOrder": [ - // "bucket", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objectAccessControls.delete": - -type ObjectAccessControlsDeleteCall struct { - s *Service - bucket string - object string - entity string - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Delete: Permanently deletes the ACL entry for the specified entity on -// the specified object. -func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { - c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.entity = entity - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. 
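[Editor's note: the Generation option above pins ACL operations to a single object revision on versioned buckets. A sketch under the same assumptions as before; bucket, object, entity, and generation number are all hypothetical.

call := svc.ObjectAccessControls.Delete(
	"my-example-bucket",     // hypothetical bucket
	"reports/2017-01.csv",   // hypothetical object
	"user-jane@example.com", // entity in the user-emailAddress form listed above
)
// Pin the deletion to one archived revision; without Generation the
// call targets the current (live) version of the object.
if err := call.Generation(1483228800000000).Do(); err != nil {
	log.Fatalf("delete object ACL entry: %v", err)
}]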
-func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAccessControlsDeleteCall { - c.ctx_ = ctx - return c -} - -func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - "entity": c.entity, - }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.objectAccessControls.delete" call. -func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err - } - return nil - // { - // "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", - // "httpMethod": "DELETE", - // "id": "storage.objectAccessControls.delete", - // "parameterOrder": [ - // "bucket", - // "object", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objectAccessControls.get": - -type ObjectAccessControlsGetCall struct { - s *Service - bucket string - object string - entity string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context -} - -// Get: Returns the ACL entry for the specified entity on the specified -// object. -func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall { - c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.entity = entity - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAccessControlsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccessControlsGetCall { - c.ctx_ = ctx - return c -} - -func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - "entity": c.entity, - }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.objectAccessControls.get" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the ACL entry for the specified entity on the specified object.", - // "httpMethod": "GET", - // "id": "storage.objectAccessControls.get", - // "parameterOrder": [ - // "bucket", - // "object", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objectAccessControls.insert": - -type ObjectAccessControlsInsertCall struct { - s *Service - bucket string - object string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Insert: Creates a new ACL entry on the specified object. -func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { - c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.objectaccesscontrol = objectaccesscontrol - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAccessControlsInsertCall { - c.ctx_ = ctx - return c -} - -func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err - } - ctype := "application/json" - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.objectAccessControls.insert" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. 
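[Editor's note: a usage sketch for the insert call above, continuing the earlier example; the bucket, object, and entity names are hypothetical.

// Grant a single user READER access to one existing object.
acl, err := svc.ObjectAccessControls.Insert(
	"my-example-bucket",
	"reports/2017-01.csv",
	&storage.ObjectAccessControl{
		Entity: "user-jane@example.com",
		Role:   "READER",
	},
).Do()
if err != nil {
	log.Fatalf("insert object ACL entry: %v", err)
}
log.Printf("granted %s to %s", acl.Role, acl.Entity)]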
Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Creates a new ACL entry on the specified object.", - // "httpMethod": "POST", - // "id": "storage.objectAccessControls.insert", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objectAccessControls.list": - -type ObjectAccessControlsListCall struct { - s *Service - bucket string - object string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context -} - -// List: Retrieves ACL entries on the specified object. -func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { - c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ObjectAccessControlsListCall) IfNoneMatch(entityTag string) *ObjectAccessControlsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAccessControlsListCall { - c.ctx_ = ctx - return c -} - -func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.objectAccessControls.list" call. -// Exactly one of *ObjectAccessControls or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControls.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ObjectAccessControls{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves ACL entries on the specified object.", - // "httpMethod": "GET", - // "id": "storage.objectAccessControls.list", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl", - // "response": { - // "$ref": "ObjectAccessControls" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objectAccessControls.patch": - -type ObjectAccessControlsPatchCall struct { - s *Service - bucket string - object string - entity string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Patch: Updates an ACL entry on the specified object. This method -// supports patch semantics. -func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { - c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.entity = entity - c.objectaccesscontrol = objectaccesscontrol - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAccessControlsPatchCall { - c.ctx_ = ctx - return c -} - -func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err - } - ctype := "application/json" - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - "entity": c.entity, - }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.objectAccessControls.patch" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an ACL entry on the specified object. This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "storage.objectAccessControls.patch", - // "parameterOrder": [ - // "bucket", - // "object", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objectAccessControls.update": - -type ObjectAccessControlsUpdateCall struct { - s *Service - bucket string - object string - entity string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Update: Updates an ACL entry on the specified object. -func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { - c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.entity = entity - c.objectaccesscontrol = objectaccesscontrol - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. 
-func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAccessControlsUpdateCall { - c.ctx_ = ctx - return c -} - -func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err - } - ctype := "application/json" - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - "entity": c.entity, - }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.objectAccessControls.update" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an ACL entry on the specified object.", - // "httpMethod": "PUT", - // "id": "storage.objectAccessControls.update", - // "parameterOrder": [ - // "bucket", - // "object", - // "entity" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objects.compose": - -type ObjectsComposeCall struct { - s *Service - destinationBucket string - destinationObject string - composerequest *ComposeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Compose: Concatenates a list of existing objects into a new object in -// the same bucket. -func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { - c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.destinationBucket = destinationBucket - c.destinationObject = destinationObject - c.composerequest = composerequest - return c -} - -// DestinationPredefinedAcl sets the optional parameter -// "destinationPredefinedAcl": Apply a predefined set of access controls -// to the destination object. -// -// Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and -// allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get -// READER access. -func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall { - c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. -func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do and Download -// methods. Any pending HTTP request will be aborted if the provided -// context is canceled. 
-func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall { - c.ctx_ = ctx - return c -} - -func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) - if err != nil { - return nil, err - } - ctype := "application/json" - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "destinationBucket": c.destinationBucket, - "destinationObject": c.destinationObject, - }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Download fetches the API endpoint's "media" value, instead of the normal -// API response value. If the returned error is nil, the Response is guaranteed to -// have a 2xx status code. Callers must close the Response.Body as usual. -func (c *ObjectsComposeCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("media") - if err != nil { - return nil, err - } - if err := googleapi.CheckMediaResponse(res); err != nil { - res.Body.Close() - return nil, err - } - return res, nil -} - -// Do executes the "storage.objects.compose" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Object{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Concatenates a list of existing objects into a new object in the same bucket.", - // "httpMethod": "POST", - // "id": "storage.objects.compose", - // "parameterOrder": [ - // "destinationBucket", - // "destinationObject" - // ], - // "parameters": { - // "destinationBucket": { - // "description": "Name of the bucket in which to store the new object.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationObject": { - // "description": "Name of the new object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationPredefinedAcl": { - // "description": "Apply a predefined set of access controls to the destination object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{destinationBucket}/o/{destinationObject}/compose", - // "request": { - // "$ref": "ComposeRequest" - // }, - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsMediaDownload": true, - // "useMediaDownloadService": true - // } - -} - -// method id "storage.objects.copy": - -type ObjectsCopyCall struct { - s *Service - sourceBucket string - sourceObject string - destinationBucket string - destinationObject string - object *Object - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Copy: Copies a source object to a destination object. Optionally -// overrides metadata. -func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { - c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sourceBucket = sourceBucket - c.sourceObject = sourceObject - c.destinationBucket = destinationBucket - c.destinationObject = destinationObject - c.object = object - return c -} - -// DestinationPredefinedAcl sets the optional parameter -// "destinationPredefinedAcl": Apply a predefined set of access controls -// to the destination object. -// -// Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and -// allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get -// READER access. 
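Compose concatenates existing objects server-side (a single request accepts up to 32 source components) and also honors the destination ACL values documented just above. A minimal usage sketch, not part of this file, assuming the usual imports (log, this package imported as storage), svc built via storage.New with an authenticated *http.Client, placeholder bucket and object names, and the generated source-entry type ComposeRequestSourceObjects:

	req := &storage.ComposeRequest{
		Destination: &storage.Object{ContentType: "text/plain"},
		SourceObjects: []*storage.ComposeRequestSourceObjects{
			{Name: "part-1.txt"},
			{Name: "part-2.txt"},
		},
	}
	combined, err := svc.Objects.Compose("my-bucket", "combined.txt", req).
		DestinationPredefinedAcl("projectPrivate").
		Do()
	if err != nil {
		log.Fatalf("compose failed: %v", err)
	}
	log.Printf("composed %s at generation %d", combined.Name, combined.Generation)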
-func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall { - c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the destination object's -// current generation matches the given value. -func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the destination object's current generation does not match the given -// value. -func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the destination object's current metageneration matches the given -// value. -func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the destination object's current metageneration does not -// match the given value. -func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// IfSourceGenerationMatch sets the optional parameter -// "ifSourceGenerationMatch": Makes the operation conditional on whether -// the source object's generation matches the given value. -func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) - return c -} - -// IfSourceGenerationNotMatch sets the optional parameter -// "ifSourceGenerationNotMatch": Makes the operation conditional on -// whether the source object's generation does not match the given -// value. -func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) - return c -} - -// IfSourceMetagenerationMatch sets the optional parameter -// "ifSourceMetagenerationMatch": Makes the operation conditional on -// whether the source object's current metageneration matches the given -// value. -func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) - return c -} - -// IfSourceMetagenerationNotMatch sets the optional parameter -// "ifSourceMetagenerationNotMatch": Makes the operation conditional on -// whether the source object's current metageneration does not match the -// given value. 
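Taken together, the If*GenerationMatch setters above make a copy behave like compare-and-swap on generations. A sketch (svc as before; srcGen and the names are placeholders) of copying only while the source is unchanged and the destination is absent, where a value of 0 for ifGenerationMatch means "no live generation exists":

	dst, err := svc.Objects.Copy("src-bucket", "src.txt", "dst-bucket", "dst.txt", &storage.Object{}).
		IfSourceGenerationMatch(srcGen). // fail if the source changed since we read srcGen
		IfGenerationMatch(0).            // fail if the destination already exists
		Do()
	if err != nil {
		log.Fatalf("conditional copy failed: %v", err)
	}
	log.Printf("copied to generation %d", dst.Generation)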
-func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the object resource -// specifies the acl property, when it defaults to full. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the acl property. -func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { - c.urlParams_.Set("projection", projection) - return c -} - -// SourceGeneration sets the optional parameter "sourceGeneration": If -// present, selects a specific revision of the source object (as opposed -// to the latest version, the default). -func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall { - c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do and Download -// methods. Any pending HTTP request will be aborted if the provided -// context is canceled. -func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall { - c.ctx_ = ctx - return c -} - -func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) - if err != nil { - return nil, err - } - ctype := "application/json" - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "sourceBucket": c.sourceBucket, - "sourceObject": c.sourceObject, - "destinationBucket": c.destinationBucket, - "destinationObject": c.destinationObject, - }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Download fetches the API endpoint's "media" value, instead of the normal -// API response value. If the returned error is nil, the Response is guaranteed to -// have a 2xx status code. Callers must close the Response.Body as usual. -func (c *ObjectsCopyCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("media") - if err != nil { - return nil, err - } - if err := googleapi.CheckMediaResponse(res); err != nil { - res.Body.Close() - return nil, err - } - return res, nil -} - -// Do executes the "storage.objects.copy" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. 
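Per the Do contract above, any non-2xx response surfaces as an error whose response headers survive in error.(*googleapi.Error).Header. A sketch of unpacking a failed precondition (HTTP 412) from a copy; copyCall stands for any *ObjectsCopyCall built as shown above, and imports of net/http and google.golang.org/api/googleapi are assumed:

	if _, err := copyCall.Do(); err != nil {
		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusPreconditionFailed {
			// An If*Match precondition failed; re-read state before retrying.
			log.Printf("precondition failed: %s (headers: %v)", gerr.Message, gerr.Header)
		} else {
			log.Fatalf("copy failed: %v", err)
		}
	}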
-func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Object{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Copies a source object to a destination object. Optionally overrides metadata.", - // "httpMethod": "POST", - // "id": "storage.objects.copy", - // "parameterOrder": [ - // "sourceBucket", - // "sourceObject", - // "destinationBucket", - // "destinationObject" - // ], - // "parameters": { - // "destinationBucket": { - // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationObject": { - // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationPredefinedAcl": { - // "description": "Apply a predefined set of access controls to the destination object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." 
- // ], - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's generation matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's generation does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "sourceBucket": { - // "description": "Name of the bucket in which to find the source object.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "sourceGeneration": { - // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "sourceObject": { - // "description": "Name of the source object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", - // "request": { - // "$ref": "Object" - // }, - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsMediaDownload": true, - // "useMediaDownloadService": true - // } - -} - -// method id "storage.objects.delete": - -type ObjectsDeleteCall struct { - s *Service - bucket string - object string - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Delete: Deletes an object and its metadata. Deletions are permanent -// if versioning is not enabled for the bucket, or if the generation -// parameter is used. -func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { - c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - return c -} - -// Generation sets the optional parameter "generation": If present, -// permanently deletes a specific revision of this object (as opposed to -// the latest version, the default). -func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. -func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. -func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. -func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. 
Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall { - c.ctx_ = ctx - return c -} - -func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("DELETE", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.objects.delete" call. -func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return err - } - return nil - // { - // "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", - // "httpMethod": "DELETE", - // "id": "storage.objects.delete", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.objects.get": - -type ObjectsGetCall struct { - s *Service - bucket string - object string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context -} - -// Get: Retrieves an object or its metadata. 
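A sketch of the delete call documented above, scoped with ifGenerationMatch so only the exact revision seen earlier is removed (svc as before; gen would come from a prior Get or List, and the names are placeholders):

	if err := svc.Objects.Delete("my-bucket", "stale.txt").IfGenerationMatch(gen).Do(); err != nil {
		log.Fatalf("delete failed: %v", err)
	}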
-func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall {
-	c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
-	c.bucket = bucket
-	c.object = object
-	return c
-}
-
-// Generation sets the optional parameter "generation": If present,
-// selects a specific revision of this object (as opposed to the latest
-// version, the default).
-func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall {
-	c.urlParams_.Set("generation", fmt.Sprint(generation))
-	return c
-}
-
-// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
-// Makes the operation conditional on whether the object's generation
-// matches the given value.
-func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall {
-	c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
-	return c
-}
-
-// IfGenerationNotMatch sets the optional parameter
-// "ifGenerationNotMatch": Makes the operation conditional on whether
-// the object's generation does not match the given value.
-func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall {
-	c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
-	return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the operation conditional on whether
-// the object's current metageneration matches the given value.
-func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall {
-	c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
-	return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": Makes the operation conditional on
-// whether the object's current metageneration does not match the given
-// value.
-func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall {
-	c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
-	return c
-}
-
-// Projection sets the optional parameter "projection": Set of
-// properties to return. Defaults to noAcl.
-//
-// Possible values:
-//   "full" - Include all properties.
-//   "noAcl" - Omit the acl property.
-func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall {
-	c.urlParams_.Set("projection", projection)
-	return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall {
-	c.urlParams_.Set("fields", googleapi.CombineFields(s))
-	return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of In-None-Match.
-func (c *ObjectsGetCall) IfNoneMatch(entityTag string) *ObjectsGetCall {
-	c.ifNoneMatch_ = entityTag
-	return c
-}
-
-// Context sets the context to be used in this call's Do and Download
-// methods. Any pending HTTP request will be aborted if the provided
-// context is canceled.
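IfNoneMatch plus googleapi.IsNotModified gives cheap change polling for the Get call defined above. A sketch where lastEtag was saved from a previous response's Etag field (svc and names are placeholders):

	obj, err := svc.Objects.Get("my-bucket", "config.json").
		IfNoneMatch(lastEtag).
		Do()
	if googleapi.IsNotModified(err) {
		return // unchanged since lastEtag; keep the cached copy
	}
	if err != nil {
		log.Fatalf("get failed: %v", err)
	}
	lastEtag = obj.Etag // remember for the next poll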
-func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall { - c.ctx_ = ctx - return c -} - -func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Download fetches the API endpoint's "media" value, instead of the normal -// API response value. If the returned error is nil, the Response is guaranteed to -// have a 2xx status code. Callers must close the Response.Body as usual. -func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("media") - if err != nil { - return nil, err - } - if err := googleapi.CheckMediaResponse(res); err != nil { - res.Body.Close() - return nil, err - } - return res, nil -} - -// Do executes the "storage.objects.get" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Object{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves an object or its metadata.", - // "httpMethod": "GET", - // "id": "storage.objects.get", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's generation matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's generation does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the acl property." - // ], - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}", - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsMediaDownload": true, - // "useMediaDownloadService": true - // } - -} - -// method id "storage.objects.insert": - -type ObjectsInsertCall struct { - s *Service - bucket string - object *Object - urlParams_ gensupport.URLParams - media_ io.Reader - resumableBuffer_ *gensupport.ResumableBuffer - mediaType_ string - mediaSize_ int64 // mediaSize, if known. Used only for calls to progressUpdater_. 
- progressUpdater_ googleapi.ProgressUpdater - ctx_ context.Context -} - -// Insert: Stores a new object and metadata. -func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall { - c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - return c -} - -// ContentEncoding sets the optional parameter "contentEncoding": If -// set, sets the contentEncoding property of the final object to this -// value. Setting this parameter is equivalent to setting the -// contentEncoding metadata property. This can be useful when uploading -// an object with uploadType=media to indicate the encoding of the -// content being uploaded. -func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall { - c.urlParams_.Set("contentEncoding", contentEncoding) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. -func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. -func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. -func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// Name sets the optional parameter "name": Name of the object. Required -// when the object metadata is not otherwise provided. Overrides the -// object metadata's name value, if any. For information about how to -// URL encode object names to be path safe, see Encoding URI Path Parts. -func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { - c.urlParams_.Set("name", name) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this object. -// -// Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and -// allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get -// READER access. 
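Combining the Name override with a zero generation precondition yields the usual create-only-if-absent pattern. A sketch (svc and names are placeholders; imports of strings and log are assumed, and the Media helper used here is the one defined just below):

	created, err := svc.Objects.Insert("my-bucket", &storage.Object{ContentType: "text/plain"}).
		Name("greeting.txt").
		PredefinedAcl("projectPrivate").
		IfGenerationMatch(0). // succeed only when no live generation exists
		Media(strings.NewReader("hello, world\n")).
		Do()
	if err != nil {
		log.Fatalf("insert failed: %v", err)
	}
	log.Printf("created %s (generation %d)", created.Name, created.Generation)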
-func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the object resource -// specifies the acl property, when it defaults to full. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the acl property. -func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { - c.urlParams_.Set("projection", projection) - return c -} - -// Media specifies the media to upload in one or more chunks. The chunk -// size may be controlled by supplying a MediaOption generated by -// googleapi.ChunkSize. The chunk size defaults to -// googleapi.DefaultUploadChunkSize.The Content-Type header used in the -// upload request will be determined by sniffing the contents of r, -// unless a MediaOption generated by googleapi.ContentType is -// supplied. -// At most one of Media and ResumableMedia may be set. -func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *ObjectsInsertCall { - opts := googleapi.ProcessMediaOptions(options) - chunkSize := opts.ChunkSize - if !opts.ForceEmptyContentType { - r, c.mediaType_ = gensupport.DetermineContentType(r, opts.ContentType) - } - c.media_, c.resumableBuffer_ = gensupport.PrepareUpload(r, chunkSize) - return c -} - -// ResumableMedia specifies the media to upload in chunks and can be -// canceled with ctx. -// -// Deprecated: use Media instead. -// -// At most one of Media and ResumableMedia may be set. mediaType -// identifies the MIME media type of the upload, such as "image/png". If -// mediaType is "", it will be auto-detected. The provided ctx will -// supersede any context previously provided to the Context method. -func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall { - c.ctx_ = ctx - rdr := gensupport.ReaderAtToReader(r, size) - rdr, c.mediaType_ = gensupport.DetermineContentType(rdr, mediaType) - c.resumableBuffer_ = gensupport.NewResumableBuffer(rdr, googleapi.DefaultUploadChunkSize) - c.media_ = nil - c.mediaSize_ = size - return c -} - -// ProgressUpdater provides a callback function that will be called -// after every chunk. It should be a low-latency function in order to -// not slow down the upload operation. This should only be called when -// using ResumableMedia (as opposed to Media). -func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall { - c.progressUpdater_ = pu - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -// This context will supersede any context previously provided to the -// ResumableMedia method. 
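Although the comment above deprecates ResumableMedia in favor of Media, it remains the path that reports per-chunk progress via ProgressUpdater. A sketch of a resumable upload, assuming f is an *os.File and ctx a context.Context; the size comes from Stat, since ResumableMedia needs an io.ReaderAt plus a total length:

	fi, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}
	obj, err := svc.Objects.Insert("my-bucket", &storage.Object{Name: "backup.tar"}).
		ResumableMedia(ctx, f, fi.Size(), "application/x-tar").
		ProgressUpdater(func(current, total int64) {
			log.Printf("uploaded %d/%d bytes", current, total)
		}).
		Do()
	if err != nil {
		log.Fatalf("upload failed: %v", err)
	}
	log.Printf("stored %s", obj.Name)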
-func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall { - c.ctx_ = ctx - return c -} - -func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) - if err != nil { - return nil, err - } - ctype := "application/json" - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") - if c.media_ != nil || c.resumableBuffer_ != nil { - urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1) - protocol := "multipart" - if c.resumableBuffer_ != nil { - protocol = "resumable" - } - c.urlParams_.Set("uploadType", protocol) - } - urls += "?" + c.urlParams_.Encode() - if c.media_ != nil { - var combined io.ReadCloser - combined, ctype = gensupport.CombineBodyMedia(body, ctype, c.media_, c.mediaType_) - defer combined.Close() - body = combined - } - req, _ := http.NewRequest("POST", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - if c.resumableBuffer_ != nil && c.mediaType_ != "" { - req.Header.Set("X-Upload-Content-Type", c.mediaType_) - } - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.objects.insert" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := gensupport.Retry(c.ctx_, func() (*http.Response, error) { - return c.doRequest("json") - }, gensupport.DefaultBackoffStrategy()) - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - if c.resumableBuffer_ != nil { - loc := res.Header.Get("Location") - rx := &gensupport.ResumableUpload{ - Client: c.s.client, - UserAgent: c.s.userAgent(), - URI: loc, - Media: c.resumableBuffer_, - MediaType: c.mediaType_, - Callback: func(curr int64) { - if c.progressUpdater_ != nil { - c.progressUpdater_(curr, c.mediaSize_) - } - }, - } - ctx := c.ctx_ - if ctx == nil { - ctx = context.TODO() - } - res, err = rx.Upload(ctx) - if err != nil { - return nil, err - } - defer res.Body.Close() - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - } - ret := &Object{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Stores a new object and metadata.", - // "httpMethod": "POST", - // "id": "storage.objects.insert", - // "mediaUpload": { - // "accept": [ - // "*/*" - // ], - // "protocols": { - // "resumable": { - // "multipart": true, - // "path": "/resumable/upload/storage/v1/b/{bucket}/o" - // }, - // "simple": { - // "multipart": true, - // "path": "/upload/storage/v1/b/{bucket}/o" - // } - // } - // }, - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "contentEncoding": { - // "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "name": { - // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "query", - // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the acl property." - // ], - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o", - // "request": { - // "$ref": "Object" - // }, - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsMediaDownload": true, - // "supportsMediaUpload": true, - // "useMediaDownloadService": true - // } - -} - -// method id "storage.objects.list": - -type ObjectsListCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context -} - -// List: Retrieves a list of objects matching the criteria. -func (r *ObjectsService) List(bucket string) *ObjectsListCall { - c := &ObjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - return c -} - -// Delimiter sets the optional parameter "delimiter": Returns results in -// a directory-like mode. items will contain only objects whose names, -// aside from the prefix, do not contain delimiter. Objects whose names, -// aside from the prefix, contain delimiter will have their name, -// truncated after the delimiter, returned in prefixes. Duplicate -// prefixes are omitted. -func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall { - c.urlParams_.Set("delimiter", delimiter) - return c -} - -// MaxResults sets the optional parameter "maxResults": Maximum number -// of items plus prefixes to return. As duplicate prefixes are omitted, -// fewer total results may be returned than requested. The default value -// of this parameter is 1,000 items. -func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set of -// results to view. -func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Prefix sets the optional parameter "prefix": Filter results to -// objects whose names begin with this prefix. 
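A sketch of a directory-style listing with the setters above, paging by hand with NextPageToken (the Pages helper documented further below wraps the same loop); svc and names are placeholders:

	call := svc.Objects.List("my-bucket").Prefix("logs/2016/").Delimiter("/").MaxResults(100)
	for {
		page, err := call.Do()
		if err != nil {
			log.Fatalf("list failed: %v", err)
		}
		for _, o := range page.Items {
			log.Println(o.Name)
		}
		for _, p := range page.Prefixes {
			log.Println("subdir:", p)
		}
		if page.NextPageToken == "" {
			break
		}
		call.PageToken(page.NextPageToken)
	}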
-func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall { - c.urlParams_.Set("prefix", prefix) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the acl property. -func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { - c.urlParams_.Set("projection", projection) - return c -} - -// Versions sets the optional parameter "versions": If true, lists all -// versions of an object as distinct results. The default is false. For -// more information, see Object Versioning. -func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall { - c.urlParams_.Set("versions", fmt.Sprint(versions)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ObjectsListCall) IfNoneMatch(entityTag string) *ObjectsListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall { - c.ctx_ = ctx - return c -} - -func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("GET", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.objects.list" call. -// Exactly one of *Objects or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Objects.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Objects{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of objects matching the criteria.", - // "httpMethod": "GET", - // "id": "storage.objects.list", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which to look for objects.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "delimiter": { - // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of results to view.", - // "location": "query", - // "type": "string" - // }, - // "prefix": { - // "description": "Filter results to objects whose names begin with this prefix.", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "versions": { - // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", - // "location": "query", - // "type": "boolean" - // } - // }, - // "path": "b/{bucket}/o", - // "response": { - // "$ref": "Objects" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsSubscription": true - // } - -} - -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "storage.objects.patch": - -type ObjectsPatchCall struct { - s *Service - bucket string - object string - object2 *Object - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Patch: Updates an object's metadata. This method supports patch -// semantics. -func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { - c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.object2 = object2 - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. -func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. -func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. -func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this object. -// -// Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and -// allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get -// READER access. 
-func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the acl property. -func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { - c.urlParams_.Set("projection", projection) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall { - c.ctx_ = ctx - return c -} - -func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) - if err != nil { - return nil, err - } - ctype := "application/json" - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PATCH", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.objects.patch" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Object{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an object's metadata. 
This method supports patch semantics.", - // "httpMethod": "PATCH", - // "id": "storage.objects.patch", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the acl property." 
- // ], - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}", - // "request": { - // "$ref": "Object" - // }, - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.objects.rewrite": - -type ObjectsRewriteCall struct { - s *Service - sourceBucket string - sourceObject string - destinationBucket string - destinationObject string - object *Object - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Rewrite: Rewrites a source object to a destination object. Optionally -// overrides metadata. -func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { - c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sourceBucket = sourceBucket - c.sourceObject = sourceObject - c.destinationBucket = destinationBucket - c.destinationObject = destinationObject - c.object = object - return c -} - -// DestinationPredefinedAcl sets the optional parameter -// "destinationPredefinedAcl": Apply a predefined set of access controls -// to the destination object. -// -// Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and -// allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get -// READER access. -func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall { - c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the destination object's -// current generation matches the given value. -func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the destination object's current generation does not match the given -// value. -func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the destination object's current metageneration matches the given -// value. 
-func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the destination object's current metageneration does not -// match the given value. -func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// IfSourceGenerationMatch sets the optional parameter -// "ifSourceGenerationMatch": Makes the operation conditional on whether -// the source object's generation matches the given value. -func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) - return c -} - -// IfSourceGenerationNotMatch sets the optional parameter -// "ifSourceGenerationNotMatch": Makes the operation conditional on -// whether the source object's generation does not match the given -// value. -func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) - return c -} - -// IfSourceMetagenerationMatch sets the optional parameter -// "ifSourceMetagenerationMatch": Makes the operation conditional on -// whether the source object's current metageneration matches the given -// value. -func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) - return c -} - -// IfSourceMetagenerationNotMatch sets the optional parameter -// "ifSourceMetagenerationNotMatch": Makes the operation conditional on -// whether the source object's current metageneration does not match the -// given value. -func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) - return c -} - -// MaxBytesRewrittenPerCall sets the optional parameter -// "maxBytesRewrittenPerCall": The maximum number of bytes that will be -// rewritten per rewrite request. Most callers shouldn't need to specify -// this parameter - it is primarily in place to support testing. If -// specified the value must be an integral multiple of 1 MiB (1048576). -// Also, this only applies to requests where the source and destination -// span locations and/or storage classes. Finally, this value must not -// change across rewrite calls else you'll get an error that the -// rewriteToken is invalid. -func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall int64) *ObjectsRewriteCall { - c.urlParams_.Set("maxBytesRewrittenPerCall", fmt.Sprint(maxBytesRewrittenPerCall)) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the object resource -// specifies the acl property, when it defaults to full. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the acl property. 
-func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { - c.urlParams_.Set("projection", projection) - return c -} - -// RewriteToken sets the optional parameter "rewriteToken": Include this -// field (from the previous rewrite response) on each rewrite request -// after the first one, until the rewrite response 'done' flag is true. -// Calls that provide a rewriteToken can omit all other request fields, -// but if included those fields must match the values provided in the -// first rewrite request. -func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall { - c.urlParams_.Set("rewriteToken", rewriteToken) - return c -} - -// SourceGeneration sets the optional parameter "sourceGeneration": If -// present, selects a specific revision of the source object (as opposed -// to the latest version, the default). -func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall { - c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall { - c.ctx_ = ctx - return c -} - -func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) - if err != nil { - return nil, err - } - ctype := "application/json" - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "sourceBucket": c.sourceBucket, - "sourceObject": c.sourceObject, - "destinationBucket": c.destinationBucket, - "destinationObject": c.destinationObject, - }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.objects.rewrite" call. -// Exactly one of *RewriteResponse or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *RewriteResponse.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &RewriteResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", - // "httpMethod": "POST", - // "id": "storage.objects.rewrite", - // "parameterOrder": [ - // "sourceBucket", - // "sourceObject", - // "destinationBucket", - // "destinationObject" - // ], - // "parameters": { - // "destinationBucket": { - // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationObject": { - // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationPredefinedAcl": { - // "description": "Apply a predefined set of access controls to the destination object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." 
- // ], - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's generation matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's generation does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "maxBytesRewrittenPerCall": { - // "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "rewriteToken": { - // "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. 
Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", - // "location": "query", - // "type": "string" - // }, - // "sourceBucket": { - // "description": "Name of the bucket in which to find the source object.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "sourceGeneration": { - // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "sourceObject": { - // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // } - // }, - // "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", - // "request": { - // "$ref": "Object" - // }, - // "response": { - // "$ref": "RewriteResponse" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.objects.update": - -type ObjectsUpdateCall struct { - s *Service - bucket string - object string - object2 *Object - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// Update: Updates an object's metadata. -func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { - c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.object2 = object2 - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. -func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. -func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. 
-func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this object. -// -// Possible values: -// "authenticatedRead" - Object owner gets OWNER access, and -// allAuthenticatedUsers get READER access. -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// project team owners get OWNER access. -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// team owners get READER access. -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// members get access according to their roles. -// "publicRead" - Object owner gets OWNER access, and allUsers get -// READER access. -func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the acl property. -func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { - c.urlParams_.Set("projection", projection) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do and Download -// methods. Any pending HTTP request will be aborted if the provided -// context is canceled. -func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall { - c.ctx_ = ctx - return c -} - -func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) - if err != nil { - return nil, err - } - ctype := "application/json" - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("PUT", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Download fetches the API endpoint's "media" value, instead of the normal -// API response value. If the returned error is nil, the Response is guaranteed to -// have a 2xx status code. Callers must close the Response.Body as usual. -func (c *ObjectsUpdateCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("media") - if err != nil { - return nil, err - } - if err := googleapi.CheckMediaResponse(res); err != nil { - res.Body.Close() - return nil, err - } - return res, nil -} - -// Do executes the "storage.objects.update" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. 
Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Object{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Updates an object's metadata.", - // "httpMethod": "PUT", - // "id": "storage.objects.update", - // "parameterOrder": [ - // "bucket", - // "object" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which the object resides.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the acl property." - // ], - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/o/{object}", - // "request": { - // "$ref": "Object" - // }, - // "response": { - // "$ref": "Object" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsMediaDownload": true, - // "useMediaDownloadService": true - // } - -} - -// method id "storage.objects.watchAll": - -type ObjectsWatchAllCall struct { - s *Service - bucket string - channel *Channel - urlParams_ gensupport.URLParams - ctx_ context.Context -} - -// WatchAll: Watch for changes on all objects in a bucket. -func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall { - c := &ObjectsWatchAllCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.channel = channel - return c -} - -// Delimiter sets the optional parameter "delimiter": Returns results in -// a directory-like mode. items will contain only objects whose names, -// aside from the prefix, do not contain delimiter. Objects whose names, -// aside from the prefix, contain delimiter will have their name, -// truncated after the delimiter, returned in prefixes. Duplicate -// prefixes are omitted. -func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall { - c.urlParams_.Set("delimiter", delimiter) - return c -} - -// MaxResults sets the optional parameter "maxResults": Maximum number -// of items plus prefixes to return. As duplicate prefixes are omitted, -// fewer total results may be returned than requested. The default value -// of this parameter is 1,000 items. -func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set of -// results to view. -func (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Prefix sets the optional parameter "prefix": Filter results to -// objects whose names begin with this prefix. 
-func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { - c.urlParams_.Set("prefix", prefix) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. -// -// Possible values: -// "full" - Include all properties. -// "noAcl" - Omit the acl property. -func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall { - c.urlParams_.Set("projection", projection) - return c -} - -// Versions sets the optional parameter "versions": If true, lists all -// versions of an object as distinct results. The default is false. For -// more information, see Object Versioning. -func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall { - c.urlParams_.Set("versions", fmt.Sprint(versions)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall { - c.ctx_ = ctx - return c -} - -func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) - if err != nil { - return nil, err - } - ctype := "application/json" - c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch") - urls += "?" + c.urlParams_.Encode() - req, _ := http.NewRequest("POST", urls, body) - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) -} - -// Do executes the "storage.objects.watchAll" call. -// Exactly one of *Channel or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Channel.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, err - } - ret := &Channel{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Watch for changes on all objects in a bucket.", - // "httpMethod": "POST", - // "id": "storage.objects.watchAll", - // "parameterOrder": [ - // "bucket" - // ], - // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which to look for objects.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "delimiter": { - // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of results to view.", - // "location": "query", - // "type": "string" - // }, - // "prefix": { - // "description": "Filter results to objects whose names begin with this prefix.", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "versions": { - // "description": "If true, lists all versions of an object as distinct results. The default is false. 
For more information, see Object Versioning.", - // "location": "query", - // "type": "boolean" - // } - // }, - // "path": "b/{bucket}/o/watch", - // "request": { - // "$ref": "Channel", - // "parameterName": "resource" - // }, - // "response": { - // "$ref": "Channel" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsSubscription": true - // } - -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/.travis.yml b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/.travis.yml deleted file mode 100644 index 77152097..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go -sudo: false - -go: - - 1.4 - -install: - - go get -v -t -d google.golang.org/appengine/... - - mkdir sdk - - curl -o sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.24.zip" - - unzip sdk.zip -d sdk - - export APPENGINE_DEV_APPSERVER=$(pwd)/sdk/go_appengine/dev_appserver.py - -script: - - go version - - go test -v google.golang.org/appengine/... - - go test -v -race google.golang.org/appengine/... - - sdk/go_appengine/goapp test -v google.golang.org/appengine/... diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/LICENSE b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/README.md b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/README.md deleted file mode 100644 index 1dbb3341..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Go App Engine packages - -[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine) - -This repository supports the Go runtime on App Engine, -including both classic App Engine and Managed VMs. -It provides APIs for interacting with App Engine services. -Its canonical import path is `google.golang.org/appengine`. - -See https://cloud.google.com/appengine/docs/go/ -for more information. - -File issue reports and feature requests on the [Google App Engine issue -tracker](https://code.google.com/p/googleappengine/issues/entry?template=Go%20defect). 
- -## Directory structure -The top level directory of this repository is the `appengine` package. It -contains the -basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API -packages are in subdirectories (e.g. `datastore`). - -There is an `internal` subdirectory that contains service protocol buffers, -plus packages required for connectivity to make API calls. App Engine apps -should not directly import any package under `internal`. - -## Updating a Go App Engine app - -This section describes how to update a traditional Go App Engine app to use -these packages. - -### 1. Update YAML files (Managed VMs only) - -The `app.yaml` file (and YAML files for modules) should have these new lines added: -``` -vm: true -``` -See https://cloud.google.com/appengine/docs/go/modules/#Go_Instance_scaling_and_class for details. - -### 2. Update import paths - -The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`. -You will need to update your code to use import paths starting with that; for instance, -code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`. -You can do that manually, or by running this command to recursively update all Go source files in the current directory: -(may require GNU sed) -``` -sed -i '/"appengine/{s,"appengine,"google.golang.org/appengine,;s,appengine_,appengine/,}' \ - $(find . -name '*.go') -``` - -### 3. Update code using deprecated, removed or modified APIs - -Most App Engine services are available with exactly the same API. -A few APIs were cleaned up, and some are not available yet. -This list summarises the differences: - -* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`. -* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`. -* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead. -* `appengine.Datacenter` now takes a `context.Context` argument. -* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels. -* `delay.Call` now returns an error. -* `search.FieldLoadSaver` now handles document metadata. -* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the - `context.Context` instead. -* `aetest` no longer declares its own Context type, and uses the standard one instead. -* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been - deprecated and unused for a long time. -* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature. - Use `appengine.ModuleHostname`and `appengine.ModuleName` instead. -* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated. - Use [Google Cloud Storage](https://godoc.org/google.golang.org/cloud/storage) instead. -* `appengine/socket` is not required on Managed VMs. Use the standard `net` package instead. diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/appengine.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/appengine.go deleted file mode 100644 index be0b5f2b..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/appengine.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -// Package appengine provides basic functionality for Google App Engine. -// -// For more information on how to write Go apps for Google App Engine, see: -// https://cloud.google.com/appengine/docs/go/ -package appengine - -import ( - "net/http" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// IsDevAppServer reports whether the App Engine app is running in the -// development App Server. -func IsDevAppServer() bool { - return internal.IsDevAppServer() -} - -// NewContext returns a context for an in-flight HTTP request. -// This function is cheap. -func NewContext(req *http.Request) context.Context { - return WithContext(context.Background(), req) -} - -// WithContext returns a copy of the parent context -// and associates it with an in-flight HTTP request. -// This function is cheap. -func WithContext(parent context.Context, req *http.Request) context.Context { - return internal.WithContext(parent, req) -} - -// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call. - -// BlobKey is a key for a blobstore blob. -// -// Conceptually, this type belongs in the blobstore package, but it lives in -// the appengine package to avoid a circular dependency: blobstore depends on -// datastore, and datastore needs to refer to the BlobKey type. -type BlobKey string - -// GeoPoint represents a location as latitude/longitude in degrees. -type GeoPoint struct { - Lat, Lng float64 -} - -// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude. -func (g GeoPoint) Valid() bool { - return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180 -} - -// APICallFunc defines a function type for handling an API call. -// See WithCallOverride. -type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error - -// WithAPICallFunc returns a copy of the parent context -// that will cause API calls to invoke f instead of their normal operation. -// -// This is intended for advanced users only. -func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context { - return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f)) -} - -// APICall performs an API call. -// -// This is not intended for general use; it is exported for use in conjunction -// with WithAPICallFunc. -func APICall(ctx context.Context, service, method string, in, out proto.Message) error { - return internal.Call(ctx, service, method, in, out) -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/appengine_vm.go deleted file mode 100644 index 2f775906..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/appengine_vm.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package appengine - -import ( - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// The comment below must not be changed. -// It is used by go-app-builder to recognise that this package has -// the Main function to use in the synthetic main. -// The gophers party all night; the rabbits provide the beats. - -// Main is the principal entry point for a Managed VMs app. 
-// It installs a trivial health checker if one isn't already registered, -// and starts listening on port 8080 (overridden by the $PORT environment -// variable). -// -// See https://cloud.google.com/appengine/docs/managed-vms/custom-runtimes#health_check_requests -// for details on how to do your own health checking. -// -// Main never returns. -// -// Main is designed so that the app's main package looks like this: -// -// package main -// -// import ( -// "google.golang.org/appengine" -// -// _ "myapp/package0" -// _ "myapp/package1" -// ) -// -// func main() { -// appengine.Main() -// } -// -// The "myapp/packageX" packages are expected to register HTTP handlers -// in their init functions. -func Main() { - internal.Main() -} - -// BackgroundContext returns a context not associated with a request. -// This should only be used when not servicing a request. -// This only works on Managed VMs. -func BackgroundContext() context.Context { - return internal.BackgroundContext() -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/errors.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/errors.go deleted file mode 100644 index 16d0772e..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/errors.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// This file provides error functions for common API failure modes. - -package appengine - -import ( - "fmt" - - "google.golang.org/appengine/internal" -) - -// IsOverQuota reports whether err represents an API call failure -// due to insufficient available quota. -func IsOverQuota(err error) bool { - callErr, ok := err.(*internal.CallError) - return ok && callErr.Code == 4 -} - -// MultiError is returned by batch operations when there are errors with -// particular elements. Errors will be in a one-to-one correspondence with -// the input elements; successful elements will have a nil entry. -type MultiError []error - -func (m MultiError) Error() string { - s, n := "", 0 - for _, e := range m { - if e != nil { - if n == 0 { - s = e.Error() - } - n++ - } - } - switch n { - case 0: - return "(0 errors)" - case 1: - return s - case 2: - return s + " (and 1 other error)" - } - return fmt.Sprintf("%s (and %d other errors)", s, n-1) -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/identity.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/identity.go deleted file mode 100644 index b8dcf8f3..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/identity.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import ( - "time" - - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/app_identity" - modpb "google.golang.org/appengine/internal/modules" -) - -// AppID returns the application ID for the current application. -// The string will be a plain application ID (e.g. "appid"), with a -// domain prefix for custom domain deployments (e.g. "example.com:appid"). 
-func AppID(c context.Context) string { return internal.AppID(c) } - -// DefaultVersionHostname returns the standard hostname of the default version -// of the current application (e.g. "my-app.appspot.com"). This is suitable for -// use in constructing URLs. -func DefaultVersionHostname(c context.Context) string { - return internal.DefaultVersionHostname(c) -} - -// ModuleName returns the module name of the current instance. -func ModuleName(c context.Context) string { - return internal.ModuleName(c) -} - -// ModuleHostname returns a hostname of a module instance. -// If module is the empty string, it refers to the module of the current instance. -// If version is empty, it refers to the version of the current instance if valid, -// or the default version of the module of the current instance. -// If instance is empty, ModuleHostname returns the load-balancing hostname. -func ModuleHostname(c context.Context, module, version, instance string) (string, error) { - req := &modpb.GetHostnameRequest{} - if module != "" { - req.Module = &module - } - if version != "" { - req.Version = &version - } - if instance != "" { - req.Instance = &instance - } - res := &modpb.GetHostnameResponse{} - if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil { - return "", err - } - return *res.Hostname, nil -} - -// VersionID returns the version ID for the current application. -// It will be of the form "X.Y", where X is specified in app.yaml, -// and Y is a number generated when each version of the app is uploaded. -// It does not include a module name. -func VersionID(c context.Context) string { return internal.VersionID(c) } - -// InstanceID returns a mostly-unique identifier for this instance. -func InstanceID() string { return internal.InstanceID() } - -// Datacenter returns an identifier for the datacenter that the instance is running in. -func Datacenter(c context.Context) string { return internal.Datacenter(c) } - -// ServerSoftware returns the App Engine release version. -// In production, it looks like "Google App Engine/X.Y.Z". -// In the development appserver, it looks like "Development/X.Y". -func ServerSoftware() string { return internal.ServerSoftware() } - -// RequestID returns a string that uniquely identifies the request. -func RequestID(c context.Context) string { return internal.RequestID(c) } - -// AccessToken generates an OAuth2 access token for the specified scopes on -// behalf of service account of this application. This token will expire after -// the returned time. -func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) { - req := &pb.GetAccessTokenRequest{Scope: scopes} - res := &pb.GetAccessTokenResponse{} - - err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res) - if err != nil { - return "", time.Time{}, err - } - return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil -} - -// Certificate represents a public certificate for the app. -type Certificate struct { - KeyName string - Data []byte // PEM-encoded X.509 certificate -} - -// PublicCertificates retrieves the public certificates for the app. -// They can be used to verify a signature returned by SignBytes. 
-func PublicCertificates(c context.Context) ([]Certificate, error) { - req := &pb.GetPublicCertificateForAppRequest{} - res := &pb.GetPublicCertificateForAppResponse{} - if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil { - return nil, err - } - var cs []Certificate - for _, pc := range res.PublicCertificateList { - cs = append(cs, Certificate{ - KeyName: pc.GetKeyName(), - Data: []byte(pc.GetX509CertificatePem()), - }) - } - return cs, nil -} - -// ServiceAccount returns a string representing the service account name, in -// the form of an email address (typically app_id@appspot.gserviceaccount.com). -func ServiceAccount(c context.Context) (string, error) { - req := &pb.GetServiceAccountNameRequest{} - res := &pb.GetServiceAccountNameResponse{} - - err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res) - if err != nil { - return "", err - } - return res.GetServiceAccountName(), err -} - -// SignBytes signs bytes using a private key unique to your application. -func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) { - req := &pb.SignForAppRequest{BytesToSign: bytes} - res := &pb.SignForAppResponse{} - - if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil { - return "", nil, err - } - return res.GetKeyName(), res.GetSignatureBytes(), nil -} - -func init() { - internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name) - internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name) -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api.go deleted file mode 100644 index aa139d4d..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api.go +++ /dev/null @@ -1,640 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package internal - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" - - basepb "google.golang.org/appengine/internal/base" - logpb "google.golang.org/appengine/internal/log" - remotepb "google.golang.org/appengine/internal/remote_api" -) - -const ( - apiPath = "/rpc_http" -) - -var ( - // Incoming headers. - ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket") - dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo") - traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context") - curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") - userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") - remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") - - // Outgoing headers. 
- apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") - apiEndpointHeaderValue = []string{"app-engine-apis"} - apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method") - apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"} - apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline") - apiContentType = http.CanonicalHeaderKey("Content-Type") - apiContentTypeValue = []string{"application/octet-stream"} - logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count") - - apiHTTPClient = &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: limitDial, - }, - } -) - -func apiURL() *url.URL { - host, port := "appengine.googleapis.internal", "10001" - if h := os.Getenv("API_HOST"); h != "" { - host = h - } - if p := os.Getenv("API_PORT"); p != "" { - port = p - } - return &url.URL{ - Scheme: "http", - Host: host + ":" + port, - Path: apiPath, - } -} - -func handleHTTP(w http.ResponseWriter, r *http.Request) { - c := &context{ - req: r, - outHeader: w.Header(), - apiURL: apiURL(), - } - stopFlushing := make(chan int) - - ctxs.Lock() - ctxs.m[r] = c - ctxs.Unlock() - defer func() { - ctxs.Lock() - delete(ctxs.m, r) - ctxs.Unlock() - }() - - // Patch up RemoteAddr so it looks reasonable. - if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. - r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. - r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } - - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. - go c.logFlusher(stopFlushing) - - executeRequestSafely(c, r) - c.outHeader = nil // make sure header changes aren't respected any more - - stopFlushing <- 1 // any logging beyond this point will be dropped - - // Flush any pending logs asynchronously. - c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - go c.flushLog(false) - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) - - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } -} - -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 - } - }() - - http.DefaultServeMux.ServeHTTP(c, r) -} - -func renderPanic(x interface{}) string { - buf := make([]byte, 16<<10) // 16 KB should be plenty - buf = buf[:runtime.Stack(buf, false)] - - // Remove the first few stack frames: - // this func - // the recover closure in the caller - // That will root the stack trace at the site of the panic. 
- const ( - skipStart = "internal.renderPanic" - skipFrames = 2 - ) - start := bytes.Index(buf, []byte(skipStart)) - p := start - for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ { - p = bytes.IndexByte(buf[p+1:], '\n') + p + 1 - if p < 0 { - break - } - } - if p >= 0 { - // buf[start:p+1] is the block to remove. - // Copy buf[p+1:] over buf[start:] and shrink buf. - copy(buf[start:], buf[p+1:]) - buf = buf[:len(buf)-(p+1-start)] - } - - // Add panic heading. - head := fmt.Sprintf("panic: %v\n\n", x) - if len(head) > len(buf) { - // Extremely unlikely to happen. - return head - } - copy(buf[len(head):], buf) - copy(buf, head) - - return string(buf) -} - -var ctxs = struct { - sync.Mutex - m map[*http.Request]*context - bg *context // background context, lazily initialized - // dec is used by tests to decorate the netcontext.Context returned - // for a given request. This allows tests to add overrides (such as - // WithAppIDOverride) to the context. The map is nil outside tests. - dec map[*http.Request]func(netcontext.Context) netcontext.Context -}{ - m: make(map[*http.Request]*context), -} - -// context represents the context of an in-flight HTTP request. -// It implements the appengine.Context and http.ResponseWriter interfaces. -type context struct { - req *http.Request - - outCode int - outHeader http.Header - outBody []byte - - pendingLogs struct { - sync.Mutex - lines []*logpb.UserAppLogLine - flushes int - } - - apiURL *url.URL -} - -var contextKey = "holds a *context" - -func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) - return c -} - -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) - if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { - ctx = withNamespace(ctx, ns) - } - return ctx -} - -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) -} - -func IncomingHeaders(ctx netcontext.Context) http.Header { - if c := fromContext(ctx); c != nil { - return c.req.Header - } - return nil -} - -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { - ctxs.Lock() - c := ctxs.m[req] - d := ctxs.dec[req] - ctxs.Unlock() - - if d != nil { - parent = d(parent) - } - - if c == nil { - // Someone passed in an http.Request that is not in-flight. - // We panic here rather than panicking at a later point - // so that stack traces will be more sensible. - log.Panic("appengine: NewContext passed an unknown http.Request") - } - return withContext(parent, c) -} - -func BackgroundContext() netcontext.Context { - ctxs.Lock() - defer ctxs.Unlock() - - if ctxs.bg != nil { - return toContext(ctxs.bg) - } - - // Compute background security ticket. - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - ticket := fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - - ctxs.bg = &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go ctxs.bg.logFlusher(make(chan int)) - - return toContext(ctxs.bg) -} - -// RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. 
It returns a closure to delete -// the registration. -// It should only be used by aetest package. -func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) func() { - c := &context{ - req: req, - apiURL: apiURL, - } - ctxs.Lock() - defer ctxs.Unlock() - if _, ok := ctxs.m[req]; ok { - log.Panic("req already associated with context") - } - if _, ok := ctxs.dec[req]; ok { - log.Panic("req already associated with context") - } - if ctxs.dec == nil { - ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context) - } - ctxs.m[req] = c - ctxs.dec[req] = decorate - - return func() { - ctxs.Lock() - delete(ctxs.m, req) - delete(ctxs.dec, req) - ctxs.Unlock() - } -} - -var errTimeout = &CallError{ - Detail: "Deadline exceeded", - Code: int32(remotepb.RpcError_CANCELLED), - Timeout: true, -} - -func (c *context) Header() http.Header { return c.outHeader } - -// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status -// codes do not permit a response body (nor response entity headers such as -// Content-Length, Content-Type, etc). -func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -func (c *context) Write(b []byte) (int, error) { - if c.outCode == 0 { - c.WriteHeader(http.StatusOK) - } - if len(b) > 0 && !bodyAllowedForStatus(c.outCode) { - return 0, http.ErrBodyNotAllowed - } - c.outBody = append(c.outBody, b...) - return len(b), nil -} - -func (c *context) WriteHeader(code int) { - if c.outCode != 0 { - logf(c, 3, "WriteHeader called multiple times on request.") // error level - return - } - c.outCode = code -} - -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { - hreq := &http.Request{ - Method: "POST", - URL: c.apiURL, - Header: http.Header{ - apiEndpointHeader: apiEndpointHeaderValue, - apiMethodHeader: apiMethodHeaderValue, - apiContentType: apiContentTypeValue, - apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)}, - }, - Body: ioutil.NopCloser(bytes.NewReader(body)), - ContentLength: int64(len(body)), - Host: c.apiURL.Host, - } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) - } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) - } - - tr := apiHTTPClient.Transport.(*http.Transport) - - var timedOut int32 // atomic; set to 1 if timed out - t := time.AfterFunc(timeout, func() { - atomic.StoreInt32(&timedOut, 1) - tr.CancelRequest(hreq) - }) - defer t.Stop() - defer func() { - // Check if timeout was exceeded. 
- if atomic.LoadInt32(&timedOut) != 0 { - err = errTimeout - } - }() - - hresp, err := apiHTTPClient.Do(hreq) - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge HTTP failed: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - defer hresp.Body.Close() - hrespBody, err := ioutil.ReadAll(hresp.Body) - if hresp.StatusCode != 200 { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge response bad: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return hrespBody, nil -} - -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { - if f, ctx, ok := callOverrideFromContext(ctx); ok { - return f(ctx, service, method, in, out) - } - - // Handle already-done contexts quickly. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errors.New("not an App Engine context") - } - - // Apply transaction modifications if we're in a transaction. - if t := transactionFromContext(ctx); t != nil { - if t.finished { - return errors.New("transaction context has expired") - } - applyTransaction(in, &t.transaction) - } - - // Default RPC timeout is 60s. - timeout := 60 * time.Second - if deadline, ok := ctx.Deadline(); ok { - timeout = deadline.Sub(time.Now()) - } - - data, err := proto.Marshal(in) - if err != nil { - return err - } - - ticket := c.req.Header.Get(ticketHeader) - req := &remotepb.Request{ - ServiceName: &service, - Method: &method, - Request: data, - RequestId: &ticket, - } - hreqBody, err := proto.Marshal(req) - if err != nil { - return err - } - - hrespBody, err := c.post(hreqBody, timeout) - if err != nil { - return err - } - - res := &remotepb.Response{} - if err := proto.Unmarshal(hrespBody, res); err != nil { - return err - } - if res.RpcError != nil { - ce := &CallError{ - Detail: res.RpcError.GetDetail(), - Code: *res.RpcError.Code, - } - switch remotepb.RpcError_ErrorCode(ce.Code) { - case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED: - ce.Timeout = true - } - return ce - } - if res.ApplicationError != nil { - return &APIError{ - Service: *req.ServiceName, - Detail: res.ApplicationError.GetDetail(), - Code: *res.ApplicationError.Code, - } - } - if res.Exception != nil || res.JavaException != nil { - // This shouldn't happen, but let's be defensive. - return &CallError{ - Detail: "service bridge returned exception", - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return proto.Unmarshal(res.Response, out) -} - -func (c *context) Request() *http.Request { - return c.req -} - -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { - // Truncate long log lines. - // TODO(dsymonds): Check if this is still necessary. - const lim = 8 << 10 - if len(*ll.Message) > lim { - suffix := fmt.Sprintf("...(length %d)", len(*ll.Message)) - ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix) - } - - c.pendingLogs.Lock() - c.pendingLogs.lines = append(c.pendingLogs.lines, ll) - c.pendingLogs.Unlock() -} - -var logLevelName = map[int64]string{ - 0: "DEBUG", - 1: "INFO", - 2: "WARNING", - 3: "ERROR", - 4: "CRITICAL", -} - -func logf(c *context, level int64, format string, args ...interface{}) { - s := fmt.Sprintf(format, args...) 
- s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - log.Print(logLevelName[level] + ": " + s) -} - -// flushLog attempts to flush any pending logs to the appserver. -// It should not be called concurrently. -func (c *context) flushLog(force bool) (flushed bool) { - c.pendingLogs.Lock() - // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. - n, rem := 0, 30<<20 - for ; n < len(c.pendingLogs.lines); n++ { - ll := c.pendingLogs.lines[n] - // Each log line will require about 3 bytes of overhead. - nb := proto.Size(ll) + 3 - if nb > rem { - break - } - rem -= nb - } - lines := c.pendingLogs.lines[:n] - c.pendingLogs.lines = c.pendingLogs.lines[n:] - c.pendingLogs.Unlock() - - if len(lines) == 0 && !force { - // Nothing to flush. - return false - } - - rescueLogs := false - defer func() { - if rescueLogs { - c.pendingLogs.Lock() - c.pendingLogs.lines = append(lines, c.pendingLogs.lines...) - c.pendingLogs.Unlock() - } - }() - - buf, err := proto.Marshal(&logpb.UserAppLogGroup{ - LogLine: lines, - }) - if err != nil { - log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err) - rescueLogs = true - return false - } - - req := &logpb.FlushRequest{ - Logs: buf, - } - res := &basepb.VoidProto{} - c.pendingLogs.Lock() - c.pendingLogs.flushes++ - c.pendingLogs.Unlock() - if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil { - log.Printf("internal.flushLog: Flush RPC: %v", err) - rescueLogs = true - return false - } - return true -} - -const ( - // Log flushing parameters. - flushInterval = 1 * time.Second - forceFlushInterval = 60 * time.Second -) - -func (c *context) logFlusher(stop <-chan int) { - lastFlush := time.Now() - tick := time.NewTicker(flushInterval) - for { - select { - case <-stop: - // Request finished. - tick.Stop() - return - case <-tick.C: - force := time.Now().Sub(lastFlush) > forceFlushInterval - if c.flushLog(force) { - lastFlush = time.Now() - } - } - } -} - -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api_classic.go deleted file mode 100644 index 1c072e9d..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api_classic.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build appengine - -package internal - -import ( - "errors" - "net/http" - "time" - - "appengine" - "appengine_internal" - basepb "appengine_internal/base" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" -) - -var contextKey = "holds an appengine.Context" - -func fromContext(ctx netcontext.Context) appengine.Context { - c, _ := ctx.Value(&contextKey).(appengine.Context) - return c -} - -// This is only for classic App Engine adapters. 
-func ClassicContextFromContext(ctx netcontext.Context) appengine.Context { - return fromContext(ctx) -} - -func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) - - s := &basepb.StringProto{} - c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) - if ns := s.GetValue(); ns != "" { - ctx = NamespacedContext(ctx, ns) - } - - return ctx -} - -func IncomingHeaders(ctx netcontext.Context) http.Header { - if c := fromContext(ctx); c != nil { - if req, ok := c.Request().(*http.Request); ok { - return req.Header - } - } - return nil -} - -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { - c := appengine.NewContext(req) - return withContext(parent, c) -} - -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { - if f, ctx, ok := callOverrideFromContext(ctx); ok { - return f(ctx, service, method, in, out) - } - - // Handle already-done contexts quickly. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errors.New("not an App Engine context") - } - - // Apply transaction modifications if we're in a transaction. - if t := transactionFromContext(ctx); t != nil { - if t.finished { - return errors.New("transaction context has expired") - } - applyTransaction(in, &t.transaction) - } - - var opts *appengine_internal.CallOptions - if d, ok := ctx.Deadline(); ok { - opts = &appengine_internal.CallOptions{ - Timeout: d.Sub(time.Now()), - } - } - - err := c.Call(service, method, in, out, opts) - switch v := err.(type) { - case *appengine_internal.APIError: - return &APIError{ - Service: v.Service, - Detail: v.Detail, - Code: v.Code, - } - case *appengine_internal.CallError: - return &CallError{ - Detail: v.Detail, - Code: v.Code, - Timeout: v.Timeout, - } - } - return err -} - -func handleHTTP(w http.ResponseWriter, r *http.Request) { - panic("handleHTTP called; this should be impossible") -} - -func logf(c appengine.Context, level int64, format string, args ...interface{}) { - var fn func(format string, args ...interface{}) - switch level { - case 0: - fn = c.Debugf - case 1: - fn = c.Infof - case 2: - fn = c.Warningf - case 3: - fn = c.Errorf - case 4: - fn = c.Criticalf - default: - // This shouldn't happen. - fn = c.Criticalf - } - fn(format, args...) -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api_common.go deleted file mode 100644 index ec5383e6..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/api_common.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -import ( - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" -) - -type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error - -var callOverrideKey = "holds []CallOverrideFunc" - -func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { - // We avoid appending to any existing call override - // so we don't risk overwriting a popped stack below. 
- var cofs []CallOverrideFunc - if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok { - cofs = append(cofs, uf...) - } - cofs = append(cofs, f) - return netcontext.WithValue(ctx, &callOverrideKey, cofs) -} - -func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { - cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) - if len(cofs) == 0 { - return nil, nil, false - } - // We found a list of overrides; grab the last, and reconstitute a - // context that will hide it. - f := cofs[len(cofs)-1] - ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) - return f, ctx, true -} - -type logOverrideFunc func(level int64, format string, args ...interface{}) - -var logOverrideKey = "holds a logOverrideFunc" - -func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { - return netcontext.WithValue(ctx, &logOverrideKey, f) -} - -var appIDOverrideKey = "holds a string, being the full app ID" - -func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { - return netcontext.WithValue(ctx, &appIDOverrideKey, appID) -} - -var namespaceKey = "holds the namespace string" - -func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { - return netcontext.WithValue(ctx, &namespaceKey, ns) -} - -func NamespaceFromContext(ctx netcontext.Context) string { - // If there's no namespace, return the empty string. - ns, _ := ctx.Value(&namespaceKey).(string) - return ns -} - -// FullyQualifiedAppID returns the fully-qualified application ID. -// This may contain a partition prefix (e.g. "s~" for High Replication apps), -// or a domain prefix (e.g. "example.com:"). -func FullyQualifiedAppID(ctx netcontext.Context) string { - if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { - return id - } - return fullyQualifiedAppID(ctx) -} - -func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { - if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { - f(level, format, args...) - return - } - logf(fromContext(ctx), level, format, args...) -} - -// NamespacedContext wraps a Context to support namespaces. -func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { - n := &namespacedContext{ - namespace: namespace, - } - return withNamespace(WithCallOverride(ctx, n.call), namespace) -} - -type namespacedContext struct { - namespace string -} - -func (n *namespacedContext) call(ctx netcontext.Context, service, method string, in, out proto.Message) error { - // Apply any namespace mods. - if mod, ok := NamespaceMods[service]; ok { - mod(in, n.namespace) - } - return Call(ctx, service, method, in, out) -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_id.go deleted file mode 100644 index 11df8c07..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_id.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -package internal - -import ( - "strings" -) - -func parseFullAppID(appid string) (partition, domain, displayID string) { - if i := strings.Index(appid, "~"); i != -1 { - partition, appid = appid[:i], appid[i+1:] - } - if i := strings.Index(appid, ":"); i != -1 { - domain, appid = appid[:i], appid[i+1:] - } - return partition, domain, appid -} - -// appID returns "appid" or "domain.com:appid". -func appID(fullAppID string) string { - _, dom, dis := parseFullAppID(fullAppID) - if dom != "" { - return dom + ":" + dis - } - return dis -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go deleted file mode 100644 index 87d9701b..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go +++ /dev/null @@ -1,296 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto -// DO NOT EDIT! - -/* -Package app_identity is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/app_identity/app_identity_service.proto - -It has these top-level messages: - AppIdentityServiceError - SignForAppRequest - SignForAppResponse - GetPublicCertificateForAppRequest - PublicCertificate - GetPublicCertificateForAppResponse - GetServiceAccountNameRequest - GetServiceAccountNameResponse - GetAccessTokenRequest - GetAccessTokenResponse - GetDefaultGcsBucketNameRequest - GetDefaultGcsBucketNameResponse -*/ -package app_identity - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type AppIdentityServiceError_ErrorCode int32 - -const ( - AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0 - AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9 - AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000 - AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001 - AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002 - AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003 - AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005 - AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006 -) - -var AppIdentityServiceError_ErrorCode_name = map[int32]string{ - 0: "SUCCESS", - 9: "UNKNOWN_SCOPE", - 1000: "BLOB_TOO_LARGE", - 1001: "DEADLINE_EXCEEDED", - 1002: "NOT_A_VALID_APP", - 1003: "UNKNOWN_ERROR", - 1005: "NOT_ALLOWED", - 1006: "NOT_IMPLEMENTED", -} -var AppIdentityServiceError_ErrorCode_value = map[string]int32{ - "SUCCESS": 0, - "UNKNOWN_SCOPE": 9, - "BLOB_TOO_LARGE": 1000, - "DEADLINE_EXCEEDED": 1001, - "NOT_A_VALID_APP": 1002, - "UNKNOWN_ERROR": 1003, - "NOT_ALLOWED": 1005, - "NOT_IMPLEMENTED": 1006, -} - -func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode { - p := new(AppIdentityServiceError_ErrorCode) - *p = x - return p -} -func (x AppIdentityServiceError_ErrorCode) String() string { - return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x)) -} -func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode") - if err != nil { - return err - } - *x = AppIdentityServiceError_ErrorCode(value) - return nil -} - -type AppIdentityServiceError struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} } -func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) } -func (*AppIdentityServiceError) ProtoMessage() {} - -type SignForAppRequest struct { - BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign" json:"bytes_to_sign,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} } -func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) } -func (*SignForAppRequest) ProtoMessage() {} - -func (m *SignForAppRequest) GetBytesToSign() []byte { - if m != nil { - return m.BytesToSign - } - return nil -} - -type SignForAppResponse struct { - KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"` - SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes" json:"signature_bytes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} } -func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) } -func (*SignForAppResponse) ProtoMessage() {} - -func (m *SignForAppResponse) GetKeyName() string { - if m != nil && m.KeyName != nil { - return *m.KeyName - } - return "" -} - -func (m *SignForAppResponse) GetSignatureBytes() []byte { - if m != nil { - return m.SignatureBytes - } - return nil -} - -type GetPublicCertificateForAppRequest struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetPublicCertificateForAppRequest) Reset() { *m = 
GetPublicCertificateForAppRequest{} } -func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) } -func (*GetPublicCertificateForAppRequest) ProtoMessage() {} - -type PublicCertificate struct { - KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"` - X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem" json:"x509_certificate_pem,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PublicCertificate) Reset() { *m = PublicCertificate{} } -func (m *PublicCertificate) String() string { return proto.CompactTextString(m) } -func (*PublicCertificate) ProtoMessage() {} - -func (m *PublicCertificate) GetKeyName() string { - if m != nil && m.KeyName != nil { - return *m.KeyName - } - return "" -} - -func (m *PublicCertificate) GetX509CertificatePem() string { - if m != nil && m.X509CertificatePem != nil { - return *m.X509CertificatePem - } - return "" -} - -type GetPublicCertificateForAppResponse struct { - PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list" json:"public_certificate_list,omitempty"` - MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second" json:"max_client_cache_time_in_second,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} } -func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) } -func (*GetPublicCertificateForAppResponse) ProtoMessage() {} - -func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate { - if m != nil { - return m.PublicCertificateList - } - return nil -} - -func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 { - if m != nil && m.MaxClientCacheTimeInSecond != nil { - return *m.MaxClientCacheTimeInSecond - } - return 0 -} - -type GetServiceAccountNameRequest struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} } -func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetServiceAccountNameRequest) ProtoMessage() {} - -type GetServiceAccountNameResponse struct { - ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name" json:"service_account_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} } -func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) } -func (*GetServiceAccountNameResponse) ProtoMessage() {} - -func (m *GetServiceAccountNameResponse) GetServiceAccountName() string { - if m != nil && m.ServiceAccountName != nil { - return *m.ServiceAccountName - } - return "" -} - -type GetAccessTokenRequest struct { - Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"` - ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id" json:"service_account_id,omitempty"` - ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name" json:"service_account_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} } -func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) } -func (*GetAccessTokenRequest) ProtoMessage() {} - -func (m *GetAccessTokenRequest) 
GetScope() []string { - if m != nil { - return m.Scope - } - return nil -} - -func (m *GetAccessTokenRequest) GetServiceAccountId() int64 { - if m != nil && m.ServiceAccountId != nil { - return *m.ServiceAccountId - } - return 0 -} - -func (m *GetAccessTokenRequest) GetServiceAccountName() string { - if m != nil && m.ServiceAccountName != nil { - return *m.ServiceAccountName - } - return "" -} - -type GetAccessTokenResponse struct { - AccessToken *string `protobuf:"bytes,1,opt,name=access_token" json:"access_token,omitempty"` - ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} } -func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) } -func (*GetAccessTokenResponse) ProtoMessage() {} - -func (m *GetAccessTokenResponse) GetAccessToken() string { - if m != nil && m.AccessToken != nil { - return *m.AccessToken - } - return "" -} - -func (m *GetAccessTokenResponse) GetExpirationTime() int64 { - if m != nil && m.ExpirationTime != nil { - return *m.ExpirationTime - } - return 0 -} - -type GetDefaultGcsBucketNameRequest struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} } -func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {} - -type GetDefaultGcsBucketNameResponse struct { - DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name" json:"default_gcs_bucket_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} } -func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) } -func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {} - -func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string { - if m != nil && m.DefaultGcsBucketName != nil { - return *m.DefaultGcsBucketName - } - return "" -} - -func init() { -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto deleted file mode 100644 index 19610ca5..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto2"; -option go_package = "app_identity"; - -package appengine; - -message AppIdentityServiceError { - enum ErrorCode { - SUCCESS = 0; - UNKNOWN_SCOPE = 9; - BLOB_TOO_LARGE = 1000; - DEADLINE_EXCEEDED = 1001; - NOT_A_VALID_APP = 1002; - UNKNOWN_ERROR = 1003; - NOT_ALLOWED = 1005; - NOT_IMPLEMENTED = 1006; - } -} - -message SignForAppRequest { - optional bytes bytes_to_sign = 1; -} - -message SignForAppResponse { - optional string key_name = 1; - optional bytes signature_bytes = 2; -} - -message GetPublicCertificateForAppRequest { -} - -message PublicCertificate { - optional string key_name = 1; - optional string x509_certificate_pem = 2; -} - -message GetPublicCertificateForAppResponse { - repeated PublicCertificate public_certificate_list = 1; - optional int64 max_client_cache_time_in_second = 2; -} - -message GetServiceAccountNameRequest { -} - -message 
GetServiceAccountNameResponse { - optional string service_account_name = 1; -} - -message GetAccessTokenRequest { - repeated string scope = 1; - optional int64 service_account_id = 2; - optional string service_account_name = 3; -} - -message GetAccessTokenResponse { - optional string access_token = 1; - optional int64 expiration_time = 2; -} - -message GetDefaultGcsBucketNameRequest { -} - -message GetDefaultGcsBucketNameResponse { - optional string default_gcs_bucket_name = 1; -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/base/api_base.pb.go deleted file mode 100644 index 36a19565..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/base/api_base.pb.go +++ /dev/null @@ -1,133 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/base/api_base.proto -// DO NOT EDIT! - -/* -Package base is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/base/api_base.proto - -It has these top-level messages: - StringProto - Integer32Proto - Integer64Proto - BoolProto - DoubleProto - BytesProto - VoidProto -*/ -package base - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type StringProto struct { - Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StringProto) Reset() { *m = StringProto{} } -func (m *StringProto) String() string { return proto.CompactTextString(m) } -func (*StringProto) ProtoMessage() {} - -func (m *StringProto) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type Integer32Proto struct { - Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Integer32Proto) Reset() { *m = Integer32Proto{} } -func (m *Integer32Proto) String() string { return proto.CompactTextString(m) } -func (*Integer32Proto) ProtoMessage() {} - -func (m *Integer32Proto) GetValue() int32 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Integer64Proto struct { - Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Integer64Proto) Reset() { *m = Integer64Proto{} } -func (m *Integer64Proto) String() string { return proto.CompactTextString(m) } -func (*Integer64Proto) ProtoMessage() {} - -func (m *Integer64Proto) GetValue() int64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type BoolProto struct { - Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BoolProto) Reset() { *m = BoolProto{} } -func (m *BoolProto) String() string { return proto.CompactTextString(m) } -func (*BoolProto) ProtoMessage() {} - -func (m *BoolProto) GetValue() bool { - if m != nil && m.Value != nil { - return *m.Value - } - return false -} - -type DoubleProto struct { - Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DoubleProto) Reset() { *m = DoubleProto{} } -func (m *DoubleProto) String() string { return 
proto.CompactTextString(m) } -func (*DoubleProto) ProtoMessage() {} - -func (m *DoubleProto) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type BytesProto struct { - Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BytesProto) Reset() { *m = BytesProto{} } -func (m *BytesProto) String() string { return proto.CompactTextString(m) } -func (*BytesProto) ProtoMessage() {} - -func (m *BytesProto) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type VoidProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *VoidProto) Reset() { *m = VoidProto{} } -func (m *VoidProto) String() string { return proto.CompactTextString(m) } -func (*VoidProto) ProtoMessage() {} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/base/api_base.proto deleted file mode 100644 index 56cd7a3c..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/base/api_base.proto +++ /dev/null @@ -1,33 +0,0 @@ -// Built-in base types for API calls. Primarily useful as return types. - -syntax = "proto2"; -option go_package = "base"; - -package appengine.base; - -message StringProto { - required string value = 1; -} - -message Integer32Proto { - required int32 value = 1; -} - -message Integer64Proto { - required int64 value = 1; -} - -message BoolProto { - required bool value = 1; -} - -message DoubleProto { - required double value = 1; -} - -message BytesProto { - required bytes value = 1 [ctype=CORD]; -} - -message VoidProto { -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go deleted file mode 100644 index 8613cb73..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go +++ /dev/null @@ -1,2778 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto -// DO NOT EDIT! - -/* -Package datastore is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/datastore/datastore_v3.proto - -It has these top-level messages: - Action - PropertyValue - Property - Path - Reference - User - EntityProto - CompositeProperty - Index - CompositeIndex - IndexPostfix - IndexPosition - Snapshot - InternalHeader - Transaction - Query - CompiledQuery - CompiledCursor - Cursor - Error - Cost - GetRequest - GetResponse - PutRequest - PutResponse - TouchRequest - TouchResponse - DeleteRequest - DeleteResponse - NextRequest - QueryResult - AllocateIdsRequest - AllocateIdsResponse - CompositeIndices - AddActionsRequest - AddActionsResponse - BeginTransactionRequest - CommitResponse -*/ -package datastore - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type Property_Meaning int32 - -const ( - Property_NO_MEANING Property_Meaning = 0 - Property_BLOB Property_Meaning = 14 - Property_TEXT Property_Meaning = 15 - Property_BYTESTRING Property_Meaning = 16 - Property_ATOM_CATEGORY Property_Meaning = 1 - Property_ATOM_LINK Property_Meaning = 2 - Property_ATOM_TITLE Property_Meaning = 3 - Property_ATOM_CONTENT Property_Meaning = 4 - Property_ATOM_SUMMARY Property_Meaning = 5 - Property_ATOM_AUTHOR Property_Meaning = 6 - Property_GD_WHEN Property_Meaning = 7 - Property_GD_EMAIL Property_Meaning = 8 - Property_GEORSS_POINT Property_Meaning = 9 - Property_GD_IM Property_Meaning = 10 - Property_GD_PHONENUMBER Property_Meaning = 11 - Property_GD_POSTALADDRESS Property_Meaning = 12 - Property_GD_RATING Property_Meaning = 13 - Property_BLOBKEY Property_Meaning = 17 - Property_ENTITY_PROTO Property_Meaning = 19 - Property_INDEX_VALUE Property_Meaning = 18 -) - -var Property_Meaning_name = map[int32]string{ - 0: "NO_MEANING", - 14: "BLOB", - 15: "TEXT", - 16: "BYTESTRING", - 1: "ATOM_CATEGORY", - 2: "ATOM_LINK", - 3: "ATOM_TITLE", - 4: "ATOM_CONTENT", - 5: "ATOM_SUMMARY", - 6: "ATOM_AUTHOR", - 7: "GD_WHEN", - 8: "GD_EMAIL", - 9: "GEORSS_POINT", - 10: "GD_IM", - 11: "GD_PHONENUMBER", - 12: "GD_POSTALADDRESS", - 13: "GD_RATING", - 17: "BLOBKEY", - 19: "ENTITY_PROTO", - 18: "INDEX_VALUE", -} -var Property_Meaning_value = map[string]int32{ - "NO_MEANING": 0, - "BLOB": 14, - "TEXT": 15, - "BYTESTRING": 16, - "ATOM_CATEGORY": 1, - "ATOM_LINK": 2, - "ATOM_TITLE": 3, - "ATOM_CONTENT": 4, - "ATOM_SUMMARY": 5, - "ATOM_AUTHOR": 6, - "GD_WHEN": 7, - "GD_EMAIL": 8, - "GEORSS_POINT": 9, - "GD_IM": 10, - "GD_PHONENUMBER": 11, - "GD_POSTALADDRESS": 12, - "GD_RATING": 13, - "BLOBKEY": 17, - "ENTITY_PROTO": 19, - "INDEX_VALUE": 18, -} - -func (x Property_Meaning) Enum() *Property_Meaning { - p := new(Property_Meaning) - *p = x - return p -} -func (x Property_Meaning) String() string { - return proto.EnumName(Property_Meaning_name, int32(x)) -} -func (x *Property_Meaning) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning") - if err != nil { - return err - } - *x = Property_Meaning(value) - return nil -} - -type Property_FtsTokenizationOption int32 - -const ( - Property_HTML Property_FtsTokenizationOption = 1 - Property_ATOM Property_FtsTokenizationOption = 2 -) - -var Property_FtsTokenizationOption_name = map[int32]string{ - 1: "HTML", - 2: "ATOM", -} -var Property_FtsTokenizationOption_value = map[string]int32{ - "HTML": 1, - "ATOM": 2, -} - -func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption { - p := new(Property_FtsTokenizationOption) - *p = x - return p -} -func (x Property_FtsTokenizationOption) String() string { - return proto.EnumName(Property_FtsTokenizationOption_name, int32(x)) -} -func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption") - if err != nil { - return err - } - *x = Property_FtsTokenizationOption(value) - return nil -} - -type EntityProto_Kind int32 - -const ( - EntityProto_GD_CONTACT EntityProto_Kind = 1 - EntityProto_GD_EVENT EntityProto_Kind = 2 - EntityProto_GD_MESSAGE EntityProto_Kind = 3 -) - -var EntityProto_Kind_name = map[int32]string{ - 1: "GD_CONTACT", - 2: "GD_EVENT", - 3: "GD_MESSAGE", -} -var EntityProto_Kind_value = map[string]int32{ - 
"GD_CONTACT": 1, - "GD_EVENT": 2, - "GD_MESSAGE": 3, -} - -func (x EntityProto_Kind) Enum() *EntityProto_Kind { - p := new(EntityProto_Kind) - *p = x - return p -} -func (x EntityProto_Kind) String() string { - return proto.EnumName(EntityProto_Kind_name, int32(x)) -} -func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind") - if err != nil { - return err - } - *x = EntityProto_Kind(value) - return nil -} - -type Index_Property_Direction int32 - -const ( - Index_Property_ASCENDING Index_Property_Direction = 1 - Index_Property_DESCENDING Index_Property_Direction = 2 -) - -var Index_Property_Direction_name = map[int32]string{ - 1: "ASCENDING", - 2: "DESCENDING", -} -var Index_Property_Direction_value = map[string]int32{ - "ASCENDING": 1, - "DESCENDING": 2, -} - -func (x Index_Property_Direction) Enum() *Index_Property_Direction { - p := new(Index_Property_Direction) - *p = x - return p -} -func (x Index_Property_Direction) String() string { - return proto.EnumName(Index_Property_Direction_name, int32(x)) -} -func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction") - if err != nil { - return err - } - *x = Index_Property_Direction(value) - return nil -} - -type CompositeIndex_State int32 - -const ( - CompositeIndex_WRITE_ONLY CompositeIndex_State = 1 - CompositeIndex_READ_WRITE CompositeIndex_State = 2 - CompositeIndex_DELETED CompositeIndex_State = 3 - CompositeIndex_ERROR CompositeIndex_State = 4 -) - -var CompositeIndex_State_name = map[int32]string{ - 1: "WRITE_ONLY", - 2: "READ_WRITE", - 3: "DELETED", - 4: "ERROR", -} -var CompositeIndex_State_value = map[string]int32{ - "WRITE_ONLY": 1, - "READ_WRITE": 2, - "DELETED": 3, - "ERROR": 4, -} - -func (x CompositeIndex_State) Enum() *CompositeIndex_State { - p := new(CompositeIndex_State) - *p = x - return p -} -func (x CompositeIndex_State) String() string { - return proto.EnumName(CompositeIndex_State_name, int32(x)) -} -func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State") - if err != nil { - return err - } - *x = CompositeIndex_State(value) - return nil -} - -type Snapshot_Status int32 - -const ( - Snapshot_INACTIVE Snapshot_Status = 0 - Snapshot_ACTIVE Snapshot_Status = 1 -) - -var Snapshot_Status_name = map[int32]string{ - 0: "INACTIVE", - 1: "ACTIVE", -} -var Snapshot_Status_value = map[string]int32{ - "INACTIVE": 0, - "ACTIVE": 1, -} - -func (x Snapshot_Status) Enum() *Snapshot_Status { - p := new(Snapshot_Status) - *p = x - return p -} -func (x Snapshot_Status) String() string { - return proto.EnumName(Snapshot_Status_name, int32(x)) -} -func (x *Snapshot_Status) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status") - if err != nil { - return err - } - *x = Snapshot_Status(value) - return nil -} - -type Query_Hint int32 - -const ( - Query_ORDER_FIRST Query_Hint = 1 - Query_ANCESTOR_FIRST Query_Hint = 2 - Query_FILTER_FIRST Query_Hint = 3 -) - -var Query_Hint_name = map[int32]string{ - 1: "ORDER_FIRST", - 2: "ANCESTOR_FIRST", - 3: "FILTER_FIRST", -} -var Query_Hint_value = map[string]int32{ - "ORDER_FIRST": 1, - "ANCESTOR_FIRST": 2, - "FILTER_FIRST": 3, -} - -func (x Query_Hint) Enum() *Query_Hint { - p := new(Query_Hint) - *p = x - return p -} 
-func (x Query_Hint) String() string { - return proto.EnumName(Query_Hint_name, int32(x)) -} -func (x *Query_Hint) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint") - if err != nil { - return err - } - *x = Query_Hint(value) - return nil -} - -type Query_Filter_Operator int32 - -const ( - Query_Filter_LESS_THAN Query_Filter_Operator = 1 - Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2 - Query_Filter_GREATER_THAN Query_Filter_Operator = 3 - Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4 - Query_Filter_EQUAL Query_Filter_Operator = 5 - Query_Filter_IN Query_Filter_Operator = 6 - Query_Filter_EXISTS Query_Filter_Operator = 7 -) - -var Query_Filter_Operator_name = map[int32]string{ - 1: "LESS_THAN", - 2: "LESS_THAN_OR_EQUAL", - 3: "GREATER_THAN", - 4: "GREATER_THAN_OR_EQUAL", - 5: "EQUAL", - 6: "IN", - 7: "EXISTS", -} -var Query_Filter_Operator_value = map[string]int32{ - "LESS_THAN": 1, - "LESS_THAN_OR_EQUAL": 2, - "GREATER_THAN": 3, - "GREATER_THAN_OR_EQUAL": 4, - "EQUAL": 5, - "IN": 6, - "EXISTS": 7, -} - -func (x Query_Filter_Operator) Enum() *Query_Filter_Operator { - p := new(Query_Filter_Operator) - *p = x - return p -} -func (x Query_Filter_Operator) String() string { - return proto.EnumName(Query_Filter_Operator_name, int32(x)) -} -func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator") - if err != nil { - return err - } - *x = Query_Filter_Operator(value) - return nil -} - -type Query_Order_Direction int32 - -const ( - Query_Order_ASCENDING Query_Order_Direction = 1 - Query_Order_DESCENDING Query_Order_Direction = 2 -) - -var Query_Order_Direction_name = map[int32]string{ - 1: "ASCENDING", - 2: "DESCENDING", -} -var Query_Order_Direction_value = map[string]int32{ - "ASCENDING": 1, - "DESCENDING": 2, -} - -func (x Query_Order_Direction) Enum() *Query_Order_Direction { - p := new(Query_Order_Direction) - *p = x - return p -} -func (x Query_Order_Direction) String() string { - return proto.EnumName(Query_Order_Direction_name, int32(x)) -} -func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction") - if err != nil { - return err - } - *x = Query_Order_Direction(value) - return nil -} - -type Error_ErrorCode int32 - -const ( - Error_BAD_REQUEST Error_ErrorCode = 1 - Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2 - Error_INTERNAL_ERROR Error_ErrorCode = 3 - Error_NEED_INDEX Error_ErrorCode = 4 - Error_TIMEOUT Error_ErrorCode = 5 - Error_PERMISSION_DENIED Error_ErrorCode = 6 - Error_BIGTABLE_ERROR Error_ErrorCode = 7 - Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8 - Error_CAPABILITY_DISABLED Error_ErrorCode = 9 - Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10 - Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11 -) - -var Error_ErrorCode_name = map[int32]string{ - 1: "BAD_REQUEST", - 2: "CONCURRENT_TRANSACTION", - 3: "INTERNAL_ERROR", - 4: "NEED_INDEX", - 5: "TIMEOUT", - 6: "PERMISSION_DENIED", - 7: "BIGTABLE_ERROR", - 8: "COMMITTED_BUT_STILL_APPLYING", - 9: "CAPABILITY_DISABLED", - 10: "TRY_ALTERNATE_BACKEND", - 11: "SAFE_TIME_TOO_OLD", -} -var Error_ErrorCode_value = map[string]int32{ - "BAD_REQUEST": 1, - "CONCURRENT_TRANSACTION": 2, - "INTERNAL_ERROR": 3, - "NEED_INDEX": 4, - "TIMEOUT": 5, - "PERMISSION_DENIED": 6, - "BIGTABLE_ERROR": 7, - "COMMITTED_BUT_STILL_APPLYING": 8, - 
"CAPABILITY_DISABLED": 9, - "TRY_ALTERNATE_BACKEND": 10, - "SAFE_TIME_TOO_OLD": 11, -} - -func (x Error_ErrorCode) Enum() *Error_ErrorCode { - p := new(Error_ErrorCode) - *p = x - return p -} -func (x Error_ErrorCode) String() string { - return proto.EnumName(Error_ErrorCode_name, int32(x)) -} -func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode") - if err != nil { - return err - } - *x = Error_ErrorCode(value) - return nil -} - -type PutRequest_AutoIdPolicy int32 - -const ( - PutRequest_CURRENT PutRequest_AutoIdPolicy = 0 - PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1 -) - -var PutRequest_AutoIdPolicy_name = map[int32]string{ - 0: "CURRENT", - 1: "SEQUENTIAL", -} -var PutRequest_AutoIdPolicy_value = map[string]int32{ - "CURRENT": 0, - "SEQUENTIAL": 1, -} - -func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy { - p := new(PutRequest_AutoIdPolicy) - *p = x - return p -} -func (x PutRequest_AutoIdPolicy) String() string { - return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x)) -} -func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy") - if err != nil { - return err - } - *x = PutRequest_AutoIdPolicy(value) - return nil -} - -type Action struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *Action) Reset() { *m = Action{} } -func (m *Action) String() string { return proto.CompactTextString(m) } -func (*Action) ProtoMessage() {} - -type PropertyValue struct { - Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"` - BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"` - StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"` - Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue" json:"pointvalue,omitempty"` - Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue" json:"uservalue,omitempty"` - Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue" json:"referencevalue,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue) Reset() { *m = PropertyValue{} } -func (m *PropertyValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue) ProtoMessage() {} - -func (m *PropertyValue) GetInt64Value() int64 { - if m != nil && m.Int64Value != nil { - return *m.Int64Value - } - return 0 -} - -func (m *PropertyValue) GetBooleanValue() bool { - if m != nil && m.BooleanValue != nil { - return *m.BooleanValue - } - return false -} - -func (m *PropertyValue) GetStringValue() string { - if m != nil && m.StringValue != nil { - return *m.StringValue - } - return "" -} - -func (m *PropertyValue) GetDoubleValue() float64 { - if m != nil && m.DoubleValue != nil { - return *m.DoubleValue - } - return 0 -} - -func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue { - if m != nil { - return m.Pointvalue - } - return nil -} - -func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue { - if m != nil { - return m.Uservalue - } - return nil -} - -func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue { - if m != nil { - return m.Referencevalue - } - return nil -} - -type PropertyValue_PointValue struct { - X *float64 
`protobuf:"fixed64,6,req,name=x" json:"x,omitempty"` - Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} } -func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_PointValue) ProtoMessage() {} - -func (m *PropertyValue_PointValue) GetX() float64 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -func (m *PropertyValue_PointValue) GetY() float64 { - if m != nil && m.Y != nil { - return *m.Y - } - return 0 -} - -type PropertyValue_UserValue struct { - Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"` - AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain" json:"auth_domain,omitempty"` - Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"` - FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity" json:"federated_identity,omitempty"` - FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider" json:"federated_provider,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} } -func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_UserValue) ProtoMessage() {} - -func (m *PropertyValue_UserValue) GetEmail() string { - if m != nil && m.Email != nil { - return *m.Email - } - return "" -} - -func (m *PropertyValue_UserValue) GetAuthDomain() string { - if m != nil && m.AuthDomain != nil { - return *m.AuthDomain - } - return "" -} - -func (m *PropertyValue_UserValue) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *PropertyValue_UserValue) GetFederatedIdentity() string { - if m != nil && m.FederatedIdentity != nil { - return *m.FederatedIdentity - } - return "" -} - -func (m *PropertyValue_UserValue) GetFederatedProvider() string { - if m != nil && m.FederatedProvider != nil { - return *m.FederatedProvider - } - return "" -} - -type PropertyValue_ReferenceValue struct { - App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"` - Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement" json:"pathelement,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} } -func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_ReferenceValue) ProtoMessage() {} - -func (m *PropertyValue_ReferenceValue) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *PropertyValue_ReferenceValue) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement { - if m != nil { - return m.Pathelement - } - return nil -} - -type PropertyValue_ReferenceValue_PathElement struct { - Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"` - Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"` - Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue_ReferenceValue_PathElement) 
Reset() { - *m = PropertyValue_ReferenceValue_PathElement{} -} -func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {} - -func (m *PropertyValue_ReferenceValue_PathElement) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *PropertyValue_ReferenceValue_PathElement) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -type Property struct { - Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"` - MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri" json:"meaning_uri,omitempty"` - Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` - Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"` - Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"` - Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"` - FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"` - Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Property) Reset() { *m = Property{} } -func (m *Property) String() string { return proto.CompactTextString(m) } -func (*Property) ProtoMessage() {} - -const Default_Property_Meaning Property_Meaning = Property_NO_MEANING -const Default_Property_Searchable bool = false -const Default_Property_Locale string = "en" - -func (m *Property) GetMeaning() Property_Meaning { - if m != nil && m.Meaning != nil { - return *m.Meaning - } - return Default_Property_Meaning -} - -func (m *Property) GetMeaningUri() string { - if m != nil && m.MeaningUri != nil { - return *m.MeaningUri - } - return "" -} - -func (m *Property) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *Property) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -func (m *Property) GetMultiple() bool { - if m != nil && m.Multiple != nil { - return *m.Multiple - } - return false -} - -func (m *Property) GetSearchable() bool { - if m != nil && m.Searchable != nil { - return *m.Searchable - } - return Default_Property_Searchable -} - -func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption { - if m != nil && m.FtsTokenizationOption != nil { - return *m.FtsTokenizationOption - } - return Property_HTML -} - -func (m *Property) GetLocale() string { - if m != nil && m.Locale != nil { - return *m.Locale - } - return Default_Property_Locale -} - -type Path struct { - Element []*Path_Element `protobuf:"group,1,rep,name=Element" json:"element,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Path) Reset() { *m = Path{} } -func (m *Path) String() string { return proto.CompactTextString(m) } -func (*Path) ProtoMessage() {} - -func (m *Path) GetElement() []*Path_Element { - if m != nil { - return m.Element - } - return nil -} - -type Path_Element struct { - Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"` - Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"` - 
Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Path_Element) Reset() { *m = Path_Element{} } -func (m *Path_Element) String() string { return proto.CompactTextString(m) } -func (*Path_Element) ProtoMessage() {} - -func (m *Path_Element) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *Path_Element) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *Path_Element) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -type Reference struct { - App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"` - Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Reference) Reset() { *m = Reference{} } -func (m *Reference) String() string { return proto.CompactTextString(m) } -func (*Reference) ProtoMessage() {} - -func (m *Reference) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Reference) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *Reference) GetPath() *Path { - if m != nil { - return m.Path - } - return nil -} - -type User struct { - Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"` - AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain" json:"auth_domain,omitempty"` - Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"` - FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity" json:"federated_identity,omitempty"` - FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider" json:"federated_provider,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *User) Reset() { *m = User{} } -func (m *User) String() string { return proto.CompactTextString(m) } -func (*User) ProtoMessage() {} - -func (m *User) GetEmail() string { - if m != nil && m.Email != nil { - return *m.Email - } - return "" -} - -func (m *User) GetAuthDomain() string { - if m != nil && m.AuthDomain != nil { - return *m.AuthDomain - } - return "" -} - -func (m *User) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *User) GetFederatedIdentity() string { - if m != nil && m.FederatedIdentity != nil { - return *m.FederatedIdentity - } - return "" -} - -func (m *User) GetFederatedProvider() string { - if m != nil && m.FederatedProvider != nil { - return *m.FederatedProvider - } - return "" -} - -type EntityProto struct { - Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"` - EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group" json:"entity_group,omitempty"` - Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"` - Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"` - KindUri *string `protobuf:"bytes,5,opt,name=kind_uri" json:"kind_uri,omitempty"` - Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` - RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property" json:"raw_property,omitempty"` - Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *EntityProto) Reset() { *m = 
EntityProto{} } -func (m *EntityProto) String() string { return proto.CompactTextString(m) } -func (*EntityProto) ProtoMessage() {} - -func (m *EntityProto) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *EntityProto) GetEntityGroup() *Path { - if m != nil { - return m.EntityGroup - } - return nil -} - -func (m *EntityProto) GetOwner() *User { - if m != nil { - return m.Owner - } - return nil -} - -func (m *EntityProto) GetKind() EntityProto_Kind { - if m != nil && m.Kind != nil { - return *m.Kind - } - return EntityProto_GD_CONTACT -} - -func (m *EntityProto) GetKindUri() string { - if m != nil && m.KindUri != nil { - return *m.KindUri - } - return "" -} - -func (m *EntityProto) GetProperty() []*Property { - if m != nil { - return m.Property - } - return nil -} - -func (m *EntityProto) GetRawProperty() []*Property { - if m != nil { - return m.RawProperty - } - return nil -} - -func (m *EntityProto) GetRank() int32 { - if m != nil && m.Rank != nil { - return *m.Rank - } - return 0 -} - -type CompositeProperty struct { - IndexId *int64 `protobuf:"varint,1,req,name=index_id" json:"index_id,omitempty"` - Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompositeProperty) Reset() { *m = CompositeProperty{} } -func (m *CompositeProperty) String() string { return proto.CompactTextString(m) } -func (*CompositeProperty) ProtoMessage() {} - -func (m *CompositeProperty) GetIndexId() int64 { - if m != nil && m.IndexId != nil { - return *m.IndexId - } - return 0 -} - -func (m *CompositeProperty) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -type Index struct { - EntityType *string `protobuf:"bytes,1,req,name=entity_type" json:"entity_type,omitempty"` - Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"` - Property []*Index_Property `protobuf:"group,2,rep,name=Property" json:"property,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Index) Reset() { *m = Index{} } -func (m *Index) String() string { return proto.CompactTextString(m) } -func (*Index) ProtoMessage() {} - -func (m *Index) GetEntityType() string { - if m != nil && m.EntityType != nil { - return *m.EntityType - } - return "" -} - -func (m *Index) GetAncestor() bool { - if m != nil && m.Ancestor != nil { - return *m.Ancestor - } - return false -} - -func (m *Index) GetProperty() []*Index_Property { - if m != nil { - return m.Property - } - return nil -} - -type Index_Property struct { - Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` - Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Index_Property) Reset() { *m = Index_Property{} } -func (m *Index_Property) String() string { return proto.CompactTextString(m) } -func (*Index_Property) ProtoMessage() {} - -const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING - -func (m *Index_Property) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *Index_Property) GetDirection() Index_Property_Direction { - if m != nil && m.Direction != nil { - return *m.Direction - } - return Default_Index_Property_Direction -} - -type CompositeIndex struct { - AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` - Id *int64 `protobuf:"varint,2,req,name=id" 
json:"id,omitempty"` - Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"` - State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"` - OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,def=0" json:"only_use_if_required,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompositeIndex) Reset() { *m = CompositeIndex{} } -func (m *CompositeIndex) String() string { return proto.CompactTextString(m) } -func (*CompositeIndex) ProtoMessage() {} - -const Default_CompositeIndex_OnlyUseIfRequired bool = false - -func (m *CompositeIndex) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *CompositeIndex) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *CompositeIndex) GetDefinition() *Index { - if m != nil { - return m.Definition - } - return nil -} - -func (m *CompositeIndex) GetState() CompositeIndex_State { - if m != nil && m.State != nil { - return *m.State - } - return CompositeIndex_WRITE_ONLY -} - -func (m *CompositeIndex) GetOnlyUseIfRequired() bool { - if m != nil && m.OnlyUseIfRequired != nil { - return *m.OnlyUseIfRequired - } - return Default_CompositeIndex_OnlyUseIfRequired -} - -type IndexPostfix struct { - IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value" json:"index_value,omitempty"` - Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"` - Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *IndexPostfix) Reset() { *m = IndexPostfix{} } -func (m *IndexPostfix) String() string { return proto.CompactTextString(m) } -func (*IndexPostfix) ProtoMessage() {} - -const Default_IndexPostfix_Before bool = true - -func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue { - if m != nil { - return m.IndexValue - } - return nil -} - -func (m *IndexPostfix) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *IndexPostfix) GetBefore() bool { - if m != nil && m.Before != nil { - return *m.Before - } - return Default_IndexPostfix_Before -} - -type IndexPostfix_IndexValue struct { - PropertyName *string `protobuf:"bytes,1,req,name=property_name" json:"property_name,omitempty"` - Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} } -func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) } -func (*IndexPostfix_IndexValue) ProtoMessage() {} - -func (m *IndexPostfix_IndexValue) GetPropertyName() string { - if m != nil && m.PropertyName != nil { - return *m.PropertyName - } - return "" -} - -func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -type IndexPosition struct { - Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` - Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *IndexPosition) Reset() { *m = IndexPosition{} } -func (m *IndexPosition) String() string { return proto.CompactTextString(m) } -func (*IndexPosition) ProtoMessage() {} - -const Default_IndexPosition_Before bool = true - -func (m *IndexPosition) GetKey() string { - if m != nil && m.Key != nil { - return 
*m.Key - } - return "" -} - -func (m *IndexPosition) GetBefore() bool { - if m != nil && m.Before != nil { - return *m.Before - } - return Default_IndexPosition_Before -} - -type Snapshot struct { - Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} - -func (m *Snapshot) GetTs() int64 { - if m != nil && m.Ts != nil { - return *m.Ts - } - return 0 -} - -type InternalHeader struct { - Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *InternalHeader) Reset() { *m = InternalHeader{} } -func (m *InternalHeader) String() string { return proto.CompactTextString(m) } -func (*InternalHeader) ProtoMessage() {} - -func (m *InternalHeader) GetQos() string { - if m != nil && m.Qos != nil { - return *m.Qos - } - return "" -} - -type Transaction struct { - Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` - Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"` - App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"` - MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Transaction) Reset() { *m = Transaction{} } -func (m *Transaction) String() string { return proto.CompactTextString(m) } -func (*Transaction) ProtoMessage() {} - -const Default_Transaction_MarkChanges bool = false - -func (m *Transaction) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *Transaction) GetHandle() uint64 { - if m != nil && m.Handle != nil { - return *m.Handle - } - return 0 -} - -func (m *Transaction) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Transaction) GetMarkChanges() bool { - if m != nil && m.MarkChanges != nil { - return *m.MarkChanges - } - return Default_Transaction_MarkChanges -} - -type Query struct { - Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"` - App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,29,opt,name=name_space" json:"name_space,omitempty"` - Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` - Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"` - Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter" json:"filter,omitempty"` - SearchQuery *string `protobuf:"bytes,8,opt,name=search_query" json:"search_query,omitempty"` - Order []*Query_Order `protobuf:"group,9,rep,name=Order" json:"order,omitempty"` - Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"` - Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"` - Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"` - Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"` - CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"` - EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor" json:"end_compiled_cursor,omitempty"` - CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index" json:"composite_index,omitempty"` - RequirePerfectPlan *bool 
`protobuf:"varint,20,opt,name=require_perfect_plan,def=0" json:"require_perfect_plan,omitempty"` - KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,def=0" json:"keys_only,omitempty"` - Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"` - Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"` - FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms" json:"failover_ms,omitempty"` - Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"` - PropertyName []string `protobuf:"bytes,33,rep,name=property_name" json:"property_name,omitempty"` - GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name" json:"group_by_property_name,omitempty"` - Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"` - MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds" json:"min_safe_time_seconds,omitempty"` - SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name" json:"safe_replica_name,omitempty"` - PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,def=0" json:"persist_offset,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Query) Reset() { *m = Query{} } -func (m *Query) String() string { return proto.CompactTextString(m) } -func (*Query) ProtoMessage() {} - -const Default_Query_Offset int32 = 0 -const Default_Query_RequirePerfectPlan bool = false -const Default_Query_KeysOnly bool = false -const Default_Query_Compile bool = false -const Default_Query_PersistOffset bool = false - -func (m *Query) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *Query) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Query) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *Query) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *Query) GetAncestor() *Reference { - if m != nil { - return m.Ancestor - } - return nil -} - -func (m *Query) GetFilter() []*Query_Filter { - if m != nil { - return m.Filter - } - return nil -} - -func (m *Query) GetSearchQuery() string { - if m != nil && m.SearchQuery != nil { - return *m.SearchQuery - } - return "" -} - -func (m *Query) GetOrder() []*Query_Order { - if m != nil { - return m.Order - } - return nil -} - -func (m *Query) GetHint() Query_Hint { - if m != nil && m.Hint != nil { - return *m.Hint - } - return Query_ORDER_FIRST -} - -func (m *Query) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *Query) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return Default_Query_Offset -} - -func (m *Query) GetLimit() int32 { - if m != nil && m.Limit != nil { - return *m.Limit - } - return 0 -} - -func (m *Query) GetCompiledCursor() *CompiledCursor { - if m != nil { - return m.CompiledCursor - } - return nil -} - -func (m *Query) GetEndCompiledCursor() *CompiledCursor { - if m != nil { - return m.EndCompiledCursor - } - return nil -} - -func (m *Query) GetCompositeIndex() []*CompositeIndex { - if m != nil { - return m.CompositeIndex - } - return nil -} - -func (m *Query) GetRequirePerfectPlan() bool { - if m != nil && m.RequirePerfectPlan != nil { - return *m.RequirePerfectPlan - } - return Default_Query_RequirePerfectPlan -} - -func (m *Query) GetKeysOnly() bool { - if m != nil && 
m.KeysOnly != nil { - return *m.KeysOnly - } - return Default_Query_KeysOnly -} - -func (m *Query) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *Query) GetCompile() bool { - if m != nil && m.Compile != nil { - return *m.Compile - } - return Default_Query_Compile -} - -func (m *Query) GetFailoverMs() int64 { - if m != nil && m.FailoverMs != nil { - return *m.FailoverMs - } - return 0 -} - -func (m *Query) GetStrong() bool { - if m != nil && m.Strong != nil { - return *m.Strong - } - return false -} - -func (m *Query) GetPropertyName() []string { - if m != nil { - return m.PropertyName - } - return nil -} - -func (m *Query) GetGroupByPropertyName() []string { - if m != nil { - return m.GroupByPropertyName - } - return nil -} - -func (m *Query) GetDistinct() bool { - if m != nil && m.Distinct != nil { - return *m.Distinct - } - return false -} - -func (m *Query) GetMinSafeTimeSeconds() int64 { - if m != nil && m.MinSafeTimeSeconds != nil { - return *m.MinSafeTimeSeconds - } - return 0 -} - -func (m *Query) GetSafeReplicaName() []string { - if m != nil { - return m.SafeReplicaName - } - return nil -} - -func (m *Query) GetPersistOffset() bool { - if m != nil && m.PersistOffset != nil { - return *m.PersistOffset - } - return Default_Query_PersistOffset -} - -type Query_Filter struct { - Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"` - Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Query_Filter) Reset() { *m = Query_Filter{} } -func (m *Query_Filter) String() string { return proto.CompactTextString(m) } -func (*Query_Filter) ProtoMessage() {} - -func (m *Query_Filter) GetOp() Query_Filter_Operator { - if m != nil && m.Op != nil { - return *m.Op - } - return Query_Filter_LESS_THAN -} - -func (m *Query_Filter) GetProperty() []*Property { - if m != nil { - return m.Property - } - return nil -} - -type Query_Order struct { - Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"` - Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Query_Order) Reset() { *m = Query_Order{} } -func (m *Query_Order) String() string { return proto.CompactTextString(m) } -func (*Query_Order) ProtoMessage() {} - -const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING - -func (m *Query_Order) GetProperty() string { - if m != nil && m.Property != nil { - return *m.Property - } - return "" -} - -func (m *Query_Order) GetDirection() Query_Order_Direction { - if m != nil && m.Direction != nil { - return *m.Direction - } - return Default_Query_Order_Direction -} - -type CompiledQuery struct { - Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan" json:"primaryscan,omitempty"` - Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan" json:"mergejoinscan,omitempty"` - IndexDef *Index `protobuf:"bytes,21,opt,name=index_def" json:"index_def,omitempty"` - Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` - Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` - KeysOnly *bool `protobuf:"varint,12,req,name=keys_only" json:"keys_only,omitempty"` - PropertyName []string `protobuf:"bytes,24,rep,name=property_name" 
json:"property_name,omitempty"` - DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size" json:"distinct_infix_size,omitempty"` - Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter" json:"entityfilter,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledQuery) Reset() { *m = CompiledQuery{} } -func (m *CompiledQuery) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery) ProtoMessage() {} - -const Default_CompiledQuery_Offset int32 = 0 - -func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan { - if m != nil { - return m.Primaryscan - } - return nil -} - -func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan { - if m != nil { - return m.Mergejoinscan - } - return nil -} - -func (m *CompiledQuery) GetIndexDef() *Index { - if m != nil { - return m.IndexDef - } - return nil -} - -func (m *CompiledQuery) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return Default_CompiledQuery_Offset -} - -func (m *CompiledQuery) GetLimit() int32 { - if m != nil && m.Limit != nil { - return *m.Limit - } - return 0 -} - -func (m *CompiledQuery) GetKeysOnly() bool { - if m != nil && m.KeysOnly != nil { - return *m.KeysOnly - } - return false -} - -func (m *CompiledQuery) GetPropertyName() []string { - if m != nil { - return m.PropertyName - } - return nil -} - -func (m *CompiledQuery) GetDistinctInfixSize() int32 { - if m != nil && m.DistinctInfixSize != nil { - return *m.DistinctInfixSize - } - return 0 -} - -func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter { - if m != nil { - return m.Entityfilter - } - return nil -} - -type CompiledQuery_PrimaryScan struct { - IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"` - StartKey *string `protobuf:"bytes,3,opt,name=start_key" json:"start_key,omitempty"` - StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive" json:"start_inclusive,omitempty"` - EndKey *string `protobuf:"bytes,5,opt,name=end_key" json:"end_key,omitempty"` - EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive" json:"end_inclusive,omitempty"` - StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value" json:"start_postfix_value,omitempty"` - EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value" json:"end_postfix_value,omitempty"` - EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us" json:"end_unapplied_log_timestamp_us,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} } -func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery_PrimaryScan) ProtoMessage() {} - -func (m *CompiledQuery_PrimaryScan) GetIndexName() string { - if m != nil && m.IndexName != nil { - return *m.IndexName - } - return "" -} - -func (m *CompiledQuery_PrimaryScan) GetStartKey() string { - if m != nil && m.StartKey != nil { - return *m.StartKey - } - return "" -} - -func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool { - if m != nil && m.StartInclusive != nil { - return *m.StartInclusive - } - return false -} - -func (m *CompiledQuery_PrimaryScan) GetEndKey() string { - if m != nil && m.EndKey != nil { - return *m.EndKey - } - return "" -} - -func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool { - if m != nil && m.EndInclusive != nil { - return *m.EndInclusive - } - 
return false -} - -func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string { - if m != nil { - return m.StartPostfixValue - } - return nil -} - -func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string { - if m != nil { - return m.EndPostfixValue - } - return nil -} - -func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 { - if m != nil && m.EndUnappliedLogTimestampUs != nil { - return *m.EndUnappliedLogTimestampUs - } - return 0 -} - -type CompiledQuery_MergeJoinScan struct { - IndexName *string `protobuf:"bytes,8,req,name=index_name" json:"index_name,omitempty"` - PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value" json:"prefix_value,omitempty"` - ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,def=0" json:"value_prefix,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} } -func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery_MergeJoinScan) ProtoMessage() {} - -const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false - -func (m *CompiledQuery_MergeJoinScan) GetIndexName() string { - if m != nil && m.IndexName != nil { - return *m.IndexName - } - return "" -} - -func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string { - if m != nil { - return m.PrefixValue - } - return nil -} - -func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool { - if m != nil && m.ValuePrefix != nil { - return *m.ValuePrefix - } - return Default_CompiledQuery_MergeJoinScan_ValuePrefix -} - -type CompiledQuery_EntityFilter struct { - Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"` - Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"` - Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} } -func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery_EntityFilter) ProtoMessage() {} - -const Default_CompiledQuery_EntityFilter_Distinct bool = false - -func (m *CompiledQuery_EntityFilter) GetDistinct() bool { - if m != nil && m.Distinct != nil { - return *m.Distinct - } - return Default_CompiledQuery_EntityFilter_Distinct -} - -func (m *CompiledQuery_EntityFilter) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference { - if m != nil { - return m.Ancestor - } - return nil -} - -type CompiledCursor struct { - Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position" json:"position,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledCursor) Reset() { *m = CompiledCursor{} } -func (m *CompiledCursor) String() string { return proto.CompactTextString(m) } -func (*CompiledCursor) ProtoMessage() {} - -func (m *CompiledCursor) GetPosition() *CompiledCursor_Position { - if m != nil { - return m.Position - } - return nil -} - -type CompiledCursor_Position struct { - StartKey *string `protobuf:"bytes,27,opt,name=start_key" json:"start_key,omitempty"` - Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue" json:"indexvalue,omitempty"` - Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"` - StartInclusive *bool 
`protobuf:"varint,28,opt,name=start_inclusive,def=1" json:"start_inclusive,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} } -func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) } -func (*CompiledCursor_Position) ProtoMessage() {} - -const Default_CompiledCursor_Position_StartInclusive bool = true - -func (m *CompiledCursor_Position) GetStartKey() string { - if m != nil && m.StartKey != nil { - return *m.StartKey - } - return "" -} - -func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue { - if m != nil { - return m.Indexvalue - } - return nil -} - -func (m *CompiledCursor_Position) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *CompiledCursor_Position) GetStartInclusive() bool { - if m != nil && m.StartInclusive != nil { - return *m.StartInclusive - } - return Default_CompiledCursor_Position_StartInclusive -} - -type CompiledCursor_Position_IndexValue struct { - Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"` - Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} } -func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) } -func (*CompiledCursor_Position_IndexValue) ProtoMessage() {} - -func (m *CompiledCursor_Position_IndexValue) GetProperty() string { - if m != nil && m.Property != nil { - return *m.Property - } - return "" -} - -func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -type Cursor struct { - Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"` - App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Cursor) Reset() { *m = Cursor{} } -func (m *Cursor) String() string { return proto.CompactTextString(m) } -func (*Cursor) ProtoMessage() {} - -func (m *Cursor) GetCursor() uint64 { - if m != nil && m.Cursor != nil { - return *m.Cursor - } - return 0 -} - -func (m *Cursor) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -type Error struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *Error) Reset() { *m = Error{} } -func (m *Error) String() string { return proto.CompactTextString(m) } -func (*Error) ProtoMessage() {} - -type Cost struct { - IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes" json:"index_writes,omitempty"` - IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes" json:"index_write_bytes,omitempty"` - EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes" json:"entity_writes,omitempty"` - EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes" json:"entity_write_bytes,omitempty"` - Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost" json:"commitcost,omitempty"` - ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta" json:"approximate_storage_delta,omitempty"` - IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates" json:"id_sequence_updates,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Cost) Reset() { *m = Cost{} } -func (m *Cost) String() string { return proto.CompactTextString(m) } -func (*Cost) 
ProtoMessage() {} - -func (m *Cost) GetIndexWrites() int32 { - if m != nil && m.IndexWrites != nil { - return *m.IndexWrites - } - return 0 -} - -func (m *Cost) GetIndexWriteBytes() int32 { - if m != nil && m.IndexWriteBytes != nil { - return *m.IndexWriteBytes - } - return 0 -} - -func (m *Cost) GetEntityWrites() int32 { - if m != nil && m.EntityWrites != nil { - return *m.EntityWrites - } - return 0 -} - -func (m *Cost) GetEntityWriteBytes() int32 { - if m != nil && m.EntityWriteBytes != nil { - return *m.EntityWriteBytes - } - return 0 -} - -func (m *Cost) GetCommitcost() *Cost_CommitCost { - if m != nil { - return m.Commitcost - } - return nil -} - -func (m *Cost) GetApproximateStorageDelta() int32 { - if m != nil && m.ApproximateStorageDelta != nil { - return *m.ApproximateStorageDelta - } - return 0 -} - -func (m *Cost) GetIdSequenceUpdates() int32 { - if m != nil && m.IdSequenceUpdates != nil { - return *m.IdSequenceUpdates - } - return 0 -} - -type Cost_CommitCost struct { - RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts" json:"requested_entity_puts,omitempty"` - RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes" json:"requested_entity_deletes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} } -func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) } -func (*Cost_CommitCost) ProtoMessage() {} - -func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 { - if m != nil && m.RequestedEntityPuts != nil { - return *m.RequestedEntityPuts - } - return 0 -} - -func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 { - if m != nil && m.RequestedEntityDeletes != nil { - return *m.RequestedEntityDeletes - } - return 0 -} - -type GetRequest struct { - Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"` - Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` - FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms" json:"failover_ms,omitempty"` - Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"` - AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,def=0" json:"allow_deferred,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetRequest) Reset() { *m = GetRequest{} } -func (m *GetRequest) String() string { return proto.CompactTextString(m) } -func (*GetRequest) ProtoMessage() {} - -const Default_GetRequest_AllowDeferred bool = false - -func (m *GetRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *GetRequest) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *GetRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *GetRequest) GetFailoverMs() int64 { - if m != nil && m.FailoverMs != nil { - return *m.FailoverMs - } - return 0 -} - -func (m *GetRequest) GetStrong() bool { - if m != nil && m.Strong != nil { - return *m.Strong - } - return false -} - -func (m *GetRequest) GetAllowDeferred() bool { - if m != nil && m.AllowDeferred != nil { - return *m.AllowDeferred - } - return Default_GetRequest_AllowDeferred -} - -type GetResponse struct { - Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity" json:"entity,omitempty"` - Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" 
json:"deferred,omitempty"` - InOrder *bool `protobuf:"varint,6,opt,name=in_order,def=1" json:"in_order,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetResponse) Reset() { *m = GetResponse{} } -func (m *GetResponse) String() string { return proto.CompactTextString(m) } -func (*GetResponse) ProtoMessage() {} - -const Default_GetResponse_InOrder bool = true - -func (m *GetResponse) GetEntity() []*GetResponse_Entity { - if m != nil { - return m.Entity - } - return nil -} - -func (m *GetResponse) GetDeferred() []*Reference { - if m != nil { - return m.Deferred - } - return nil -} - -func (m *GetResponse) GetInOrder() bool { - if m != nil && m.InOrder != nil { - return *m.InOrder - } - return Default_GetResponse_InOrder -} - -type GetResponse_Entity struct { - Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"` - Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"` - Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} } -func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) } -func (*GetResponse_Entity) ProtoMessage() {} - -func (m *GetResponse_Entity) GetEntity() *EntityProto { - if m != nil { - return m.Entity - } - return nil -} - -func (m *GetResponse_Entity) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *GetResponse_Entity) GetVersion() int64 { - if m != nil && m.Version != nil { - return *m.Version - } - return 0 -} - -type PutRequest struct { - Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"` - Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"` - Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` - CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index" json:"composite_index,omitempty"` - Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` - Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` - MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` - Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` - AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PutRequest) Reset() { *m = PutRequest{} } -func (m *PutRequest) String() string { return proto.CompactTextString(m) } -func (*PutRequest) ProtoMessage() {} - -const Default_PutRequest_Trusted bool = false -const Default_PutRequest_Force bool = false -const Default_PutRequest_MarkChanges bool = false -const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT - -func (m *PutRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *PutRequest) GetEntity() []*EntityProto { - if m != nil { - return m.Entity - } - return nil -} - -func (m *PutRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *PutRequest) GetCompositeIndex() []*CompositeIndex { - if m != nil { - return m.CompositeIndex - } - return nil -} - -func (m *PutRequest) GetTrusted() bool { - if m != nil && m.Trusted != nil { - return *m.Trusted - } - return 
Default_PutRequest_Trusted -} - -func (m *PutRequest) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return Default_PutRequest_Force -} - -func (m *PutRequest) GetMarkChanges() bool { - if m != nil && m.MarkChanges != nil { - return *m.MarkChanges - } - return Default_PutRequest_MarkChanges -} - -func (m *PutRequest) GetSnapshot() []*Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy { - if m != nil && m.AutoIdPolicy != nil { - return *m.AutoIdPolicy - } - return Default_PutRequest_AutoIdPolicy -} - -type PutResponse struct { - Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"` - Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PutResponse) Reset() { *m = PutResponse{} } -func (m *PutResponse) String() string { return proto.CompactTextString(m) } -func (*PutResponse) ProtoMessage() {} - -func (m *PutResponse) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *PutResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *PutResponse) GetVersion() []int64 { - if m != nil { - return m.Version - } - return nil -} - -type TouchRequest struct { - Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` - Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index" json:"composite_index,omitempty"` - Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"` - Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *TouchRequest) Reset() { *m = TouchRequest{} } -func (m *TouchRequest) String() string { return proto.CompactTextString(m) } -func (*TouchRequest) ProtoMessage() {} - -const Default_TouchRequest_Force bool = false - -func (m *TouchRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *TouchRequest) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex { - if m != nil { - return m.CompositeIndex - } - return nil -} - -func (m *TouchRequest) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return Default_TouchRequest_Force -} - -func (m *TouchRequest) GetSnapshot() []*Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -type TouchResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *TouchResponse) Reset() { *m = TouchResponse{} } -func (m *TouchResponse) String() string { return proto.CompactTextString(m) } -func (*TouchResponse) ProtoMessage() {} - -func (m *TouchResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -type DeleteRequest struct { - Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` - Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"` - Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"` - Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` - Force *bool `protobuf:"varint,7,opt,name=force,def=0" 
json:"force,omitempty"` - MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` - Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } -func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteRequest) ProtoMessage() {} - -const Default_DeleteRequest_Trusted bool = false -const Default_DeleteRequest_Force bool = false -const Default_DeleteRequest_MarkChanges bool = false - -func (m *DeleteRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *DeleteRequest) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *DeleteRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *DeleteRequest) GetTrusted() bool { - if m != nil && m.Trusted != nil { - return *m.Trusted - } - return Default_DeleteRequest_Trusted -} - -func (m *DeleteRequest) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return Default_DeleteRequest_Force -} - -func (m *DeleteRequest) GetMarkChanges() bool { - if m != nil && m.MarkChanges != nil { - return *m.MarkChanges - } - return Default_DeleteRequest_MarkChanges -} - -func (m *DeleteRequest) GetSnapshot() []*Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -type DeleteResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } -func (m *DeleteResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteResponse) ProtoMessage() {} - -func (m *DeleteResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *DeleteResponse) GetVersion() []int64 { - if m != nil { - return m.Version - } - return nil -} - -type NextRequest struct { - Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"` - Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"` - Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"` - Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"` - Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NextRequest) Reset() { *m = NextRequest{} } -func (m *NextRequest) String() string { return proto.CompactTextString(m) } -func (*NextRequest) ProtoMessage() {} - -const Default_NextRequest_Offset int32 = 0 -const Default_NextRequest_Compile bool = false - -func (m *NextRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *NextRequest) GetCursor() *Cursor { - if m != nil { - return m.Cursor - } - return nil -} - -func (m *NextRequest) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *NextRequest) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return Default_NextRequest_Offset -} - -func (m *NextRequest) GetCompile() bool { - if m != nil && m.Compile != nil { - return *m.Compile - } - return Default_NextRequest_Compile -} - -type QueryResult struct { - Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" 
json:"cursor,omitempty"` - Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"` - SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results" json:"skipped_results,omitempty"` - MoreResults *bool `protobuf:"varint,3,req,name=more_results" json:"more_results,omitempty"` - KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only" json:"keys_only,omitempty"` - IndexOnly *bool `protobuf:"varint,9,opt,name=index_only" json:"index_only,omitempty"` - SmallOps *bool `protobuf:"varint,10,opt,name=small_ops" json:"small_ops,omitempty"` - CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query" json:"compiled_query,omitempty"` - CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"` - Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"` - Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *QueryResult) Reset() { *m = QueryResult{} } -func (m *QueryResult) String() string { return proto.CompactTextString(m) } -func (*QueryResult) ProtoMessage() {} - -func (m *QueryResult) GetCursor() *Cursor { - if m != nil { - return m.Cursor - } - return nil -} - -func (m *QueryResult) GetResult() []*EntityProto { - if m != nil { - return m.Result - } - return nil -} - -func (m *QueryResult) GetSkippedResults() int32 { - if m != nil && m.SkippedResults != nil { - return *m.SkippedResults - } - return 0 -} - -func (m *QueryResult) GetMoreResults() bool { - if m != nil && m.MoreResults != nil { - return *m.MoreResults - } - return false -} - -func (m *QueryResult) GetKeysOnly() bool { - if m != nil && m.KeysOnly != nil { - return *m.KeysOnly - } - return false -} - -func (m *QueryResult) GetIndexOnly() bool { - if m != nil && m.IndexOnly != nil { - return *m.IndexOnly - } - return false -} - -func (m *QueryResult) GetSmallOps() bool { - if m != nil && m.SmallOps != nil { - return *m.SmallOps - } - return false -} - -func (m *QueryResult) GetCompiledQuery() *CompiledQuery { - if m != nil { - return m.CompiledQuery - } - return nil -} - -func (m *QueryResult) GetCompiledCursor() *CompiledCursor { - if m != nil { - return m.CompiledCursor - } - return nil -} - -func (m *QueryResult) GetIndex() []*CompositeIndex { - if m != nil { - return m.Index - } - return nil -} - -func (m *QueryResult) GetVersion() []int64 { - if m != nil { - return m.Version - } - return nil -} - -type AllocateIdsRequest struct { - Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` - ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key" json:"model_key,omitempty"` - Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` - Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"` - Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } -func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } -func (*AllocateIdsRequest) ProtoMessage() {} - -func (m *AllocateIdsRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AllocateIdsRequest) GetModelKey() *Reference { - if m != nil { - return m.ModelKey - } - return nil -} - -func (m *AllocateIdsRequest) GetSize() int64 { - if m != nil && m.Size != nil { - return *m.Size - } - return 0 -} - -func (m *AllocateIdsRequest) 
GetMax() int64 { - if m != nil && m.Max != nil { - return *m.Max - } - return 0 -} - -func (m *AllocateIdsRequest) GetReserve() []*Reference { - if m != nil { - return m.Reserve - } - return nil -} - -type AllocateIdsResponse struct { - Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"` - End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"` - Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } -func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } -func (*AllocateIdsResponse) ProtoMessage() {} - -func (m *AllocateIdsResponse) GetStart() int64 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *AllocateIdsResponse) GetEnd() int64 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func (m *AllocateIdsResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -type CompositeIndices struct { - Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompositeIndices) Reset() { *m = CompositeIndices{} } -func (m *CompositeIndices) String() string { return proto.CompactTextString(m) } -func (*CompositeIndices) ProtoMessage() {} - -func (m *CompositeIndices) GetIndex() []*CompositeIndex { - if m != nil { - return m.Index - } - return nil -} - -type AddActionsRequest struct { - Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` - Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` - Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} } -func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) } -func (*AddActionsRequest) ProtoMessage() {} - -func (m *AddActionsRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AddActionsRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *AddActionsRequest) GetAction() []*Action { - if m != nil { - return m.Action - } - return nil -} - -type AddActionsResponse struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} } -func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) } -func (*AddActionsResponse) ProtoMessage() {} - -type BeginTransactionRequest struct { - Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` - App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` - AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,def=0" json:"allow_multiple_eg,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } -func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } -func (*BeginTransactionRequest) ProtoMessage() {} - -const Default_BeginTransactionRequest_AllowMultipleEg bool = false - -func (m *BeginTransactionRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *BeginTransactionRequest) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m 
*BeginTransactionRequest) GetAllowMultipleEg() bool { - if m != nil && m.AllowMultipleEg != nil { - return *m.AllowMultipleEg - } - return Default_BeginTransactionRequest_AllowMultipleEg -} - -type CommitResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CommitResponse) Reset() { *m = CommitResponse{} } -func (m *CommitResponse) String() string { return proto.CompactTextString(m) } -func (*CommitResponse) ProtoMessage() {} - -func (m *CommitResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *CommitResponse) GetVersion() []*CommitResponse_Version { - if m != nil { - return m.Version - } - return nil -} - -type CommitResponse_Version struct { - RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key" json:"root_entity_key,omitempty"` - Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} } -func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) } -func (*CommitResponse_Version) ProtoMessage() {} - -func (m *CommitResponse_Version) GetRootEntityKey() *Reference { - if m != nil { - return m.RootEntityKey - } - return nil -} - -func (m *CommitResponse_Version) GetVersion() int64 { - if m != nil && m.Version != nil { - return *m.Version - } - return 0 -} - -func init() { -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto deleted file mode 100644 index e76f126f..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto +++ /dev/null @@ -1,541 +0,0 @@ -syntax = "proto2"; -option go_package = "datastore"; - -package appengine; - -message Action{} - -message PropertyValue { - optional int64 int64Value = 1; - optional bool booleanValue = 2; - optional string stringValue = 3; - optional double doubleValue = 4; - - optional group PointValue = 5 { - required double x = 6; - required double y = 7; - } - - optional group UserValue = 8 { - required string email = 9; - required string auth_domain = 10; - optional string nickname = 11; - optional string federated_identity = 21; - optional string federated_provider = 22; - } - - optional group ReferenceValue = 12 { - required string app = 13; - optional string name_space = 20; - repeated group PathElement = 14 { - required string type = 15; - optional int64 id = 16; - optional string name = 17; - } - } -} - -message Property { - enum Meaning { - NO_MEANING = 0; - BLOB = 14; - TEXT = 15; - BYTESTRING = 16; - - ATOM_CATEGORY = 1; - ATOM_LINK = 2; - ATOM_TITLE = 3; - ATOM_CONTENT = 4; - ATOM_SUMMARY = 5; - ATOM_AUTHOR = 6; - - GD_WHEN = 7; - GD_EMAIL = 8; - GEORSS_POINT = 9; - GD_IM = 10; - - GD_PHONENUMBER = 11; - GD_POSTALADDRESS = 12; - - GD_RATING = 13; - - BLOBKEY = 17; - ENTITY_PROTO = 19; - - INDEX_VALUE = 18; - }; - - optional Meaning meaning = 1 [default = NO_MEANING]; - optional string meaning_uri = 2; - - required string name = 3; - - required PropertyValue value = 5; - - required bool multiple = 4; - - optional bool searchable = 6 [default=false]; - - enum FtsTokenizationOption { - HTML = 1; - 
ATOM = 2; - } - - optional FtsTokenizationOption fts_tokenization_option = 8; - - optional string locale = 9 [default = "en"]; -} - -message Path { - repeated group Element = 1 { - required string type = 2; - optional int64 id = 3; - optional string name = 4; - } -} - -message Reference { - required string app = 13; - optional string name_space = 20; - required Path path = 14; -} - -message User { - required string email = 1; - required string auth_domain = 2; - optional string nickname = 3; - optional string federated_identity = 6; - optional string federated_provider = 7; -} - -message EntityProto { - required Reference key = 13; - required Path entity_group = 16; - optional User owner = 17; - - enum Kind { - GD_CONTACT = 1; - GD_EVENT = 2; - GD_MESSAGE = 3; - } - optional Kind kind = 4; - optional string kind_uri = 5; - - repeated Property property = 14; - repeated Property raw_property = 15; - - optional int32 rank = 18; -} - -message CompositeProperty { - required int64 index_id = 1; - repeated string value = 2; -} - -message Index { - required string entity_type = 1; - required bool ancestor = 5; - repeated group Property = 2 { - required string name = 3; - enum Direction { - ASCENDING = 1; - DESCENDING = 2; - } - optional Direction direction = 4 [default = ASCENDING]; - } -} - -message CompositeIndex { - required string app_id = 1; - required int64 id = 2; - required Index definition = 3; - - enum State { - WRITE_ONLY = 1; - READ_WRITE = 2; - DELETED = 3; - ERROR = 4; - } - required State state = 4; - - optional bool only_use_if_required = 6 [default = false]; -} - -message IndexPostfix { - message IndexValue { - required string property_name = 1; - required PropertyValue value = 2; - } - - repeated IndexValue index_value = 1; - - optional Reference key = 2; - - optional bool before = 3 [default=true]; -} - -message IndexPosition { - optional string key = 1; - - optional bool before = 2 [default=true]; -} - -message Snapshot { - enum Status { - INACTIVE = 0; - ACTIVE = 1; - } - - required int64 ts = 1; -} - -message InternalHeader { - optional string qos = 1; -} - -message Transaction { - optional InternalHeader header = 4; - required fixed64 handle = 1; - required string app = 2; - optional bool mark_changes = 3 [default = false]; -} - -message Query { - optional InternalHeader header = 39; - - required string app = 1; - optional string name_space = 29; - - optional string kind = 3; - optional Reference ancestor = 17; - - repeated group Filter = 4 { - enum Operator { - LESS_THAN = 1; - LESS_THAN_OR_EQUAL = 2; - GREATER_THAN = 3; - GREATER_THAN_OR_EQUAL = 4; - EQUAL = 5; - IN = 6; - EXISTS = 7; - } - - required Operator op = 6; - repeated Property property = 14; - } - - optional string search_query = 8; - - repeated group Order = 9 { - enum Direction { - ASCENDING = 1; - DESCENDING = 2; - } - - required string property = 10; - optional Direction direction = 11 [default = ASCENDING]; - } - - enum Hint { - ORDER_FIRST = 1; - ANCESTOR_FIRST = 2; - FILTER_FIRST = 3; - } - optional Hint hint = 18; - - optional int32 count = 23; - - optional int32 offset = 12 [default = 0]; - - optional int32 limit = 16; - - optional CompiledCursor compiled_cursor = 30; - optional CompiledCursor end_compiled_cursor = 31; - - repeated CompositeIndex composite_index = 19; - - optional bool require_perfect_plan = 20 [default = false]; - - optional bool keys_only = 21 [default = false]; - - optional Transaction transaction = 22; - - optional bool compile = 25 [default = false]; - - optional int64 failover_ms 
= 26; - - optional bool strong = 32; - - repeated string property_name = 33; - - repeated string group_by_property_name = 34; - - optional bool distinct = 24; - - optional int64 min_safe_time_seconds = 35; - - repeated string safe_replica_name = 36; - - optional bool persist_offset = 37 [default=false]; -} - -message CompiledQuery { - required group PrimaryScan = 1 { - optional string index_name = 2; - - optional string start_key = 3; - optional bool start_inclusive = 4; - optional string end_key = 5; - optional bool end_inclusive = 6; - - repeated string start_postfix_value = 22; - repeated string end_postfix_value = 23; - - optional int64 end_unapplied_log_timestamp_us = 19; - } - - repeated group MergeJoinScan = 7 { - required string index_name = 8; - - repeated string prefix_value = 9; - - optional bool value_prefix = 20 [default=false]; - } - - optional Index index_def = 21; - - optional int32 offset = 10 [default = 0]; - - optional int32 limit = 11; - - required bool keys_only = 12; - - repeated string property_name = 24; - - optional int32 distinct_infix_size = 25; - - optional group EntityFilter = 13 { - optional bool distinct = 14 [default=false]; - - optional string kind = 17; - optional Reference ancestor = 18; - } -} - -message CompiledCursor { - optional group Position = 2 { - optional string start_key = 27; - - repeated group IndexValue = 29 { - optional string property = 30; - required PropertyValue value = 31; - } - - optional Reference key = 32; - - optional bool start_inclusive = 28 [default=true]; - } -} - -message Cursor { - required fixed64 cursor = 1; - - optional string app = 2; -} - -message Error { - enum ErrorCode { - BAD_REQUEST = 1; - CONCURRENT_TRANSACTION = 2; - INTERNAL_ERROR = 3; - NEED_INDEX = 4; - TIMEOUT = 5; - PERMISSION_DENIED = 6; - BIGTABLE_ERROR = 7; - COMMITTED_BUT_STILL_APPLYING = 8; - CAPABILITY_DISABLED = 9; - TRY_ALTERNATE_BACKEND = 10; - SAFE_TIME_TOO_OLD = 11; - } -} - -message Cost { - optional int32 index_writes = 1; - optional int32 index_write_bytes = 2; - optional int32 entity_writes = 3; - optional int32 entity_write_bytes = 4; - optional group CommitCost = 5 { - optional int32 requested_entity_puts = 6; - optional int32 requested_entity_deletes = 7; - }; - optional int32 approximate_storage_delta = 8; - optional int32 id_sequence_updates = 9; -} - -message GetRequest { - optional InternalHeader header = 6; - - repeated Reference key = 1; - optional Transaction transaction = 2; - - optional int64 failover_ms = 3; - - optional bool strong = 4; - - optional bool allow_deferred = 5 [default=false]; -} - -message GetResponse { - repeated group Entity = 1 { - optional EntityProto entity = 2; - optional Reference key = 4; - - optional int64 version = 3; - } - - repeated Reference deferred = 5; - - optional bool in_order = 6 [default=true]; -} - -message PutRequest { - optional InternalHeader header = 11; - - repeated EntityProto entity = 1; - optional Transaction transaction = 2; - repeated CompositeIndex composite_index = 3; - - optional bool trusted = 4 [default = false]; - - optional bool force = 7 [default = false]; - - optional bool mark_changes = 8 [default = false]; - repeated Snapshot snapshot = 9; - - enum AutoIdPolicy { - CURRENT = 0; - SEQUENTIAL = 1; - } - optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT]; -} - -message PutResponse { - repeated Reference key = 1; - optional Cost cost = 2; - repeated int64 version = 3; -} - -message TouchRequest { - optional InternalHeader header = 10; - - repeated Reference key = 1; - 
repeated CompositeIndex composite_index = 2; - optional bool force = 3 [default = false]; - repeated Snapshot snapshot = 9; -} - -message TouchResponse { - optional Cost cost = 1; -} - -message DeleteRequest { - optional InternalHeader header = 10; - - repeated Reference key = 6; - optional Transaction transaction = 5; - - optional bool trusted = 4 [default = false]; - - optional bool force = 7 [default = false]; - - optional bool mark_changes = 8 [default = false]; - repeated Snapshot snapshot = 9; -} - -message DeleteResponse { - optional Cost cost = 1; - repeated int64 version = 3; -} - -message NextRequest { - optional InternalHeader header = 5; - - required Cursor cursor = 1; - optional int32 count = 2; - - optional int32 offset = 4 [default = 0]; - - optional bool compile = 3 [default = false]; -} - -message QueryResult { - optional Cursor cursor = 1; - - repeated EntityProto result = 2; - - optional int32 skipped_results = 7; - - required bool more_results = 3; - - optional bool keys_only = 4; - - optional bool index_only = 9; - - optional bool small_ops = 10; - - optional CompiledQuery compiled_query = 5; - - optional CompiledCursor compiled_cursor = 6; - - repeated CompositeIndex index = 8; - - repeated int64 version = 11; -} - -message AllocateIdsRequest { - optional InternalHeader header = 4; - - optional Reference model_key = 1; - - optional int64 size = 2; - - optional int64 max = 3; - - repeated Reference reserve = 5; -} - -message AllocateIdsResponse { - required int64 start = 1; - required int64 end = 2; - optional Cost cost = 3; -} - -message CompositeIndices { - repeated CompositeIndex index = 1; -} - -message AddActionsRequest { - optional InternalHeader header = 3; - - required Transaction transaction = 1; - repeated Action action = 2; -} - -message AddActionsResponse { -} - -message BeginTransactionRequest { - optional InternalHeader header = 3; - - required string app = 1; - optional bool allow_multiple_eg = 2 [default = false]; -} - -message CommitResponse { - optional Cost cost = 1; - - repeated group Version = 3 { - required Reference root_entity_key = 4; - required int64 version = 5; - } -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity.go deleted file mode 100644 index d538701a..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -import netcontext "golang.org/x/net/context" - -// These functions are implementations of the wrapper functions -// in ../appengine/identity.go. See that file for commentary. - -func AppID(c netcontext.Context) string { - return appID(FullyQualifiedAppID(c)) -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity_classic.go deleted file mode 100644 index e6b9227c..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity_classic.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
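The three identity files removed here show compile-time dispatch: the shared wrappers live in identity.go, while identity_classic.go (next, tagged `+build appengine`) and identity_vm.go (tagged `+build !appengine`) each carry a runtime-specific implementation, so exactly one of them is compiled into any given binary. A minimal sketch of that pattern, assuming hypothetical file and function names:

```go
//go:build appengine
// +build appengine

// identity_classic_sketch.go (hypothetical): the build tag above restricts
// this file to the classic App Engine runtime. A sibling file tagged
// `//go:build !appengine` would supply the other definition of datacenter(),
// mirroring the deleted identity_classic.go / identity_vm.go pair.
package internal

// datacenter stands in for functions like Datacenter or ModuleName, which
// the classic runtime answers via the appengine package rather than via
// request headers or the metadata server.
func datacenter() string { return "classic-runtime" }
```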
- -// +build appengine - -package internal - -import ( - "appengine" - - netcontext "golang.org/x/net/context" -) - -func DefaultVersionHostname(ctx netcontext.Context) string { - return appengine.DefaultVersionHostname(fromContext(ctx)) -} - -func RequestID(ctx netcontext.Context) string { return appengine.RequestID(fromContext(ctx)) } -func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } -func ServerSoftware() string { return appengine.ServerSoftware() } -func ModuleName(ctx netcontext.Context) string { return appengine.ModuleName(fromContext(ctx)) } -func VersionID(ctx netcontext.Context) string { return appengine.VersionID(fromContext(ctx)) } -func InstanceID() string { return appengine.InstanceID() } -func IsDevAppServer() bool { return appengine.IsDevAppServer() } - -func fullyQualifiedAppID(ctx netcontext.Context) string { return fromContext(ctx).FullyQualifiedAppID() } diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity_vm.go deleted file mode 100644 index ebe68b78..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/identity_vm.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package internal - -import ( - "net/http" - "os" - - netcontext "golang.org/x/net/context" -) - -// These functions are implementations of the wrapper functions -// in ../appengine/identity.go. See that file for commentary. - -const ( - hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname" - hRequestLogId = "X-AppEngine-Request-Log-Id" - hDatacenter = "X-AppEngine-Datacenter" -) - -func ctxHeaders(ctx netcontext.Context) http.Header { - return fromContext(ctx).Request().Header -} - -func DefaultVersionHostname(ctx netcontext.Context) string { - return ctxHeaders(ctx).Get(hDefaultVersionHostname) -} - -func RequestID(ctx netcontext.Context) string { - return ctxHeaders(ctx).Get(hRequestLogId) -} - -func Datacenter(ctx netcontext.Context) string { - return ctxHeaders(ctx).Get(hDatacenter) -} - -func ServerSoftware() string { - // TODO(dsymonds): Remove fallback when we've verified this. - if s := os.Getenv("SERVER_SOFTWARE"); s != "" { - return s - } - return "Google App Engine/1.x.x" -} - -// TODO(dsymonds): Remove the metadata fetches. - -func ModuleName(_ netcontext.Context) string { - if s := os.Getenv("GAE_MODULE_NAME"); s != "" { - return s - } - return string(mustGetMetadata("instance/attributes/gae_backend_name")) -} - -func VersionID(_ netcontext.Context) string { - if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { - return s1 + "." + s2 - } - return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version")) -} - -func InstanceID() string { - if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" { - return s - } - return string(mustGetMetadata("instance/attributes/gae_backend_instance")) -} - -func partitionlessAppID() string { - // gae_project has everything except the partition prefix. 
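Every lookup in the deleted identity_vm.go, including partitionlessAppID whose body continues just below, has the same shape: prefer an environment variable, fall back to the metadata server. A sketch of that shape factored into one helper; `envOrMetadata` is hypothetical, and the package's real `mustGetMetadata` is stubbed only so the snippet compiles on its own:

```go
package internal

import "os"

// mustGetMetadata exists elsewhere in the real package; this stub only makes
// the sketch self-contained.
func mustGetMetadata(path string) []byte { return nil }

// envOrMetadata captures the env-first, metadata-fallback lookup shared by
// ModuleName, VersionID, InstanceID, and partitionlessAppID.
func envOrMetadata(envKey, metadataPath string) string {
	if s := os.Getenv(envKey); s != "" {
		return s
	}
	return string(mustGetMetadata(metadataPath))
}
```

Under this factoring, ModuleName reduces to `envOrMetadata("GAE_MODULE_NAME", "instance/attributes/gae_backend_name")`.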
- appID := os.Getenv("GAE_LONG_APP_ID") - if appID == "" { - appID = string(mustGetMetadata("instance/attributes/gae_project")) - } - return appID -} - -func fullyQualifiedAppID(_ netcontext.Context) string { - appID := partitionlessAppID() - - part := os.Getenv("GAE_PARTITION") - if part == "" { - part = string(mustGetMetadata("instance/attributes/gae_partition")) - } - - if part != "" { - appID = part + "~" + appID - } - return appID -} - -func IsDevAppServer() bool { - return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/internal.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/internal.go deleted file mode 100644 index 66e8d768..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/internal.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package internal provides support for package appengine. -// -// Programs should not use this package directly. Its API is not stable. -// Use packages appengine and appengine/* instead. -package internal - -import ( - "fmt" - "io" - "log" - "net/http" - "net/url" - "os" - - "github.com/golang/protobuf/proto" - - remotepb "google.golang.org/appengine/internal/remote_api" -) - -// errorCodeMaps is a map of service name to the error code map for the service. -var errorCodeMaps = make(map[string]map[int32]string) - -// RegisterErrorCodeMap is called from API implementations to register their -// error code map. This should only be called from init functions. -func RegisterErrorCodeMap(service string, m map[int32]string) { - errorCodeMaps[service] = m -} - -type timeoutCodeKey struct { - service string - code int32 -} - -// timeoutCodes is the set of service+code pairs that represent timeouts. -var timeoutCodes = make(map[timeoutCodeKey]bool) - -func RegisterTimeoutErrorCode(service string, code int32) { - timeoutCodes[timeoutCodeKey{service, code}] = true -} - -// APIError is the type returned by appengine.Context's Call method -// when an API call fails in an API-specific way. This may be, for instance, -// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE. -type APIError struct { - Service string - Detail string - Code int32 // API-specific error code -} - -func (e *APIError) Error() string { - if e.Code == 0 { - if e.Detail == "" { - return "APIError " - } - return e.Detail - } - s := fmt.Sprintf("API error %d", e.Code) - if m, ok := errorCodeMaps[e.Service]; ok { - s += " (" + e.Service + ": " + m[e.Code] + ")" - } else { - // Shouldn't happen, but provide a bit more detail if it does. - s = e.Service + " " + s - } - if e.Detail != "" { - s += ": " + e.Detail - } - return s -} - -func (e *APIError) IsTimeout() bool { - return timeoutCodes[timeoutCodeKey{e.Service, e.Code}] -} - -// CallError is the type returned by appengine.Context's Call method when an -// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED. -type CallError struct { - Detail string - Code int32 - // TODO: Remove this if we get a distinguishable error code. 
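RegisterErrorCodeMap and RegisterTimeoutErrorCode above are the hooks that let APIError.Error print a symbolic name and APIError.IsTimeout classify timeouts. A sketch of how a service package would call them from an init function; the code values come from the datastore Error enum deleted earlier in this diff, but the registration itself is illustrative, not taken from the repository:

```go
// Illustrative registration, typically done in an API package's init.
func init() {
	// With this map, APIError.Error renders
	// "API error 5 (datastore_v3: TIMEOUT)" instead of a bare code.
	RegisterErrorCodeMap("datastore_v3", map[int32]string{
		1: "BAD_REQUEST",
		2: "CONCURRENT_TRANSACTION",
		3: "INTERNAL_ERROR",
		4: "NEED_INDEX",
		5: "TIMEOUT",
	})
	// Marks code 5 as a timeout, so IsTimeout() reports true for it.
	RegisterTimeoutErrorCode("datastore_v3", 5)
}
```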
- Timeout bool -} - -func (e *CallError) Error() string { - var msg string - switch remotepb.RpcError_ErrorCode(e.Code) { - case remotepb.RpcError_UNKNOWN: - return e.Detail - case remotepb.RpcError_OVER_QUOTA: - msg = "Over quota" - case remotepb.RpcError_CAPABILITY_DISABLED: - msg = "Capability disabled" - case remotepb.RpcError_CANCELLED: - msg = "Canceled" - default: - msg = fmt.Sprintf("Call error %d", e.Code) - } - s := msg + ": " + e.Detail - if e.Timeout { - s += " (timeout)" - } - return s -} - -func (e *CallError) IsTimeout() bool { - return e.Timeout -} - -func Main() { - installHealthChecker(http.DefaultServeMux) - - port := "8080" - if s := os.Getenv("PORT"); s != "" { - port = s - } - - if err := http.ListenAndServe(":"+port, http.HandlerFunc(handleHTTP)); err != nil { - log.Fatalf("http.ListenAndServe: %v", err) - } -} - -func installHealthChecker(mux *http.ServeMux) { - // If no health check handler has been installed by this point, add a trivial one. - const healthPath = "/_ah/health" - hreq := &http.Request{ - Method: "GET", - URL: &url.URL{ - Path: healthPath, - }, - } - if _, pat := mux.Handler(hreq); pat != healthPath { - mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, "ok") - }) - } -} - -// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace. -// The function should be prepared to be called on the same message more than once; it should only modify the -// RPC request the first time. -var NamespaceMods = make(map[string]func(m proto.Message, namespace string)) diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/log/log_service.pb.go deleted file mode 100644 index 20c595be..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/log/log_service.pb.go +++ /dev/null @@ -1,899 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/log/log_service.proto -// DO NOT EDIT! - -/* -Package log is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/log/log_service.proto - -It has these top-level messages: - LogServiceError - UserAppLogLine - UserAppLogGroup - FlushRequest - SetStatusRequest - LogOffset - LogLine - RequestLog - LogModuleVersion - LogReadRequest - LogReadResponse - LogUsageRecord - LogUsageRequest - LogUsageResponse -*/ -package log - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
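installHealthChecker above only adds its trivial `/_ah/health` handler when nothing else has claimed that path, and it detects this by probing the mux with a synthetic request: ServeMux.Handler reports the pattern that would serve the request, and any pattern other than the exact path means no specific handler is installed. A standalone sketch of that probe, with a hypothetical ensureHandler name:

```go
package main

import (
	"io"
	"net/http"
	"net/url"
)

// ensureHandler registers fallback for path unless the mux already has a
// handler for exactly that path, the same probe used by the deleted
// installHealthChecker.
func ensureHandler(mux *http.ServeMux, path string, fallback http.HandlerFunc) {
	probe := &http.Request{Method: "GET", URL: &url.URL{Path: path}}
	if _, pattern := mux.Handler(probe); pattern != path {
		mux.HandleFunc(path, fallback)
	}
}

func main() {
	mux := http.NewServeMux()
	ensureHandler(mux, "/_ah/health", func(w http.ResponseWriter, _ *http.Request) {
		io.WriteString(w, "ok")
	})
	// A second call is a no-op: the first registration now answers the probe.
	ensureHandler(mux, "/_ah/health", func(w http.ResponseWriter, _ *http.Request) {
		io.WriteString(w, "never installed")
	})
}
```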
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type LogServiceError_ErrorCode int32 - -const ( - LogServiceError_OK LogServiceError_ErrorCode = 0 - LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1 - LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2 -) - -var LogServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "INVALID_REQUEST", - 2: "STORAGE_ERROR", -} -var LogServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "INVALID_REQUEST": 1, - "STORAGE_ERROR": 2, -} - -func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode { - p := new(LogServiceError_ErrorCode) - *p = x - return p -} -func (x LogServiceError_ErrorCode) String() string { - return proto.EnumName(LogServiceError_ErrorCode_name, int32(x)) -} -func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode") - if err != nil { - return err - } - *x = LogServiceError_ErrorCode(value) - return nil -} - -type LogServiceError struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogServiceError) Reset() { *m = LogServiceError{} } -func (m *LogServiceError) String() string { return proto.CompactTextString(m) } -func (*LogServiceError) ProtoMessage() {} - -type UserAppLogLine struct { - TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec" json:"timestamp_usec,omitempty"` - Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` - Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} } -func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) } -func (*UserAppLogLine) ProtoMessage() {} - -func (m *UserAppLogLine) GetTimestampUsec() int64 { - if m != nil && m.TimestampUsec != nil { - return *m.TimestampUsec - } - return 0 -} - -func (m *UserAppLogLine) GetLevel() int64 { - if m != nil && m.Level != nil { - return *m.Level - } - return 0 -} - -func (m *UserAppLogLine) GetMessage() string { - if m != nil && m.Message != nil { - return *m.Message - } - return "" -} - -type UserAppLogGroup struct { - LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line" json:"log_line,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} } -func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) } -func (*UserAppLogGroup) ProtoMessage() {} - -func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine { - if m != nil { - return m.LogLine - } - return nil -} - -type FlushRequest struct { - Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FlushRequest) Reset() { *m = FlushRequest{} } -func (m *FlushRequest) String() string { return proto.CompactTextString(m) } -func (*FlushRequest) ProtoMessage() {} - -func (m *FlushRequest) GetLogs() []byte { - if m != nil { - return m.Logs - } - return nil -} - -type SetStatusRequest struct { - Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} } -func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) } -func (*SetStatusRequest) ProtoMessage() {} - -func (m *SetStatusRequest) GetStatus() string { - if m != nil && m.Status != nil { - return *m.Status - } - 
return "" -} - -type LogOffset struct { - RequestId []byte `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogOffset) Reset() { *m = LogOffset{} } -func (m *LogOffset) String() string { return proto.CompactTextString(m) } -func (*LogOffset) ProtoMessage() {} - -func (m *LogOffset) GetRequestId() []byte { - if m != nil { - return m.RequestId - } - return nil -} - -type LogLine struct { - Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"` - Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` - LogMessage *string `protobuf:"bytes,3,req,name=log_message" json:"log_message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogLine) Reset() { *m = LogLine{} } -func (m *LogLine) String() string { return proto.CompactTextString(m) } -func (*LogLine) ProtoMessage() {} - -func (m *LogLine) GetTime() int64 { - if m != nil && m.Time != nil { - return *m.Time - } - return 0 -} - -func (m *LogLine) GetLevel() int32 { - if m != nil && m.Level != nil { - return *m.Level - } - return 0 -} - -func (m *LogLine) GetLogMessage() string { - if m != nil && m.LogMessage != nil { - return *m.LogMessage - } - return "" -} - -type RequestLog struct { - AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` - ModuleId *string `protobuf:"bytes,37,opt,name=module_id,def=default" json:"module_id,omitempty"` - VersionId *string `protobuf:"bytes,2,req,name=version_id" json:"version_id,omitempty"` - RequestId []byte `protobuf:"bytes,3,req,name=request_id" json:"request_id,omitempty"` - Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"` - Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"` - Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"` - StartTime *int64 `protobuf:"varint,6,req,name=start_time" json:"start_time,omitempty"` - EndTime *int64 `protobuf:"varint,7,req,name=end_time" json:"end_time,omitempty"` - Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"` - Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"` - Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"` - Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"` - HttpVersion *string `protobuf:"bytes,12,req,name=http_version" json:"http_version,omitempty"` - Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"` - ResponseSize *int64 `protobuf:"varint,14,req,name=response_size" json:"response_size,omitempty"` - Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"` - UserAgent *string `protobuf:"bytes,16,opt,name=user_agent" json:"user_agent,omitempty"` - UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry" json:"url_map_entry,omitempty"` - Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"` - ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles" json:"api_mcycles,omitempty"` - Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"` - Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"` - TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name" json:"task_queue_name,omitempty"` - TaskName *string `protobuf:"bytes,23,opt,name=task_name" json:"task_name,omitempty"` - WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request" json:"was_loading_request,omitempty"` - PendingTime *int64 
`protobuf:"varint,25,opt,name=pending_time" json:"pending_time,omitempty"` - ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,def=-1" json:"replica_index,omitempty"` - Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"` - CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key" json:"clone_key,omitempty"` - Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"` - LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete" json:"lines_incomplete,omitempty"` - AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release" json:"app_engine_release,omitempty"` - ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason" json:"exit_reason,omitempty"` - WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time" json:"was_throttled_for_time,omitempty"` - WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests" json:"was_throttled_for_requests,omitempty"` - ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time" json:"throttled_time,omitempty"` - ServerName []byte `protobuf:"bytes,34,opt,name=server_name" json:"server_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RequestLog) Reset() { *m = RequestLog{} } -func (m *RequestLog) String() string { return proto.CompactTextString(m) } -func (*RequestLog) ProtoMessage() {} - -const Default_RequestLog_ModuleId string = "default" -const Default_RequestLog_ReplicaIndex int32 = -1 -const Default_RequestLog_Finished bool = true - -func (m *RequestLog) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *RequestLog) GetModuleId() string { - if m != nil && m.ModuleId != nil { - return *m.ModuleId - } - return Default_RequestLog_ModuleId -} - -func (m *RequestLog) GetVersionId() string { - if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -func (m *RequestLog) GetRequestId() []byte { - if m != nil { - return m.RequestId - } - return nil -} - -func (m *RequestLog) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *RequestLog) GetIp() string { - if m != nil && m.Ip != nil { - return *m.Ip - } - return "" -} - -func (m *RequestLog) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *RequestLog) GetStartTime() int64 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *RequestLog) GetEndTime() int64 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *RequestLog) GetLatency() int64 { - if m != nil && m.Latency != nil { - return *m.Latency - } - return 0 -} - -func (m *RequestLog) GetMcycles() int64 { - if m != nil && m.Mcycles != nil { - return *m.Mcycles - } - return 0 -} - -func (m *RequestLog) GetMethod() string { - if m != nil && m.Method != nil { - return *m.Method - } - return "" -} - -func (m *RequestLog) GetResource() string { - if m != nil && m.Resource != nil { - return *m.Resource - } - return "" -} - -func (m *RequestLog) GetHttpVersion() string { - if m != nil && m.HttpVersion != nil { - return *m.HttpVersion - } - return "" -} - -func (m *RequestLog) GetStatus() int32 { - if m != nil && m.Status != nil { - return *m.Status - } - return 0 -} - -func (m *RequestLog) GetResponseSize() int64 { - if m != nil && m.ResponseSize != nil { - return *m.ResponseSize - } - return 0 -} - -func (m *RequestLog) GetReferrer() string { - if m != nil 
&& m.Referrer != nil { - return *m.Referrer - } - return "" -} - -func (m *RequestLog) GetUserAgent() string { - if m != nil && m.UserAgent != nil { - return *m.UserAgent - } - return "" -} - -func (m *RequestLog) GetUrlMapEntry() string { - if m != nil && m.UrlMapEntry != nil { - return *m.UrlMapEntry - } - return "" -} - -func (m *RequestLog) GetCombined() string { - if m != nil && m.Combined != nil { - return *m.Combined - } - return "" -} - -func (m *RequestLog) GetApiMcycles() int64 { - if m != nil && m.ApiMcycles != nil { - return *m.ApiMcycles - } - return 0 -} - -func (m *RequestLog) GetHost() string { - if m != nil && m.Host != nil { - return *m.Host - } - return "" -} - -func (m *RequestLog) GetCost() float64 { - if m != nil && m.Cost != nil { - return *m.Cost - } - return 0 -} - -func (m *RequestLog) GetTaskQueueName() string { - if m != nil && m.TaskQueueName != nil { - return *m.TaskQueueName - } - return "" -} - -func (m *RequestLog) GetTaskName() string { - if m != nil && m.TaskName != nil { - return *m.TaskName - } - return "" -} - -func (m *RequestLog) GetWasLoadingRequest() bool { - if m != nil && m.WasLoadingRequest != nil { - return *m.WasLoadingRequest - } - return false -} - -func (m *RequestLog) GetPendingTime() int64 { - if m != nil && m.PendingTime != nil { - return *m.PendingTime - } - return 0 -} - -func (m *RequestLog) GetReplicaIndex() int32 { - if m != nil && m.ReplicaIndex != nil { - return *m.ReplicaIndex - } - return Default_RequestLog_ReplicaIndex -} - -func (m *RequestLog) GetFinished() bool { - if m != nil && m.Finished != nil { - return *m.Finished - } - return Default_RequestLog_Finished -} - -func (m *RequestLog) GetCloneKey() []byte { - if m != nil { - return m.CloneKey - } - return nil -} - -func (m *RequestLog) GetLine() []*LogLine { - if m != nil { - return m.Line - } - return nil -} - -func (m *RequestLog) GetLinesIncomplete() bool { - if m != nil && m.LinesIncomplete != nil { - return *m.LinesIncomplete - } - return false -} - -func (m *RequestLog) GetAppEngineRelease() []byte { - if m != nil { - return m.AppEngineRelease - } - return nil -} - -func (m *RequestLog) GetExitReason() int32 { - if m != nil && m.ExitReason != nil { - return *m.ExitReason - } - return 0 -} - -func (m *RequestLog) GetWasThrottledForTime() bool { - if m != nil && m.WasThrottledForTime != nil { - return *m.WasThrottledForTime - } - return false -} - -func (m *RequestLog) GetWasThrottledForRequests() bool { - if m != nil && m.WasThrottledForRequests != nil { - return *m.WasThrottledForRequests - } - return false -} - -func (m *RequestLog) GetThrottledTime() int64 { - if m != nil && m.ThrottledTime != nil { - return *m.ThrottledTime - } - return 0 -} - -func (m *RequestLog) GetServerName() []byte { - if m != nil { - return m.ServerName - } - return nil -} - -type LogModuleVersion struct { - ModuleId *string `protobuf:"bytes,1,opt,name=module_id,def=default" json:"module_id,omitempty"` - VersionId *string `protobuf:"bytes,2,opt,name=version_id" json:"version_id,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} } -func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) } -func (*LogModuleVersion) ProtoMessage() {} - -const Default_LogModuleVersion_ModuleId string = "default" - -func (m *LogModuleVersion) GetModuleId() string { - if m != nil && m.ModuleId != nil { - return *m.ModuleId - } - return Default_LogModuleVersion_ModuleId -} - -func (m *LogModuleVersion) GetVersionId() string { - 
if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -type LogReadRequest struct { - AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` - VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"` - ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version" json:"module_version,omitempty"` - StartTime *int64 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"` - EndTime *int64 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"` - Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"` - RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id" json:"request_id,omitempty"` - MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level" json:"minimum_log_level,omitempty"` - IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete" json:"include_incomplete,omitempty"` - Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"` - CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex" json:"combined_log_regex,omitempty"` - HostRegex *string `protobuf:"bytes,15,opt,name=host_regex" json:"host_regex,omitempty"` - ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index" json:"replica_index,omitempty"` - IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs" json:"include_app_logs,omitempty"` - AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request" json:"app_logs_per_request,omitempty"` - IncludeHost *bool `protobuf:"varint,11,opt,name=include_host" json:"include_host,omitempty"` - IncludeAll *bool `protobuf:"varint,12,opt,name=include_all" json:"include_all,omitempty"` - CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator" json:"cache_iterator,omitempty"` - NumShards *int32 `protobuf:"varint,18,opt,name=num_shards" json:"num_shards,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogReadRequest) Reset() { *m = LogReadRequest{} } -func (m *LogReadRequest) String() string { return proto.CompactTextString(m) } -func (*LogReadRequest) ProtoMessage() {} - -func (m *LogReadRequest) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *LogReadRequest) GetVersionId() []string { - if m != nil { - return m.VersionId - } - return nil -} - -func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion { - if m != nil { - return m.ModuleVersion - } - return nil -} - -func (m *LogReadRequest) GetStartTime() int64 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogReadRequest) GetEndTime() int64 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogReadRequest) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *LogReadRequest) GetRequestId() [][]byte { - if m != nil { - return m.RequestId - } - return nil -} - -func (m *LogReadRequest) GetMinimumLogLevel() int32 { - if m != nil && m.MinimumLogLevel != nil { - return *m.MinimumLogLevel - } - return 0 -} - -func (m *LogReadRequest) GetIncludeIncomplete() bool { - if m != nil && m.IncludeIncomplete != nil { - return *m.IncludeIncomplete - } - return false -} - -func (m *LogReadRequest) GetCount() int64 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *LogReadRequest) GetCombinedLogRegex() string { - if m != nil && m.CombinedLogRegex != nil { - return *m.CombinedLogRegex - } - return 
"" -} - -func (m *LogReadRequest) GetHostRegex() string { - if m != nil && m.HostRegex != nil { - return *m.HostRegex - } - return "" -} - -func (m *LogReadRequest) GetReplicaIndex() int32 { - if m != nil && m.ReplicaIndex != nil { - return *m.ReplicaIndex - } - return 0 -} - -func (m *LogReadRequest) GetIncludeAppLogs() bool { - if m != nil && m.IncludeAppLogs != nil { - return *m.IncludeAppLogs - } - return false -} - -func (m *LogReadRequest) GetAppLogsPerRequest() int32 { - if m != nil && m.AppLogsPerRequest != nil { - return *m.AppLogsPerRequest - } - return 0 -} - -func (m *LogReadRequest) GetIncludeHost() bool { - if m != nil && m.IncludeHost != nil { - return *m.IncludeHost - } - return false -} - -func (m *LogReadRequest) GetIncludeAll() bool { - if m != nil && m.IncludeAll != nil { - return *m.IncludeAll - } - return false -} - -func (m *LogReadRequest) GetCacheIterator() bool { - if m != nil && m.CacheIterator != nil { - return *m.CacheIterator - } - return false -} - -func (m *LogReadRequest) GetNumShards() int32 { - if m != nil && m.NumShards != nil { - return *m.NumShards - } - return 0 -} - -type LogReadResponse struct { - Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"` - Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"` - LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time" json:"last_end_time,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogReadResponse) Reset() { *m = LogReadResponse{} } -func (m *LogReadResponse) String() string { return proto.CompactTextString(m) } -func (*LogReadResponse) ProtoMessage() {} - -func (m *LogReadResponse) GetLog() []*RequestLog { - if m != nil { - return m.Log - } - return nil -} - -func (m *LogReadResponse) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *LogReadResponse) GetLastEndTime() int64 { - if m != nil && m.LastEndTime != nil { - return *m.LastEndTime - } - return 0 -} - -type LogUsageRecord struct { - VersionId *string `protobuf:"bytes,1,opt,name=version_id" json:"version_id,omitempty"` - StartTime *int32 `protobuf:"varint,2,opt,name=start_time" json:"start_time,omitempty"` - EndTime *int32 `protobuf:"varint,3,opt,name=end_time" json:"end_time,omitempty"` - Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` - TotalSize *int64 `protobuf:"varint,5,opt,name=total_size" json:"total_size,omitempty"` - Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} } -func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) } -func (*LogUsageRecord) ProtoMessage() {} - -func (m *LogUsageRecord) GetVersionId() string { - if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -func (m *LogUsageRecord) GetStartTime() int32 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogUsageRecord) GetEndTime() int32 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogUsageRecord) GetCount() int64 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *LogUsageRecord) GetTotalSize() int64 { - if m != nil && m.TotalSize != nil { - return *m.TotalSize - } - return 0 -} - -func (m *LogUsageRecord) GetRecords() int32 { - if m != nil && m.Records != nil { - return *m.Records - } - return 0 -} - -type LogUsageRequest struct { - AppId *string 
`protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` - VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"` - StartTime *int32 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"` - EndTime *int32 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"` - ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,def=1" json:"resolution_hours,omitempty"` - CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions" json:"combine_versions,omitempty"` - UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version" json:"usage_version,omitempty"` - VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only" json:"versions_only,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} } -func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) } -func (*LogUsageRequest) ProtoMessage() {} - -const Default_LogUsageRequest_ResolutionHours uint32 = 1 - -func (m *LogUsageRequest) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *LogUsageRequest) GetVersionId() []string { - if m != nil { - return m.VersionId - } - return nil -} - -func (m *LogUsageRequest) GetStartTime() int32 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogUsageRequest) GetEndTime() int32 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogUsageRequest) GetResolutionHours() uint32 { - if m != nil && m.ResolutionHours != nil { - return *m.ResolutionHours - } - return Default_LogUsageRequest_ResolutionHours -} - -func (m *LogUsageRequest) GetCombineVersions() bool { - if m != nil && m.CombineVersions != nil { - return *m.CombineVersions - } - return false -} - -func (m *LogUsageRequest) GetUsageVersion() int32 { - if m != nil && m.UsageVersion != nil { - return *m.UsageVersion - } - return 0 -} - -func (m *LogUsageRequest) GetVersionsOnly() bool { - if m != nil && m.VersionsOnly != nil { - return *m.VersionsOnly - } - return false -} - -type LogUsageResponse struct { - Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"` - Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} } -func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) } -func (*LogUsageResponse) ProtoMessage() {} - -func (m *LogUsageResponse) GetUsage() []*LogUsageRecord { - if m != nil { - return m.Usage - } - return nil -} - -func (m *LogUsageResponse) GetSummary() *LogUsageRecord { - if m != nil { - return m.Summary - } - return nil -} - -func init() { -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/log/log_service.proto deleted file mode 100644 index 8981dc47..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/log/log_service.proto +++ /dev/null @@ -1,150 +0,0 @@ -syntax = "proto2"; -option go_package = "log"; - -package appengine; - -message LogServiceError { - enum ErrorCode { - OK = 0; - INVALID_REQUEST = 1; - STORAGE_ERROR = 2; - } -} - -message UserAppLogLine { - required int64 timestamp_usec = 1; - required int64 level = 2; - required string message = 3; -} - -message 
UserAppLogGroup { - repeated UserAppLogLine log_line = 2; -} - -message FlushRequest { - optional bytes logs = 1; -} - -message SetStatusRequest { - required string status = 1; -} - - -message LogOffset { - optional bytes request_id = 1; -} - -message LogLine { - required int64 time = 1; - required int32 level = 2; - required string log_message = 3; -} - -message RequestLog { - required string app_id = 1; - optional string module_id = 37 [default="default"]; - required string version_id = 2; - required bytes request_id = 3; - optional LogOffset offset = 35; - required string ip = 4; - optional string nickname = 5; - required int64 start_time = 6; - required int64 end_time = 7; - required int64 latency = 8; - required int64 mcycles = 9; - required string method = 10; - required string resource = 11; - required string http_version = 12; - required int32 status = 13; - required int64 response_size = 14; - optional string referrer = 15; - optional string user_agent = 16; - required string url_map_entry = 17; - required string combined = 18; - optional int64 api_mcycles = 19; - optional string host = 20; - optional double cost = 21; - - optional string task_queue_name = 22; - optional string task_name = 23; - - optional bool was_loading_request = 24; - optional int64 pending_time = 25; - optional int32 replica_index = 26 [default = -1]; - optional bool finished = 27 [default = true]; - optional bytes clone_key = 28; - - repeated LogLine line = 29; - - optional bool lines_incomplete = 36; - optional bytes app_engine_release = 38; - - optional int32 exit_reason = 30; - optional bool was_throttled_for_time = 31; - optional bool was_throttled_for_requests = 32; - optional int64 throttled_time = 33; - - optional bytes server_name = 34; -} - -message LogModuleVersion { - optional string module_id = 1 [default="default"]; - optional string version_id = 2; -} - -message LogReadRequest { - required string app_id = 1; - repeated string version_id = 2; - repeated LogModuleVersion module_version = 19; - - optional int64 start_time = 3; - optional int64 end_time = 4; - optional LogOffset offset = 5; - repeated bytes request_id = 6; - - optional int32 minimum_log_level = 7; - optional bool include_incomplete = 8; - optional int64 count = 9; - - optional string combined_log_regex = 14; - optional string host_regex = 15; - optional int32 replica_index = 16; - - optional bool include_app_logs = 10; - optional int32 app_logs_per_request = 17; - optional bool include_host = 11; - optional bool include_all = 12; - optional bool cache_iterator = 13; - optional int32 num_shards = 18; -} - -message LogReadResponse { - repeated RequestLog log = 1; - optional LogOffset offset = 2; - optional int64 last_end_time = 3; -} - -message LogUsageRecord { - optional string version_id = 1; - optional int32 start_time = 2; - optional int32 end_time = 3; - optional int64 count = 4; - optional int64 total_size = 5; - optional int32 records = 6; -} - -message LogUsageRequest { - required string app_id = 1; - repeated string version_id = 2; - optional int32 start_time = 3; - optional int32 end_time = 4; - optional uint32 resolution_hours = 5 [default = 1]; - optional bool combine_versions = 6; - optional int32 usage_version = 7; - optional bool versions_only = 8; -} - -message LogUsageResponse { - repeated LogUsageRecord usage = 1; - optional LogUsageRecord summary = 2; -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/metadata.go 
b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/metadata.go deleted file mode 100644 index 9cc1f71d..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/metadata.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file has code for accessing metadata. -// -// References: -// https://cloud.google.com/compute/docs/metadata - -import ( - "fmt" - "io/ioutil" - "log" - "net/http" - "net/url" -) - -const ( - metadataHost = "metadata" - metadataPath = "/computeMetadata/v1/" -) - -var ( - metadataRequestHeaders = http.Header{ - "Metadata-Flavor": []string{"Google"}, - } -) - -// TODO(dsymonds): Do we need to support default values, like Python? -func mustGetMetadata(key string) []byte { - b, err := getMetadata(key) - if err != nil { - log.Fatalf("Metadata fetch failed: %v", err) - } - return b -} - -func getMetadata(key string) ([]byte, error) { - // TODO(dsymonds): May need to use url.Parse to support keys with query args. - req := &http.Request{ - Method: "GET", - URL: &url.URL{ - Scheme: "http", - Host: metadataHost, - Path: metadataPath + key, - }, - Header: metadataRequestHeaders, - Host: metadataHost, - } - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode != 200 { - return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode) - } - return ioutil.ReadAll(resp.Body) -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go deleted file mode 100644 index a0145ed3..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go +++ /dev/null @@ -1,375 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/modules/modules_service.proto -// DO NOT EDIT! - -/* -Package modules is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/modules/modules_service.proto - -It has these top-level messages: - ModulesServiceError - GetModulesRequest - GetModulesResponse - GetVersionsRequest - GetVersionsResponse - GetDefaultVersionRequest - GetDefaultVersionResponse - GetNumInstancesRequest - GetNumInstancesResponse - SetNumInstancesRequest - SetNumInstancesResponse - StartModuleRequest - StartModuleResponse - StopModuleRequest - StopModuleResponse - GetHostnameRequest - GetHostnameResponse -*/ -package modules - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type ModulesServiceError_ErrorCode int32 - -const ( - ModulesServiceError_OK ModulesServiceError_ErrorCode = 0 - ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1 - ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2 - ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3 - ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4 - ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5 -) - -var ModulesServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "INVALID_MODULE", - 2: "INVALID_VERSION", - 3: "INVALID_INSTANCES", - 4: "TRANSIENT_ERROR", - 5: "UNEXPECTED_STATE", -} -var ModulesServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "INVALID_MODULE": 1, - "INVALID_VERSION": 2, - "INVALID_INSTANCES": 3, - "TRANSIENT_ERROR": 4, - "UNEXPECTED_STATE": 5, -} - -func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode { - p := new(ModulesServiceError_ErrorCode) - *p = x - return p -} -func (x ModulesServiceError_ErrorCode) String() string { - return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x)) -} -func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode") - if err != nil { - return err - } - *x = ModulesServiceError_ErrorCode(value) - return nil -} - -type ModulesServiceError struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} } -func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) } -func (*ModulesServiceError) ProtoMessage() {} - -type GetModulesRequest struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} } -func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) } -func (*GetModulesRequest) ProtoMessage() {} - -type GetModulesResponse struct { - Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} } -func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) } -func (*GetModulesResponse) ProtoMessage() {} - -func (m *GetModulesResponse) GetModule() []string { - if m != nil { - return m.Module - } - return nil -} - -type GetVersionsRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} } -func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) } -func (*GetVersionsRequest) ProtoMessage() {} - -func (m *GetVersionsRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -type GetVersionsResponse struct { - Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} } -func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) } -func (*GetVersionsResponse) ProtoMessage() {} - -func (m *GetVersionsResponse) GetVersion() []string { - if m != nil { - return m.Version - } - return nil -} - -type GetDefaultVersionRequest struct { - Module *string 
`protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} } -func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) } -func (*GetDefaultVersionRequest) ProtoMessage() {} - -func (m *GetDefaultVersionRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -type GetDefaultVersionResponse struct { - Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} } -func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) } -func (*GetDefaultVersionResponse) ProtoMessage() {} - -func (m *GetDefaultVersionResponse) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type GetNumInstancesRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} } -func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) } -func (*GetNumInstancesRequest) ProtoMessage() {} - -func (m *GetNumInstancesRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *GetNumInstancesRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type GetNumInstancesResponse struct { - Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} } -func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) } -func (*GetNumInstancesResponse) ProtoMessage() {} - -func (m *GetNumInstancesResponse) GetInstances() int64 { - if m != nil && m.Instances != nil { - return *m.Instances - } - return 0 -} - -type SetNumInstancesRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} } -func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) } -func (*SetNumInstancesRequest) ProtoMessage() {} - -func (m *SetNumInstancesRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *SetNumInstancesRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -func (m *SetNumInstancesRequest) GetInstances() int64 { - if m != nil && m.Instances != nil { - return *m.Instances - } - return 0 -} - -type SetNumInstancesResponse struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} } -func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) } -func (*SetNumInstancesResponse) ProtoMessage() {} - -type StartModuleRequest struct { - Module *string 
`protobuf:"bytes,1,req,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} } -func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) } -func (*StartModuleRequest) ProtoMessage() {} - -func (m *StartModuleRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *StartModuleRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type StartModuleResponse struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} } -func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) } -func (*StartModuleResponse) ProtoMessage() {} - -type StopModuleRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} } -func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) } -func (*StopModuleRequest) ProtoMessage() {} - -func (m *StopModuleRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *StopModuleRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type StopModuleResponse struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} } -func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) } -func (*StopModuleResponse) ProtoMessage() {} - -type GetHostnameRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} } -func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) } -func (*GetHostnameRequest) ProtoMessage() {} - -func (m *GetHostnameRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *GetHostnameRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -func (m *GetHostnameRequest) GetInstance() string { - if m != nil && m.Instance != nil { - return *m.Instance - } - return "" -} - -type GetHostnameResponse struct { - Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} } -func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) } -func (*GetHostnameResponse) ProtoMessage() {} - -func (m *GetHostnameResponse) GetHostname() string { - if m != nil && m.Hostname != nil { - return *m.Hostname - } - return "" -} - -func init() { -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/modules/modules_service.proto deleted file 
mode 100644 index d29f0065..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/modules/modules_service.proto +++ /dev/null @@ -1,80 +0,0 @@ -syntax = "proto2"; -option go_package = "modules"; - -package appengine; - -message ModulesServiceError { - enum ErrorCode { - OK = 0; - INVALID_MODULE = 1; - INVALID_VERSION = 2; - INVALID_INSTANCES = 3; - TRANSIENT_ERROR = 4; - UNEXPECTED_STATE = 5; - } -} - -message GetModulesRequest { -} - -message GetModulesResponse { - repeated string module = 1; -} - -message GetVersionsRequest { - optional string module = 1; -} - -message GetVersionsResponse { - repeated string version = 1; -} - -message GetDefaultVersionRequest { - optional string module = 1; -} - -message GetDefaultVersionResponse { - required string version = 1; -} - -message GetNumInstancesRequest { - optional string module = 1; - optional string version = 2; -} - -message GetNumInstancesResponse { - required int64 instances = 1; -} - -message SetNumInstancesRequest { - optional string module = 1; - optional string version = 2; - required int64 instances = 3; -} - -message SetNumInstancesResponse {} - -message StartModuleRequest { - required string module = 1; - required string version = 2; -} - -message StartModuleResponse {} - -message StopModuleRequest { - optional string module = 1; - optional string version = 2; -} - -message StopModuleResponse {} - -message GetHostnameRequest { - optional string module = 1; - optional string version = 2; - optional string instance = 3; -} - -message GetHostnameResponse { - required string hostname = 1; -} - diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/net.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/net.go deleted file mode 100644 index 3b94cf0c..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/net.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file implements a network dialer that limits the number of concurrent connections. -// It is only used for API calls. - -import ( - "log" - "net" - "runtime" - "sync" - "time" -) - -var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable. - -func limitRelease() { - // non-blocking - select { - case <-limitSem: - default: - // This should not normally happen. - log.Print("appengine: unbalanced limitSem release!") - } -} - -func limitDial(network, addr string) (net.Conn, error) { - limitSem <- 1 - - // Dial with a timeout in case the API host is MIA. - // The connection should normally be very fast. 
- conn, err := net.DialTimeout(network, addr, 500*time.Millisecond) - if err != nil { - limitRelease() - return nil, err - } - lc := &limitConn{Conn: conn} - runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required - return lc, nil -} - -type limitConn struct { - close sync.Once - net.Conn -} - -func (lc *limitConn) Close() error { - defer lc.close.Do(func() { - limitRelease() - runtime.SetFinalizer(lc, nil) - }) - return lc.Conn.Close() -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/regen.sh deleted file mode 100644 index 2fdb546a..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/regen.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -e -# -# This script rebuilds the generated code for the protocol buffers. -# To run this you will need protoc and goprotobuf installed; -# see https://github.com/golang/protobuf for instructions. - -PKG=google.golang.org/appengine - -function die() { - echo 1>&2 $* - exit 1 -} - -# Sanity check that the right tools are accessible. -for tool in go protoc protoc-gen-go; do - q=$(which $tool) || die "didn't find $tool" - echo 1>&2 "$tool: $q" -done - -echo -n 1>&2 "finding package dir... " -pkgdir=$(go list -f '{{.Dir}}' $PKG) -echo 1>&2 $pkgdir -base=$(echo $pkgdir | sed "s,/$PKG\$,,") -echo 1>&2 "base: $base" -cd $base - -# Run protoc once per package. -for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do - echo 1>&2 "* $dir" - protoc --go_out=. $dir/*.proto -done - -for f in $(find $PKG/internal -name '*.pb.go'); do - # Remove proto.RegisterEnum calls. - # These cause duplicate registration panics when these packages - # are used on classic App Engine. proto.RegisterEnum only affects - # parsing the text format; we don't care about that. - # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17 - sed -i '/proto.RegisterEnum/d' $f -done diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go deleted file mode 100644 index 526bd39e..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go +++ /dev/null @@ -1,231 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/remote_api/remote_api.proto -// DO NOT EDIT! - -/* -Package remote_api is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/remote_api/remote_api.proto - -It has these top-level messages: - Request - ApplicationError - RpcError - Response -*/ -package remote_api - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type RpcError_ErrorCode int32 - -const ( - RpcError_UNKNOWN RpcError_ErrorCode = 0 - RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1 - RpcError_PARSE_ERROR RpcError_ErrorCode = 2 - RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3 - RpcError_OVER_QUOTA RpcError_ErrorCode = 4 - RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5 - RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6 - RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7 - RpcError_BAD_REQUEST RpcError_ErrorCode = 8 - RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9 - RpcError_CANCELLED RpcError_ErrorCode = 10 - RpcError_REPLAY_ERROR RpcError_ErrorCode = 11 - RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12 -) - -var RpcError_ErrorCode_name = map[int32]string{ - 0: "UNKNOWN", - 1: "CALL_NOT_FOUND", - 2: "PARSE_ERROR", - 3: "SECURITY_VIOLATION", - 4: "OVER_QUOTA", - 5: "REQUEST_TOO_LARGE", - 6: "CAPABILITY_DISABLED", - 7: "FEATURE_DISABLED", - 8: "BAD_REQUEST", - 9: "RESPONSE_TOO_LARGE", - 10: "CANCELLED", - 11: "REPLAY_ERROR", - 12: "DEADLINE_EXCEEDED", -} -var RpcError_ErrorCode_value = map[string]int32{ - "UNKNOWN": 0, - "CALL_NOT_FOUND": 1, - "PARSE_ERROR": 2, - "SECURITY_VIOLATION": 3, - "OVER_QUOTA": 4, - "REQUEST_TOO_LARGE": 5, - "CAPABILITY_DISABLED": 6, - "FEATURE_DISABLED": 7, - "BAD_REQUEST": 8, - "RESPONSE_TOO_LARGE": 9, - "CANCELLED": 10, - "REPLAY_ERROR": 11, - "DEADLINE_EXCEEDED": 12, -} - -func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode { - p := new(RpcError_ErrorCode) - *p = x - return p -} -func (x RpcError_ErrorCode) String() string { - return proto.EnumName(RpcError_ErrorCode_name, int32(x)) -} -func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode") - if err != nil { - return err - } - *x = RpcError_ErrorCode(value) - return nil -} - -type Request struct { - ServiceName *string `protobuf:"bytes,2,req,name=service_name" json:"service_name,omitempty"` - Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"` - Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"` - RequestId *string `protobuf:"bytes,5,opt,name=request_id" json:"request_id,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Request) Reset() { *m = Request{} } -func (m *Request) String() string { return proto.CompactTextString(m) } -func (*Request) ProtoMessage() {} - -func (m *Request) GetServiceName() string { - if m != nil && m.ServiceName != nil { - return *m.ServiceName - } - return "" -} - -func (m *Request) GetMethod() string { - if m != nil && m.Method != nil { - return *m.Method - } - return "" -} - -func (m *Request) GetRequest() []byte { - if m != nil { - return m.Request - } - return nil -} - -func (m *Request) GetRequestId() string { - if m != nil && m.RequestId != nil { - return *m.RequestId - } - return "" -} - -type ApplicationError struct { - Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` - Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ApplicationError) Reset() { *m = ApplicationError{} } -func (m *ApplicationError) String() string { return proto.CompactTextString(m) } -func (*ApplicationError) ProtoMessage() {} - -func (m *ApplicationError) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -func (m *ApplicationError) GetDetail() string { - if m != nil && m.Detail 
!= nil { - return *m.Detail - } - return "" -} - -type RpcError struct { - Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` - Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RpcError) Reset() { *m = RpcError{} } -func (m *RpcError) String() string { return proto.CompactTextString(m) } -func (*RpcError) ProtoMessage() {} - -func (m *RpcError) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -func (m *RpcError) GetDetail() string { - if m != nil && m.Detail != nil { - return *m.Detail - } - return "" -} - -type Response struct { - Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"` - Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"` - ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error" json:"application_error,omitempty"` - JavaException []byte `protobuf:"bytes,4,opt,name=java_exception" json:"java_exception,omitempty"` - RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error" json:"rpc_error,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Response) Reset() { *m = Response{} } -func (m *Response) String() string { return proto.CompactTextString(m) } -func (*Response) ProtoMessage() {} - -func (m *Response) GetResponse() []byte { - if m != nil { - return m.Response - } - return nil -} - -func (m *Response) GetException() []byte { - if m != nil { - return m.Exception - } - return nil -} - -func (m *Response) GetApplicationError() *ApplicationError { - if m != nil { - return m.ApplicationError - } - return nil -} - -func (m *Response) GetJavaException() []byte { - if m != nil { - return m.JavaException - } - return nil -} - -func (m *Response) GetRpcError() *RpcError { - if m != nil { - return m.RpcError - } - return nil -} - -func init() { -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto deleted file mode 100644 index f21763a4..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto2"; -option go_package = "remote_api"; - -package remote_api; - -message Request { - required string service_name = 2; - required string method = 3; - required bytes request = 4; - optional string request_id = 5; -} - -message ApplicationError { - required int32 code = 1; - required string detail = 2; -} - -message RpcError { - enum ErrorCode { - UNKNOWN = 0; - CALL_NOT_FOUND = 1; - PARSE_ERROR = 2; - SECURITY_VIOLATION = 3; - OVER_QUOTA = 4; - REQUEST_TOO_LARGE = 5; - CAPABILITY_DISABLED = 6; - FEATURE_DISABLED = 7; - BAD_REQUEST = 8; - RESPONSE_TOO_LARGE = 9; - CANCELLED = 10; - REPLAY_ERROR = 11; - DEADLINE_EXCEEDED = 12; - } - required int32 code = 1; - optional string detail = 2; -} - -message Response { - optional bytes response = 1; - optional bytes exception = 2; - optional ApplicationError application_error = 3; - optional bytes java_exception = 4; - optional RpcError rpc_error = 5; -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/transaction.go deleted file mode 100644 index 28a6d181..00000000 --- 
a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/internal/transaction.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file implements hooks for applying datastore transactions. - -import ( - "errors" - "reflect" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" - - basepb "google.golang.org/appengine/internal/base" - pb "google.golang.org/appengine/internal/datastore" -) - -var transactionSetters = make(map[reflect.Type]reflect.Value) - -// RegisterTransactionSetter registers a function that sets transaction information -// in a protocol buffer message. f should be a function with two arguments, -// the first being a protocol buffer type, and the second being *datastore.Transaction. -func RegisterTransactionSetter(f interface{}) { - v := reflect.ValueOf(f) - transactionSetters[v.Type().In(0)] = v -} - -// applyTransaction applies the transaction t to message pb -// by using the relevant setter passed to RegisterTransactionSetter. -func applyTransaction(pb proto.Message, t *pb.Transaction) { - v := reflect.ValueOf(pb) - if f, ok := transactionSetters[v.Type()]; ok { - f.Call([]reflect.Value{v, reflect.ValueOf(t)}) - } -} - -var transactionKey = "used for *Transaction" - -func transactionFromContext(ctx netcontext.Context) *transaction { - t, _ := ctx.Value(&transactionKey).(*transaction) - return t -} - -func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { - return netcontext.WithValue(ctx, &transactionKey, t) -} - -type transaction struct { - transaction pb.Transaction - finished bool -} - -var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") - -func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool) error { - if transactionFromContext(c) != nil { - return errors.New("nested transactions are not supported") - } - - // Begin the transaction. - t := &transaction{} - req := &pb.BeginTransactionRequest{ - App: proto.String(FullyQualifiedAppID(c)), - } - if xg { - req.AllowMultipleEg = proto.Bool(true) - } - if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil { - return err - } - - // Call f, rolling back the transaction if f returns a non-nil error, or panics. - // The panic is not recovered. - defer func() { - if t.finished { - return - } - t.finished = true - // Ignore the error return value, since we are already returning a non-nil - // error (or we're panicking). - Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{}) - }() - if err := f(withTransaction(c, t)); err != nil { - return err - } - t.finished = true - - // Commit the transaction. - res := &pb.CommitResponse{} - err := Call(c, "datastore_v3", "Commit", &t.transaction, res) - if ae, ok := err.(*APIError); ok { - /* TODO: restore this conditional - if appengine.IsDevAppServer() { - */ - // The Python Dev AppServer raises an ApplicationError with error code 2 (which is - // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.". - if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." 
{ - return ErrConcurrentTransaction - } - if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) { - return ErrConcurrentTransaction - } - } - return err -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/namespace.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/namespace.go deleted file mode 100644 index 21860ca0..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/namespace.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import ( - "fmt" - "regexp" - - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// Namespace returns a replacement context that operates within the given namespace. -func Namespace(c context.Context, namespace string) (context.Context, error) { - if !validNamespace.MatchString(namespace) { - return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace) - } - return internal.NamespacedContext(c, namespace), nil -} - -// validNamespace matches valid namespace names. -var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`) diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/timeout.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/timeout.go deleted file mode 100644 index 05642a99..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/appengine/timeout.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2013 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import "golang.org/x/net/context" - -// IsTimeoutError reports whether err is a timeout error. -func IsTimeoutError(err error) bool { - if err == context.DeadlineExceeded { - return true - } - if t, ok := err.(interface { - IsTimeout() bool - }); ok { - return t.IsTimeout() - } - return false -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/.travis.yml b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/.travis.yml deleted file mode 100644 index c037df0d..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -sudo: false -language: go -go: -- 1.4 -- 1.5 -install: -- go get -v google.golang.org/cloud/... -script: -- openssl aes-256-cbc -K $encrypted_912ff8fa81ad_key -iv $encrypted_912ff8fa81ad_iv -in key.json.enc -out key.json -d -- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json" - go test -v -tags=integration google.golang.org/cloud/... diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/AUTHORS b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/AUTHORS deleted file mode 100644 index 3da443dc..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/AUTHORS +++ /dev/null @@ -1,12 +0,0 @@ -# This is the official list of cloud authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as: -# Name or Organization -# The email address is not required for organizations. - -Google Inc. -Palm Stone Games, Inc. 
-Péter Szilágyi -Tyler Treat diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/CONTRIBUTING.md b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/CONTRIBUTING.md deleted file mode 100644 index 9a1cab28..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/CONTRIBUTING.md +++ /dev/null @@ -1,114 +0,0 @@ -# Contributing - -1. Sign one of the contributor license agreements below. -1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool. -1. Get the cloud package by running `go get -d google.golang.org/cloud`. - 1. If you have already checked out the source, make sure that the remote git - origin is https://code.googlesource.com/gocloud: - - git remote set-url origin https://code.googlesource.com/gocloud -1. Make changes and create a change by running `git codereview change `, -provide a command message, and use `git codereview mail` to create a Gerrit CL. -1. Keep amending to the change and mail as your recieve feedback. - -## Integration Tests - -Additional to the unit tests, you may run the integration test suite. - -To run the integrations tests, creating and configuration of a project in the -Google Developers Console is required. Once you create a project, set the -following environment variables to be able to run the against the actual APIs. - -- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455) -- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file. - -Create a storage bucket with the same name as the project id set in **GCLOUD_TESTS_GOLANG_PROJECT_ID**. -The storage integration test will create and delete some objects in this bucket. - -Install the [gcloud command-line tool][gcloudcli] to your machine and use it -to create the indexes used in the datastore integration tests with indexes -found in `datastore/testdata/index.yaml`: - -From the project's root directory: - -``` sh -# Install the app component -$ gcloud components update app - -# Set the default project in your env -$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID - -# Authenticate the gcloud tool with your account -$ gcloud auth login - -# Create the indexes -$ gcloud preview datastore create-indexes datastore/testdata/index.yaml - -``` - -You can run the integration tests by running: - -``` sh -$ go test -v -tags=integration google.golang.org/cloud/... -``` - -## Contributor License Agreements - -Before we can accept your pull requests you'll need to sign a Contributor -License Agreement (CLA): - -- **If you are an individual writing original source code** and **you own the -- intellectual property**, then you'll need to sign an [individual CLA][indvcla]. -- **If you work for a company that wants to allow you to contribute your work**, -then you'll need to sign a [corporate CLA][corpcla]. - -You can sign these electronically (just scroll to the bottom). After that, -we'll be able to accept your pull requests. - -## Contributor Code of Conduct - -As contributors and maintainers of this project, -and in the interest of fostering an open and welcoming community, -we pledge to respect all people who contribute through reporting issues, -posting feature requests, updating documentation, -submitting pull requests or patches, and other activities. 
- -We are committed to making participation in this project -a harassment-free experience for everyone, -regardless of level of experience, gender, gender identity and expression, -sexual orientation, disability, personal appearance, -body size, race, ethnicity, age, religion, or nationality. - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery -* Personal attacks -* Trolling or insulting/derogatory comments -* Public or private harassment -* Publishing other's private information, -such as physical or electronic -addresses, without explicit permission -* Other unethical or unprofessional conduct. - -Project maintainers have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct. -By adopting this Code of Conduct, -project maintainers commit themselves to fairly and consistently -applying these principles to every aspect of managing this project. -Project maintainers who do not follow or enforce the Code of Conduct -may be permanently removed from the project team. - -This code of conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. - -Instances of abusive, harassing, or otherwise unacceptable behavior -may be reported by opening an issue -or contacting one or more of the project maintainers. - -This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, -available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) - -[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/ -[indvcla]: https://developers.google.com/open-source/cla/individual -[corpcla]: https://developers.google.com/open-source/cla/corporate diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/CONTRIBUTORS b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/CONTRIBUTORS deleted file mode 100644 index 475ac6a6..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/CONTRIBUTORS +++ /dev/null @@ -1,24 +0,0 @@ -# People who have agreed to one of the CLAs and can contribute patches. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# https://developers.google.com/open-source/cla/individual -# https://developers.google.com/open-source/cla/corporate -# -# Names should be added to this file as: -# Name - -# Keep the list alphabetically sorted. - -Andrew Gerrand -Brad Fitzpatrick -Burcu Dogan -Dave Day -David Symonds -Glenn Lewis -Johan Euphrosine -Luna Duclos -Michael McGreevy -Péter Szilágyi -Tyler Treat diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/LICENSE b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/LICENSE deleted file mode 100644 index a4c5efd8..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2014 Google Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/README.md b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/README.md deleted file mode 100644 index 10d3995d..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/README.md +++ /dev/null @@ -1,135 +0,0 @@ -# Google Cloud for Go - -[![Build Status](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang) - -**NOTE:** These packages are experimental, and may occasionally make -backwards-incompatible changes. - -**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud). - -Go packages for Google Cloud Platform services. Supported APIs include: - - * Google Cloud Datastore - * Google Cloud Storage - * Google Cloud Pub/Sub - * Google Cloud Container Engine - -``` go -import "google.golang.org/cloud" -``` - -Documentation and examples are available at -[https://godoc.org/google.golang.org/cloud](https://godoc.org/google.golang.org/cloud). - -## Authorization - -Authorization, throughout the package, is delegated to the godoc.org/golang.org/x/oauth2. -Refer to the [godoc documentation](https://godoc.org/golang.org/x/oauth2) -for examples on using oauth2 with the Cloud package. - -## Google Cloud Datastore - -[Google Cloud Datastore][cloud-datastore] ([docs][cloud-datastore-docs]) is a fully -managed, schemaless database for storing non-relational data. Cloud Datastore -automatically scales with your users and supports ACID transactions, high availability -of reads and writes, strong consistency for reads and ancestor queries, and eventual -consistency for all other queries. - -Follow the [activation instructions][cloud-datastore-activation] to use the Google -Cloud Datastore API with your project. - -[https://godoc.org/google.golang.org/cloud/datastore](https://godoc.org/google.golang.org/cloud/datastore) - - -```go -type Post struct { - Title string - Body string `datastore:",noindex"` - PublishedAt time.Time -} -keys := []*datastore.Key{ - datastore.NewKey(ctx, "Post", "post1", 0, nil), - datastore.NewKey(ctx, "Post", "post2", 0, nil), -} -posts := []*Post{ - {Title: "Post 1", Body: "...", PublishedAt: time.Now()}, - {Title: "Post 2", Body: "...", PublishedAt: time.Now()}, -} -if _, err := datastore.PutMulti(ctx, keys, posts); err != nil { - log.Println(err) -} -``` - -## Google Cloud Storage - -[Google Cloud Storage][cloud-storage] ([docs][cloud-storage-docs]) allows you to store -data on Google infrastructure with very high reliability, performance and availability, -and can be used to distribute large data objects to users via direct download. - -[https://godoc.org/google.golang.org/cloud/storage](https://godoc.org/google.golang.org/cloud/storage) - - -```go -// Read the object1 from bucket. -rc, err := storage.NewReader(ctx, "bucket", "object1") -if err != nil { - log.Fatal(err) -} -slurp, err := ioutil.ReadAll(rc) -rc.Close() -if err != nil { - log.Fatal(err) -} -``` - -## Google Cloud Pub/Sub (Alpha) - -> Google Cloud Pub/Sub is in **Alpha status**. As a result, it might change in -> backward-incompatible ways and is not recommended for production use. It is not -> subject to any SLA or deprecation policy. - -[Google Cloud Pub/Sub][cloud-pubsub] ([docs][cloud-pubsub-docs]) allows you to connect -your services with reliable, many-to-many, asynchronous messaging hosted on Google's -infrastructure. 
Cloud Pub/Sub automatically scales as you need it and provides a foundation -for building your own robust, global services. - -[https://godoc.org/google.golang.org/cloud/pubsub](https://godoc.org/google.golang.org/cloud/pubsub) - - -```go -// Publish "hello world" on topic1. -msgIDs, err := pubsub.Publish(ctx, "topic1", &pubsub.Message{ - Data: []byte("hello world"), -}) -if err != nil { - log.Println(err) -} -// Pull messages via subscription1. -msgs, err := pubsub.Pull(ctx, "subscription1", 1) -if err != nil { - log.Println(err) -} -``` - -## Contributing - -Contributions are welcome. Please, see the -[CONTRIBUTING](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md) -document for details. We're using Gerrit for our code reviews. Please don't open pull -requests against this repo, new pull requests will be automatically closed. - -Please note that this project is released with a Contributor Code of Conduct. -By participating in this project you agree to abide by its terms. -See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md#contributor-code-of-conduct) -for more information. - -[cloud-datastore]: https://cloud.google.com/datastore/ -[cloud-datastore-docs]: https://cloud.google.com/datastore/docs -[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate - -[cloud-pubsub]: https://cloud.google.com/pubsub/ -[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs - -[cloud-storage]: https://cloud.google.com/storage/ -[cloud-storage-docs]: https://cloud.google.com/storage/docs/overview -[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/cloud.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/cloud.go deleted file mode 100644 index a634b055..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/cloud.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package cloud contains Google Cloud Platform APIs related types -// and common functions. -package cloud - -import ( - "net/http" - - "golang.org/x/net/context" - "google.golang.org/cloud/internal" -) - -// NewContext returns a new context that uses the provided http.Client. -// Provided http.Client is responsible to authorize and authenticate -// the requests made to the Google Cloud APIs. -// It mutates the client's original Transport to append the cloud -// package's user-agent to the outgoing requests. -// You can obtain the project ID from the Google Developers Console, -// https://console.developers.google.com. 
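The context produced by `cloud.NewContext` is what the datastore, storage, and pubsub snippets above expect as their first argument. A minimal wiring sketch, assuming an OAuth2-authorized `*http.Client` is obtained elsewhere (the project ID and helper name below are illustrative, not part of the package):

```go
package main

import (
	"net/http"

	"google.golang.org/cloud"
)

// authorizedClient stands in for however the application builds an
// *http.Client that can authenticate to the Google Cloud APIs
// (typically via golang.org/x/oauth2). NewContext panics on a nil client.
func authorizedClient() *http.Client { return http.DefaultClient }

func main() {
	// "my-project" is an illustrative project ID from the Developers Console.
	ctx := cloud.NewContext("my-project", authorizedClient())

	// ctx can now be passed to e.g. storage.ListObjects or datastore.PutMulti.
	_ = ctx
}
```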
-func NewContext(projID string, c *http.Client) context.Context { - if c == nil { - panic("invalid nil *http.Client passed to NewContext") - } - return WithContext(context.Background(), projID, c) -} - -// WithContext returns a new context in a similar way NewContext does, -// but initiates the new context with the specified parent. -func WithContext(parent context.Context, projID string, c *http.Client) context.Context { - // TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does. - // Do User-Agent some other way. - if _, ok := c.Transport.(*internal.Transport); !ok { - c.Transport = &internal.Transport{Base: c.Transport} - } - return internal.WithContext(parent, projID, c) -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/compute/metadata/metadata.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/compute/metadata/metadata.go deleted file mode 100644 index 3dd684e0..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/compute/metadata/metadata.go +++ /dev/null @@ -1,327 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package metadata provides access to Google Compute Engine (GCE) -// metadata and API service accounts. -// -// This package is a wrapper around the GCE metadata service, -// as documented at https://developers.google.com/compute/docs/metadata. -package metadata - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "strings" - "sync" - "time" - - "google.golang.org/cloud/internal" -) - -type cachedValue struct { - k string - trim bool - mu sync.Mutex - v string -} - -var ( - projID = &cachedValue{k: "project/project-id", trim: true} - projNum = &cachedValue{k: "project/numeric-project-id", trim: true} - instID = &cachedValue{k: "instance/id", trim: true} -) - -var metaClient = &http.Client{ - Transport: &internal.Transport{ - Base: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 750 * time.Millisecond, - KeepAlive: 30 * time.Second, - }).Dial, - ResponseHeaderTimeout: 750 * time.Millisecond, - }, - }, -} - -// NotDefinedError is returned when requested metadata is not defined. -// -// The underlying string is the suffix after "/computeMetadata/v1/". -// -// This error is not returned if the value is defined to be the empty -// string. -type NotDefinedError string - -func (suffix NotDefinedError) Error() string { - return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) -} - -// Get returns a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// -// If the GCE_METADATA_HOST environment variable is not defined, a default of -// 169.254.169.254 will be used instead. -// -// If the requested metadata is not defined, the returned error will -// be of type NotDefinedError. 
-func Get(suffix string) (string, error) { - val, _, err := getETag(suffix) - return val, err -} - -// getETag returns a value from the metadata service as well as the associated -// ETag. This func is otherwise equivalent to Get. -func getETag(suffix string) (value, etag string, err error) { - // Using a fixed IP makes it very difficult to spoof the metadata service in - // a container, which is an important use-case for local testing of cloud - // deployments. To enable spoofing of the metadata service, the environment - // variable GCE_METADATA_HOST is first inspected to decide where metadata - // requests shall go. - host := os.Getenv("GCE_METADATA_HOST") - if host == "" { - // Using 169.254.169.254 instead of "metadata" here because Go - // binaries built with the "netgo" tag and without cgo won't - // know the search suffix for "metadata" is - // ".google.internal", and this IP address is documented as - // being stable anyway. - host = "169.254.169.254" - } - url := "http://" + host + "/computeMetadata/v1/" + suffix - req, _ := http.NewRequest("GET", url, nil) - req.Header.Set("Metadata-Flavor", "Google") - res, err := metaClient.Do(req) - if err != nil { - return "", "", err - } - defer res.Body.Close() - if res.StatusCode == http.StatusNotFound { - return "", "", NotDefinedError(suffix) - } - if res.StatusCode != 200 { - return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) - } - all, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", err - } - return string(all), res.Header.Get("Etag"), nil -} - -func getTrimmed(suffix string) (s string, err error) { - s, err = Get(suffix) - s = strings.TrimSpace(s) - return -} - -func (c *cachedValue) get() (v string, err error) { - defer c.mu.Unlock() - c.mu.Lock() - if c.v != "" { - return c.v, nil - } - if c.trim { - v, err = getTrimmed(c.k) - } else { - v, err = Get(c.k) - } - if err == nil { - c.v = v - } - return -} - -var onGCE struct { - sync.Mutex - set bool - v bool -} - -// OnGCE reports whether this process is running on Google Compute Engine. -func OnGCE() bool { - defer onGCE.Unlock() - onGCE.Lock() - if onGCE.set { - return onGCE.v - } - onGCE.set = true - - // We use the DNS name of the metadata service here instead of the IP address - // because we expect that to fail faster in the not-on-GCE case. - res, err := metaClient.Get("http://metadata.google.internal") - if err != nil { - return false - } - onGCE.v = res.Header.Get("Metadata-Flavor") == "Google" - return onGCE.v -} - -// Subscribe subscribes to a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// -// Subscribe calls fn with the latest metadata value indicated by the provided -// suffix. If the metadata value is deleted, fn is called with the empty string -// and ok false. Subscribe blocks until fn returns a non-nil error or the value -// is deleted. Subscribe returns the error value returned from the last call to -// fn, which may be nil when ok == false. -func Subscribe(suffix string, fn func(v string, ok bool) error) error { - const failedSubscribeSleep = time.Second * 5 - - // First check to see if the metadata value exists at all. 
- val, lastETag, err := getETag(suffix) - if err != nil { - return err - } - - if err := fn(val, true); err != nil { - return err - } - - ok := true - suffix += "?wait_for_change=true&last_etag=" - for { - val, etag, err := getETag(suffix + url.QueryEscape(lastETag)) - if err != nil { - if _, deleted := err.(NotDefinedError); !deleted { - time.Sleep(failedSubscribeSleep) - continue // Retry on other errors. - } - ok = false - } - lastETag = etag - - if err := fn(val, ok); err != nil || !ok { - return err - } - } -} - -// ProjectID returns the current instance's project ID string. -func ProjectID() (string, error) { return projID.get() } - -// NumericProjectID returns the current instance's numeric project ID. -func NumericProjectID() (string, error) { return projNum.get() } - -// InternalIP returns the instance's primary internal IP address. -func InternalIP() (string, error) { - return getTrimmed("instance/network-interfaces/0/ip") -} - -// ExternalIP returns the instance's primary external (public) IP address. -func ExternalIP() (string, error) { - return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") -} - -// Hostname returns the instance's hostname. This will be of the form -// ".c..internal". -func Hostname() (string, error) { - return getTrimmed("instance/hostname") -} - -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. -func InstanceTags() ([]string, error) { - var s []string - j, err := Get("instance/tags") - if err != nil { - return nil, err - } - if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { - return nil, err - } - return s, nil -} - -// InstanceID returns the current VM's numeric instance ID. -func InstanceID() (string, error) { - return instID.get() -} - -// InstanceName returns the current VM's instance ID string. -func InstanceName() (string, error) { - host, err := Hostname() - if err != nil { - return "", err - } - return strings.Split(host, ".")[0], nil -} - -// Zone returns the current VM's zone, such as "us-central1-b". -func Zone() (string, error) { - zone, err := getTrimmed("instance/zone") - // zone is of the form "projects//zones/". - if err != nil { - return "", err - } - return zone[strings.LastIndex(zone, "/")+1:], nil -} - -// InstanceAttributes returns the list of user-defined attributes, -// assigned when initially creating a GCE VM instance. The value of an -// attribute can be obtained with InstanceAttributeValue. -func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") } - -// ProjectAttributes returns the list of user-defined attributes -// applying to the project as a whole, not just this VM. The value of -// an attribute can be obtained with ProjectAttributeValue. -func ProjectAttributes() ([]string, error) { return lines("project/attributes/") } - -func lines(suffix string) ([]string, error) { - j, err := Get(suffix) - if err != nil { - return nil, err - } - s := strings.Split(strings.TrimSpace(j), "\n") - for i := range s { - s[i] = strings.TrimSpace(s[i]) - } - return s, nil -} - -// InstanceAttributeValue returns the value of the provided VM -// instance attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// InstanceAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. 
-func InstanceAttributeValue(attr string) (string, error) { - return Get("instance/attributes/" + attr) -} - -// ProjectAttributeValue returns the value of the provided -// project attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// ProjectAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func ProjectAttributeValue(attr string) (string, error) { - return Get("project/attributes/" + attr) -} - -// Scopes returns the service account scopes for the given account. -// The account may be empty or the string "default" to use the instance's -// main account. -func Scopes(serviceAccount string) ([]string, error) { - if serviceAccount == "" { - serviceAccount = "default" - } - return lines("instance/service-accounts/" + serviceAccount + "/scopes") -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/internal/cloud.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/internal/cloud.go deleted file mode 100644 index 8b0db1b5..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/internal/cloud.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package internal provides support for the cloud packages. -// -// Users should not import this package directly. -package internal - -import ( - "fmt" - "net/http" - "sync" - - "golang.org/x/net/context" -) - -type contextKey struct{} - -func WithContext(parent context.Context, projID string, c *http.Client) context.Context { - if c == nil { - panic("nil *http.Client passed to WithContext") - } - if projID == "" { - panic("empty project ID passed to WithContext") - } - return context.WithValue(parent, contextKey{}, &cloudContext{ - ProjectID: projID, - HTTPClient: c, - }) -} - -const userAgent = "gcloud-golang/0.1" - -type cloudContext struct { - ProjectID string - HTTPClient *http.Client - - mu sync.Mutex // guards svc - svc map[string]interface{} // e.g. "storage" => *rawStorage.Service -} - -// Service returns the result of the fill function if it's never been -// called before for the given name (which is assumed to be an API -// service name, like "datastore"). If it has already been cached, the fill -// func is not run. -// It's safe for concurrent use by multiple goroutines. -func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} { - return cc(ctx).service(name, fill) -} - -func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} { - c.mu.Lock() - defer c.mu.Unlock() - - if c.svc == nil { - c.svc = make(map[string]interface{}) - } else if v, ok := c.svc[name]; ok { - return v - } - v := fill(c.HTTPClient) - c.svc[name] = v - return v -} - -// Transport is an http.RoundTripper that appends -// Google Cloud client's user-agent to the original -// request's user-agent header. 
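Taken together, the metadata package reduces to a handful of string lookups against the metadata server. A minimal usage sketch, assuming the process runs on a GCE instance; the attribute name is illustrative, and a missing attribute surfaces as a `NotDefinedError`:

```go
package main

import (
	"fmt"
	"log"

	"google.golang.org/cloud/compute/metadata"
)

func main() {
	// OnGCE probes the metadata server, so the calls below are only
	// meaningful on an actual Compute Engine instance.
	if !metadata.OnGCE() {
		log.Fatal("not running on GCE")
	}

	proj, err := metadata.ProjectID()
	if err != nil {
		log.Fatal(err)
	}
	zone, err := metadata.Zone()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("project=%s zone=%s\n", proj, zone)

	// "startup-script" is an illustrative attribute name; undefined
	// attributes come back as a metadata.NotDefinedError.
	if script, err := metadata.InstanceAttributeValue("startup-script"); err == nil {
		fmt.Println(len(script), "bytes of startup script")
	}
}
```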
-type Transport struct { - // Base represents the actual http.RoundTripper - // the requests will be delegated to. - Base http.RoundTripper -} - -// RoundTrip appends a user-agent to the existing user-agent -// header and delegates the request to the base http.RoundTripper. -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - req = cloneRequest(req) - ua := req.Header.Get("User-Agent") - if ua == "" { - ua = userAgent - } else { - ua = fmt.Sprintf("%s %s", ua, userAgent) - } - req.Header.Set("User-Agent", ua) - return t.Base.RoundTrip(req) -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } - return r2 -} - -func ProjID(ctx context.Context) string { - return cc(ctx).ProjectID -} - -func HTTPClient(ctx context.Context) *http.Client { - return cc(ctx).HTTPClient -} - -// cc returns the internal *cloudContext (cc) state for a context.Context. -// It panics if the user did it wrong. -func cc(ctx context.Context) *cloudContext { - if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok { - return c - } - panic("invalid context.Context type; it should be created with cloud.NewContext") -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/internal/opts/option.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/internal/opts/option.go deleted file mode 100644 index c5ccf4f5..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/internal/opts/option.go +++ /dev/null @@ -1,24 +0,0 @@ -// Package opts holds the DialOpts struct, configurable by -// cloud.ClientOptions to set up transports for cloud packages. -// -// This is a separate page to prevent cycles between the core -// cloud packages. -package opts - -import ( - "net/http" - - "golang.org/x/oauth2" - "google.golang.org/grpc" -) - -type DialOpt struct { - Endpoint string - Scopes []string - UserAgent string - - TokenSource oauth2.TokenSource - - HTTPClient *http.Client - GRPCClient *grpc.ClientConn -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/key.json.enc b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/key.json.enc deleted file mode 100644 index 2f673a84..00000000 Binary files a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/key.json.enc and /dev/null differ diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/option.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/option.go deleted file mode 100644 index d4a5aea2..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/option.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package cloud - -import ( - "net/http" - - "golang.org/x/oauth2" - "google.golang.org/cloud/internal/opts" - "google.golang.org/grpc" -) - -// ClientOption is used when construct clients for each cloud service. -type ClientOption interface { - // Resolve configures the given DialOpts for this option. - Resolve(*opts.DialOpt) -} - -// WithTokenSource returns a ClientOption that specifies an OAuth2 token -// source to be used as the basis for authentication. -func WithTokenSource(s oauth2.TokenSource) ClientOption { - return withTokenSource{s} -} - -type withTokenSource struct{ ts oauth2.TokenSource } - -func (w withTokenSource) Resolve(o *opts.DialOpt) { - o.TokenSource = w.ts -} - -// WithEndpoint returns a ClientOption that overrides the default endpoint -// to be used for a service. -func WithEndpoint(url string) ClientOption { - return withEndpoint(url) -} - -type withEndpoint string - -func (w withEndpoint) Resolve(o *opts.DialOpt) { - o.Endpoint = string(w) -} - -// WithScopes returns a ClientOption that overrides the default OAuth2 scopes -// to be used for a service. -func WithScopes(scope ...string) ClientOption { - return withScopes(scope) -} - -type withScopes []string - -func (w withScopes) Resolve(o *opts.DialOpt) { - o.Scopes = []string(w) -} - -// WithUserAgent returns a ClientOption that sets the User-Agent. -func WithUserAgent(ua string) ClientOption { - return withUA(ua) -} - -type withUA string - -func (w withUA) Resolve(o *opts.DialOpt) { o.UserAgent = string(w) } - -// WithBaseHTTP returns a ClientOption that specifies the HTTP client to -// use as the basis of communications. This option may only be used with -// services that support HTTP as their communication transport. -func WithBaseHTTP(client *http.Client) ClientOption { - return withBaseHTTP{client} -} - -type withBaseHTTP struct{ client *http.Client } - -func (w withBaseHTTP) Resolve(o *opts.DialOpt) { - o.HTTPClient = w.client -} - -// WithBaseGRPC returns a ClientOption that specifies the GRPC client -// connection to use as the basis of communications. This option many only be -// used with services that support HRPC as their communication transport. -func WithBaseGRPC(client *grpc.ClientConn) ClientOption { - return withBaseGRPC{client} -} - -type withBaseGRPC struct{ client *grpc.ClientConn } - -func (w withBaseGRPC) Resolve(o *opts.DialOpt) { - o.GRPCClient = w.client -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/acl.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/acl.go deleted file mode 100644 index 71c5800a..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/acl.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package storage - -import ( - "fmt" - - "golang.org/x/net/context" - raw "google.golang.org/api/storage/v1" -) - -// ACLRole is the the access permission for the entity. -type ACLRole string - -const ( - RoleOwner ACLRole = "OWNER" - RoleReader ACLRole = "READER" -) - -// ACLEntity is an entity holding an ACL permission. -// -// It could be in the form of: -// "user-", "user-","group-", "group-", -// "domain-" and "project-team-". -// -// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers. -type ACLEntity string - -const ( - AllUsers ACLEntity = "allUsers" - AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers" -) - -// ACLRule represents an access control list rule entry for a Google Cloud Storage object or bucket. -// A bucket is a Google Cloud Storage container whose name is globally unique and contains zero or -// more objects. An object is a blob of data that is stored in a bucket. -type ACLRule struct { - // Entity identifies the entity holding the current rule's permissions. - Entity ACLEntity - - // Role is the the access permission for the entity. - Role ACLRole -} - -// DefaultACL returns the default object ACL entries for the named bucket. -func DefaultACL(ctx context.Context, bucket string) ([]ACLRule, error) { - acls, err := rawService(ctx).DefaultObjectAccessControls.List(bucket).Context(ctx).Do() - if err != nil { - return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", bucket, err) - } - r := make([]ACLRule, 0, len(acls.Items)) - for _, v := range acls.Items { - if m, ok := v.(map[string]interface{}); ok { - entity, ok1 := m["entity"].(string) - role, ok2 := m["role"].(string) - if ok1 && ok2 { - r = append(r, ACLRule{Entity: ACLEntity(entity), Role: ACLRole(role)}) - } - } - } - return r, nil -} - -// PutDefaultACLRule saves the named default object ACL entity with the provided role for the named bucket. -func PutDefaultACLRule(ctx context.Context, bucket string, entity ACLEntity, role ACLRole) error { - acl := &raw.ObjectAccessControl{ - Bucket: bucket, - Entity: string(entity), - Role: string(role), - } - _, err := rawService(ctx).DefaultObjectAccessControls.Update(bucket, string(entity), acl).Context(ctx).Do() - if err != nil { - return fmt.Errorf("storage: error updating default ACL rule for bucket %q, entity %q: %v", bucket, entity, err) - } - return nil -} - -// DeleteDefaultACLRule deletes the named default ACL entity for the named bucket. -func DeleteDefaultACLRule(ctx context.Context, bucket string, entity ACLEntity) error { - err := rawService(ctx).DefaultObjectAccessControls.Delete(bucket, string(entity)).Context(ctx).Do() - if err != nil { - return fmt.Errorf("storage: error deleting default ACL rule for bucket %q, entity %q: %v", bucket, entity, err) - } - return nil -} - -// BucketACL returns the ACL entries for the named bucket. -func BucketACL(ctx context.Context, bucket string) ([]ACLRule, error) { - acls, err := rawService(ctx).BucketAccessControls.List(bucket).Context(ctx).Do() - if err != nil { - return nil, fmt.Errorf("storage: error listing bucket ACL for bucket %q: %v", bucket, err) - } - r := make([]ACLRule, len(acls.Items)) - for i, v := range acls.Items { - r[i].Entity = ACLEntity(v.Entity) - r[i].Role = ACLRole(v.Role) - } - return r, nil -} - -// PutBucketACLRule saves the named ACL entity with the provided role for the named bucket. 
-func PutBucketACLRule(ctx context.Context, bucket string, entity ACLEntity, role ACLRole) error { - acl := &raw.BucketAccessControl{ - Bucket: bucket, - Entity: string(entity), - Role: string(role), - } - _, err := rawService(ctx).BucketAccessControls.Update(bucket, string(entity), acl).Context(ctx).Do() - if err != nil { - return fmt.Errorf("storage: error updating bucket ACL rule for bucket %q, entity %q: %v", bucket, entity, err) - } - return nil -} - -// DeleteBucketACLRule deletes the named ACL entity for the named bucket. -func DeleteBucketACLRule(ctx context.Context, bucket string, entity ACLEntity) error { - err := rawService(ctx).BucketAccessControls.Delete(bucket, string(entity)).Context(ctx).Do() - if err != nil { - return fmt.Errorf("storage: error deleting bucket ACL rule for bucket %q, entity %q: %v", bucket, entity, err) - } - return nil -} - -// ACL returns the ACL entries for the named object. -func ACL(ctx context.Context, bucket, object string) ([]ACLRule, error) { - acls, err := rawService(ctx).ObjectAccessControls.List(bucket, object).Context(ctx).Do() - if err != nil { - return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", bucket, object, err) - } - r := make([]ACLRule, 0, len(acls.Items)) - for _, v := range acls.Items { - if m, ok := v.(map[string]interface{}); ok { - entity, ok1 := m["entity"].(string) - role, ok2 := m["role"].(string) - if ok1 && ok2 { - r = append(r, ACLRule{Entity: ACLEntity(entity), Role: ACLRole(role)}) - } - } - } - return r, nil -} - -// PutACLRule saves the named ACL entity with the provided role for the named object. -func PutACLRule(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole) error { - acl := &raw.ObjectAccessControl{ - Bucket: bucket, - Entity: string(entity), - Role: string(role), - } - _, err := rawService(ctx).ObjectAccessControls.Update(bucket, object, string(entity), acl).Context(ctx).Do() - if err != nil { - return fmt.Errorf("storage: error updating object ACL rule for bucket %q, file %q, entity %q: %v", bucket, object, entity, err) - } - return nil -} - -// DeleteACLRule deletes the named ACL entity for the named object. -func DeleteACLRule(ctx context.Context, bucket, object string, entity ACLEntity) error { - err := rawService(ctx).ObjectAccessControls.Delete(bucket, object, string(entity)).Context(ctx).Do() - if err != nil { - return fmt.Errorf("storage: error deleting object ACL rule for bucket %q, file %q, entity %q: %v", bucket, object, entity, err) - } - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/storage.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/storage.go deleted file mode 100644 index bf22c6ae..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/storage.go +++ /dev/null @@ -1,350 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
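As a rough sketch of how these ACL helpers combine, the snippet below makes one object world-readable and then prints its resulting ACL. It assumes `ctx` comes from `cloud.NewContext` as above; the bucket and object names are illustrative:

```go
if err := storage.PutACLRule(ctx, "my-bucket", "report.csv", storage.AllUsers, storage.RoleReader); err != nil {
	log.Fatal(err)
}
rules, err := storage.ACL(ctx, "my-bucket", "report.csv")
if err != nil {
	log.Fatal(err)
}
for _, rule := range rules {
	fmt.Printf("%s -> %s\n", rule.Entity, rule.Role)
}
```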
- -// Package storage contains a Google Cloud Storage client. -// -// This package is experimental and may make backwards-incompatible changes. -package storage - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" - "encoding/base64" - "encoding/pem" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strings" - "time" - - "google.golang.org/cloud/internal" - - "golang.org/x/net/context" - "google.golang.org/api/googleapi" - raw "google.golang.org/api/storage/v1" -) - -var ( - ErrBucketNotExist = errors.New("storage: bucket doesn't exist") - ErrObjectNotExist = errors.New("storage: object doesn't exist") -) - -const ( - // ScopeFullControl grants permissions to manage your - // data and permissions in Google Cloud Storage. - ScopeFullControl = raw.DevstorageFullControlScope - - // ScopeReadOnly grants permissions to - // view your data in Google Cloud Storage. - ScopeReadOnly = raw.DevstorageReadOnlyScope - - // ScopeReadWrite grants permissions to manage your - // data in Google Cloud Storage. - ScopeReadWrite = raw.DevstorageReadWriteScope -) - -// TODO(jbd): Add storage.buckets.list. -// TODO(jbd): Add storage.buckets.insert. -// TODO(jbd): Add storage.buckets.update. -// TODO(jbd): Add storage.buckets.delete. - -// TODO(jbd): Add storage.objects.watch. - -// BucketInfo returns the metadata for the specified bucket. -func BucketInfo(ctx context.Context, name string) (*Bucket, error) { - resp, err := rawService(ctx).Buckets.Get(name).Projection("full").Context(ctx).Do() - if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - return nil, ErrBucketNotExist - } - if err != nil { - return nil, err - } - return newBucket(resp), nil -} - -// ListObjects lists objects from the bucket. You can specify a query -// to filter the results. If q is nil, no filtering is applied. -func ListObjects(ctx context.Context, bucket string, q *Query) (*Objects, error) { - c := rawService(ctx).Objects.List(bucket) - c.Projection("full") - if q != nil { - c.Delimiter(q.Delimiter) - c.Prefix(q.Prefix) - c.Versions(q.Versions) - c.PageToken(q.Cursor) - if q.MaxResults > 0 { - c.MaxResults(int64(q.MaxResults)) - } - } - resp, err := c.Context(ctx).Do() - if err != nil { - return nil, err - } - objects := &Objects{ - Results: make([]*Object, len(resp.Items)), - Prefixes: make([]string, len(resp.Prefixes)), - } - for i, item := range resp.Items { - objects.Results[i] = newObject(item) - } - for i, prefix := range resp.Prefixes { - objects.Prefixes[i] = prefix - } - if resp.NextPageToken != "" { - next := Query{} - if q != nil { - // keep the other filtering - // criteria if there is a query - next = *q - } - next.Cursor = resp.NextPageToken - objects.Next = &next - } - return objects, nil -} - -// SignedURLOptions allows you to restrict the access to the signed URL. -type SignedURLOptions struct { - // GoogleAccessID represents the authorizer of the signed URL generation. - // It is typically the Google service account client email address from - // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". - // Required. - GoogleAccessID string - - // PrivateKey is the Google service account private key. It is obtainable - // from the Google Developers Console. - // At https://console.developers.google.com/project//apiui/credential, - // create a service account client ID or reuse one of your existing service account - // credentials. Click on the "Generate new P12 key" to generate and download - // a new private key. 
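Because each returned page carries the cursor for the next one in its `Next` field, pagination over `ListObjects` can be a simple loop. A sketch, assuming `ctx` as above and an illustrative bucket and prefix:

```go
q := &storage.Query{Prefix: "logs/", MaxResults: 100}
for q != nil {
	objs, err := storage.ListObjects(ctx, "my-bucket", q)
	if err != nil {
		log.Fatal(err)
	}
	for _, obj := range objs.Results {
		fmt.Println(obj.Name, obj.Size)
	}
	q = objs.Next // nil once the last page has been read
}
```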
Once you download the P12 file, use the following command - // to convert it into a PEM file. - // - // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes - // - // Provide the contents of the PEM file as a byte slice. - // Required. - PrivateKey []byte - - // Method is the HTTP method to be used with the signed URL. - // Signed URLs can be used with GET, HEAD, PUT, and DELETE requests. - // Required. - Method string - - // Expires is the expiration time on the signed URL. It must be - // a datetime in the future. - // Required. - Expires time.Time - - // ContentType is the content type header the client must provide - // to use the generated signed URL. - // Optional. - ContentType string - - // Headers is a list of extention headers the client must provide - // in order to use the generated signed URL. - // Optional. - Headers []string - - // MD5 is the base64 encoded MD5 checksum of the file. - // If provided, the client should provide the exact value on the request - // header in order to use the signed URL. - // Optional. - MD5 []byte -} - -// SignedURL returns a URL for the specified object. Signed URLs allow -// the users access to a restricted resource for a limited time without having a -// Google account or signing in. For more information about the signed -// URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs. -func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) { - if opts == nil { - return "", errors.New("storage: missing required SignedURLOptions") - } - if opts.GoogleAccessID == "" || opts.PrivateKey == nil { - return "", errors.New("storage: missing required credentials to generate a signed URL") - } - if opts.Method == "" { - return "", errors.New("storage: missing required method option") - } - if opts.Expires.IsZero() { - return "", errors.New("storage: missing required expires option") - } - key, err := parseKey(opts.PrivateKey) - if err != nil { - return "", err - } - h := sha256.New() - fmt.Fprintf(h, "%s\n", opts.Method) - fmt.Fprintf(h, "%s\n", opts.MD5) - fmt.Fprintf(h, "%s\n", opts.ContentType) - fmt.Fprintf(h, "%d\n", opts.Expires.Unix()) - fmt.Fprintf(h, "%s", strings.Join(opts.Headers, "\n")) - fmt.Fprintf(h, "/%s/%s", bucket, name) - b, err := rsa.SignPKCS1v15( - rand.Reader, - key, - crypto.SHA256, - h.Sum(nil), - ) - if err != nil { - return "", err - } - encoded := base64.StdEncoding.EncodeToString(b) - u := &url.URL{ - Scheme: "https", - Host: "storage.googleapis.com", - Path: fmt.Sprintf("/%s/%s", bucket, name), - } - q := u.Query() - q.Set("GoogleAccessId", opts.GoogleAccessID) - q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix())) - q.Set("Signature", string(encoded)) - u.RawQuery = q.Encode() - return u.String(), nil -} - -// StatObject returns meta information about the specified object. -func StatObject(ctx context.Context, bucket, name string) (*Object, error) { - o, err := rawService(ctx).Objects.Get(bucket, name).Projection("full").Context(ctx).Do() - if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - return nil, ErrObjectNotExist - } - if err != nil { - return nil, err - } - return newObject(o), nil -} - -// UpdateAttrs updates an object with the provided attributes. -// All zero-value attributes are ignored. 
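Only the four required options need to be set for a basic download link. A sketch, assuming the service-account email and the PEM key bytes (`pemKey`) are loaded elsewhere; the bucket and object names are illustrative:

```go
opts := &storage.SignedURLOptions{
	GoogleAccessID: "my-service-account@developer.gserviceaccount.com",
	PrivateKey:     pemKey, // contents of the converted key.pem file
	Method:         "GET",
	Expires:        time.Now().Add(15 * time.Minute),
}
url, err := storage.SignedURL("my-bucket", "report.csv", opts)
if err != nil {
	log.Fatal(err)
}
fmt.Println(url) // usable for 15 minutes without a Google account
```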
-func UpdateAttrs(ctx context.Context, bucket, name string, attrs ObjectAttrs) (*Object, error) { - o, err := rawService(ctx).Objects.Patch(bucket, name, attrs.toRawObject(bucket)).Projection("full").Context(ctx).Do() - if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - return nil, ErrObjectNotExist - } - if err != nil { - return nil, err - } - return newObject(o), nil -} - -// DeleteObject deletes the single specified object. -func DeleteObject(ctx context.Context, bucket, name string) error { - return rawService(ctx).Objects.Delete(bucket, name).Context(ctx).Do() -} - -// CopyObject copies the source object to the destination. -// The copied object's attributes are overwritten by attrs if non-nil. -func CopyObject(ctx context.Context, srcBucket, srcName string, destBucket, destName string, attrs *ObjectAttrs) (*Object, error) { - if srcBucket == "" || destBucket == "" { - return nil, errors.New("storage: srcBucket and destBucket must both be non-empty") - } - if srcName == "" || destName == "" { - return nil, errors.New("storage: srcName and destName must be non-empty") - } - var rawObject *raw.Object - if attrs != nil { - attrs.Name = destName - if attrs.ContentType == "" { - return nil, errors.New("storage: attrs.ContentType must be non-empty") - } - rawObject = attrs.toRawObject(destBucket) - } - o, err := rawService(ctx).Objects.Copy( - srcBucket, srcName, destBucket, destName, rawObject).Projection("full").Context(ctx).Do() - if err != nil { - return nil, err - } - return newObject(o), nil -} - -// NewReader creates a new io.ReadCloser to read the contents -// of the object. -func NewReader(ctx context.Context, bucket, name string) (io.ReadCloser, error) { - hc := internal.HTTPClient(ctx) - u := &url.URL{ - Scheme: "https", - Host: "storage.googleapis.com", - Path: fmt.Sprintf("/%s/%s", bucket, name), - } - res, err := hc.Get(u.String()) - if err != nil { - return nil, err - } - if res.StatusCode == http.StatusNotFound { - res.Body.Close() - return nil, ErrObjectNotExist - } - if res.StatusCode < 200 || res.StatusCode > 299 { - res.Body.Close() - return res.Body, fmt.Errorf("storage: can't read object %v/%v, status code: %v", bucket, name, res.Status) - } - return res.Body, nil -} - -// NewWriter returns a storage Writer that writes to the GCS object -// identified by the specified name. -// If such an object doesn't exist, it creates one. -// Attributes can be set on the object by modifying the returned Writer's -// ObjectAttrs field before the first call to Write. The name parameter to this -// function is ignored if the Name field of the ObjectAttrs field is set to a -// non-empty string. -// -// It is the caller's responsibility to call Close when writing is done. -// -// The object is not available and any previous object with the same -// name is not replaced on Cloud Storage until Close is called. -func NewWriter(ctx context.Context, bucket, name string) *Writer { - return &Writer{ - ctx: ctx, - bucket: bucket, - name: name, - donec: make(chan struct{}), - } -} - -func rawService(ctx context.Context) *raw.Service { - return internal.Service(ctx, "storage", func(hc *http.Client) interface{} { - svc, _ := raw.New(hc) - return svc - }).(*raw.Service) -} - -// parseKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a -// PEM container or not. If so, it extracts the the private key -// from PEM container before conversion. It only supports PEM -// containers with no passphrase. 
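Writes follow the reader pattern in reverse: obtain a Writer, optionally set attributes before the first Write, and call Close to commit the object. A sketch with illustrative names, `ctx` as above:

```go
w := storage.NewWriter(ctx, "my-bucket", "greeting.txt")
w.ContentType = "text/plain" // attributes must be set before the first Write
if _, err := w.Write([]byte("hello world")); err != nil {
	log.Fatal(err)
}
if err := w.Close(); err != nil {
	log.Fatal(err)
}
obj := w.Object() // only valid after Close returns nil
log.Printf("wrote %s (%d bytes)", obj.Name, obj.Size)
```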
-func parseKey(key []byte) (*rsa.PrivateKey, error) { - if block, _ := pem.Decode(key); block != nil { - key = block.Bytes - } - parsedKey, err := x509.ParsePKCS8PrivateKey(key) - if err != nil { - parsedKey, err = x509.ParsePKCS1PrivateKey(key) - if err != nil { - return nil, err - } - } - parsed, ok := parsedKey.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("oauth2: private key is invalid") - } - return parsed, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/types.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/types.go deleted file mode 100644 index 060deb6a..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/cloud/storage/types.go +++ /dev/null @@ -1,417 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "encoding/base64" - "io" - "sync" - "time" - - "golang.org/x/net/context" - raw "google.golang.org/api/storage/v1" -) - -// Bucket represents a Google Cloud Storage bucket. -type Bucket struct { - // Name is the name of the bucket. - Name string - - // ACL is the list of access control rules on the bucket. - ACL []ACLRule - - // DefaultObjectACL is the list of access controls to - // apply to new objects when no object ACL is provided. - DefaultObjectACL []ACLRule - - // Location is the location of the bucket. It defaults to "US". - Location string - - // Metageneration is the metadata generation of the bucket. - // Read-only. - Metageneration int64 - - // StorageClass is the storage class of the bucket. This defines - // how objects in the bucket are stored and determines the SLA - // and the cost of storage. Typical values are "STANDARD" and - // "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD". - StorageClass string - - // Created is the creation time of the bucket. - // Read-only. - Created time.Time -} - -func newBucket(b *raw.Bucket) *Bucket { - if b == nil { - return nil - } - bucket := &Bucket{ - Name: b.Name, - Location: b.Location, - Metageneration: b.Metageneration, - StorageClass: b.StorageClass, - Created: convertTime(b.TimeCreated), - } - acl := make([]ACLRule, len(b.Acl)) - for i, rule := range b.Acl { - acl[i] = ACLRule{ - Entity: ACLEntity(rule.Entity), - Role: ACLRole(rule.Role), - } - } - bucket.ACL = acl - objACL := make([]ACLRule, len(b.DefaultObjectAcl)) - for i, rule := range b.DefaultObjectAcl { - objACL[i] = ACLRule{ - Entity: ACLEntity(rule.Entity), - Role: ACLRole(rule.Role), - } - } - bucket.DefaultObjectACL = objACL - return bucket -} - -// ObjectAttrs is the user-editable object attributes. -type ObjectAttrs struct { - // Name is the name of the object. - Name string - - // ContentType is the MIME type of the object's content. - // Optional. - ContentType string - - // ContentLanguage is the optional RFC 1766 Content-Language of - // the object's content sent in response headers. 
- ContentLanguage string - - // ContentEncoding is the optional Content-Encoding of the object - // sent it the response headers. - ContentEncoding string - - // CacheControl is the optional Cache-Control header of the object - // sent in the response headers. - CacheControl string - - // ContentDisposition is the optional Content-Disposition header of the object - // sent in the response headers. - ContentDisposition string - - // ACL is the list of access control rules for the object. - // Optional. If nil or empty, existing ACL rules are preserved. - ACL []ACLRule - - // Metadata represents user-provided metadata, in key/value pairs. - // It can be nil if the current metadata values needs to preserved. - Metadata map[string]string -} - -func (o ObjectAttrs) toRawObject(bucket string) *raw.Object { - var acl []*raw.ObjectAccessControl - if len(o.ACL) > 0 { - acl = make([]*raw.ObjectAccessControl, len(o.ACL)) - for i, rule := range o.ACL { - acl[i] = &raw.ObjectAccessControl{ - Entity: string(rule.Entity), - Role: string(rule.Role), - } - } - } - return &raw.Object{ - Bucket: bucket, - Name: o.Name, - ContentType: o.ContentType, - ContentEncoding: o.ContentEncoding, - ContentLanguage: o.ContentLanguage, - CacheControl: o.CacheControl, - ContentDisposition: o.ContentDisposition, - Acl: acl, - Metadata: o.Metadata, - } -} - -// Object represents a Google Cloud Storage (GCS) object. -type Object struct { - // Bucket is the name of the bucket containing this GCS object. - Bucket string - - // Name is the name of the object within the bucket. - Name string - - // ContentType is the MIME type of the object's content. - ContentType string - - // ContentLanguage is the content language of the object's content. - ContentLanguage string - - // CacheControl is the Cache-Control header to be sent in the response - // headers when serving the object data. - CacheControl string - - // ACL is the list of access control rules for the object. - ACL []ACLRule - - // Owner is the owner of the object. - // - // If non-zero, it is in the form of "user-". - Owner string - - // Size is the length of the object's content. - Size int64 - - // ContentEncoding is the encoding of the object's content. - ContentEncoding string - - // MD5 is the MD5 hash of the object's content. - MD5 []byte - - // CRC32C is the CRC32 checksum of the object's content using - // the Castagnoli93 polynomial. - CRC32C uint32 - - // MediaLink is an URL to the object's content. - MediaLink string - - // Metadata represents user-provided metadata, in key/value pairs. - // It can be nil if no metadata is provided. - Metadata map[string]string - - // Generation is the generation number of the object's content. - Generation int64 - - // MetaGeneration is the version of the metadata for this - // object at this generation. This field is used for preconditions - // and for detecting changes in metadata. A metageneration number - // is only meaningful in the context of a particular generation - // of a particular object. - MetaGeneration int64 - - // StorageClass is the storage class of the bucket. - // This value defines how objects in the bucket are stored and - // determines the SLA and the cost of storage. Typical values are - // "STANDARD" and "DURABLE_REDUCED_AVAILABILITY". - // It defaults to "STANDARD". - StorageClass string - - // Deleted is the time the object was deleted. - // If not deleted, it is the zero value. - Deleted time.Time - - // Updated is the creation or modification time of the object. 
- // For buckets with versioning enabled, changing an object's - // metadata does not change this property. - Updated time.Time -} - -// convertTime converts a time in RFC3339 format to time.Time. -// If any error occurs in parsing, the zero-value time.Time is silently returned. -func convertTime(t string) time.Time { - var r time.Time - if t != "" { - r, _ = time.Parse(time.RFC3339, t) - } - return r -} - -func newObject(o *raw.Object) *Object { - if o == nil { - return nil - } - acl := make([]ACLRule, len(o.Acl)) - for i, rule := range o.Acl { - acl[i] = ACLRule{ - Entity: ACLEntity(rule.Entity), - Role: ACLRole(rule.Role), - } - } - owner := "" - if o.Owner != nil { - owner = o.Owner.Entity - } - md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash) - var crc32c uint32 - d, err := base64.StdEncoding.DecodeString(o.Crc32c) - if err == nil && len(d) == 4 { - crc32c = uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3]) - } - return &Object{ - Bucket: o.Bucket, - Name: o.Name, - ContentType: o.ContentType, - ContentLanguage: o.ContentLanguage, - CacheControl: o.CacheControl, - ACL: acl, - Owner: owner, - ContentEncoding: o.ContentEncoding, - Size: int64(o.Size), - MD5: md5, - CRC32C: crc32c, - MediaLink: o.MediaLink, - Metadata: o.Metadata, - Generation: o.Generation, - MetaGeneration: o.Metageneration, - StorageClass: o.StorageClass, - Deleted: convertTime(o.TimeDeleted), - Updated: convertTime(o.Updated), - } -} - -// Query represents a query to filter objects from a bucket. -type Query struct { - // Delimiter returns results in a directory-like fashion. - // Results will contain only objects whose names, aside from the - // prefix, do not contain delimiter. Objects whose names, - // aside from the prefix, contain delimiter will have their name, - // truncated after the delimiter, returned in prefixes. - // Duplicate prefixes are omitted. - // Optional. - Delimiter string - - // Prefix is the prefix filter to query objects - // whose names begin with this prefix. - // Optional. - Prefix string - - // Versions indicates whether multiple versions of the same - // object will be included in the results. - Versions bool - - // Cursor is a previously-returned page token - // representing part of the larger set of results to view. - // Optional. - Cursor string - - // MaxResults is the maximum number of items plus prefixes - // to return. As duplicate prefixes are omitted, - // fewer total results may be returned than requested. - // The default page limit is used if it is negative or zero. - MaxResults int -} - -// Objects represents a list of objects returned from -// a bucket look-p request and a query to retrieve more -// objects from the next pages. -type Objects struct { - // Results represent a list of object results. - Results []*Object - - // Next is the continuation query to retrieve more - // results with the same filtering criteria. If there - // are no more results to retrieve, it is nil. - Next *Query - - // Prefixes represents prefixes of objects - // matching-but-not-listed up to and including - // the requested delimiter. - Prefixes []string -} - -// contentTyper implements ContentTyper to enable an -// io.ReadCloser to specify its MIME type. -type contentTyper struct { - io.Reader - t string -} - -func (c *contentTyper) ContentType() string { - return c.t -} - -// A Writer writes a Cloud Storage object. -type Writer struct { - // ObjectAttrs are optional attributes to set on the object. Any attributes - // must be initialized before the first Write call. 
Nil or zero-valued - // attributes are ignored. - ObjectAttrs - - ctx context.Context - bucket string - name string - - once sync.Once - - opened bool - r io.Reader - pw *io.PipeWriter - - donec chan struct{} // closed after err and obj are set. - err error - obj *Object -} - -func (w *Writer) open() { - attrs := w.ObjectAttrs - // Always set the name, otherwise the backend - // rejects the request and responds with an HTTP 400. - if attrs.Name == "" { - attrs.Name = w.name - } - pr, pw := io.Pipe() - w.r = &contentTyper{pr, attrs.ContentType} - w.pw = pw - w.opened = true - - go func() { - resp, err := rawService(w.ctx).Objects.Insert( - w.bucket, attrs.toRawObject(w.bucket)).Media(w.r).Projection("full").Context(w.ctx).Do() - w.err = err - if err == nil { - w.obj = newObject(resp) - } else { - pr.CloseWithError(w.err) - } - close(w.donec) - }() -} - -// Write appends to w. -func (w *Writer) Write(p []byte) (n int, err error) { - if w.err != nil { - return 0, w.err - } - if !w.opened { - w.open() - } - return w.pw.Write(p) -} - -// Close completes the write operation and flushes any buffered data. -// If Close doesn't return an error, metadata about the written object -// can be retrieved by calling Object. -func (w *Writer) Close() error { - if !w.opened { - w.open() - } - if err := w.pw.Close(); err != nil { - return err - } - <-w.donec - return w.err -} - -// CloseWithError aborts the write operation with the provided error. -// CloseWithError always returns nil. -func (w *Writer) CloseWithError(err error) error { - if !w.opened { - return nil - } - return w.pw.CloseWithError(err) -} - -// Object returns metadata about a successfully-written object. -// It's only valid to call it after Close returns nil. -func (w *Writer) Object() *Object { - return w.obj -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/.travis.yml b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/.travis.yml deleted file mode 100644 index 055d6641..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -before_install: - - go get github.com/axw/gocov/gocov - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover - -install: - - mkdir -p "$GOPATH/src/google.golang.org" - - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/google.golang.org/grpc" - -script: - - make test testrace diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/CONTRIBUTING.md deleted file mode 100644 index 407d384a..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ /dev/null @@ -1,23 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to grpc! Here is some guideline -and information about how to do so. - -## Getting started - -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -### Filing Issues -When filing an issue, make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? 
- -### Contributing code -Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/LICENSE b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/LICENSE deleted file mode 100644 index f4988b45..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2014, Google Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/Makefile b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/Makefile deleted file mode 100644 index 12e84e4e..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/Makefile +++ /dev/null @@ -1,50 +0,0 @@ -.PHONY: \ - all \ - deps \ - updatedeps \ - testdeps \ - updatetestdeps \ - build \ - proto \ - test \ - testrace \ - clean \ - -all: test testrace - -deps: - go get -d -v google.golang.org/grpc/... - -updatedeps: - go get -d -v -u -f google.golang.org/grpc/... - -testdeps: - go get -d -v -t google.golang.org/grpc/... - -updatetestdeps: - go get -d -v -t -u -f google.golang.org/grpc/... - -build: deps - go build google.golang.org/grpc/... - -proto: - @ if ! which protoc > /dev/null; then \ - echo "error: protoc not installed" >&2; \ - exit 1; \ - fi - go get -v github.com/golang/protobuf/protoc-gen-go - for file in $$(git ls-files '*.proto'); do \ - protoc -I $$(dirname $$file) --go_out=plugins=grpc:$$(dirname $$file) $$file; \ - done - -test: testdeps - go test -v -cpu 1,4 google.golang.org/grpc/... - -testrace: testdeps - go test -v -race -cpu 1,4 google.golang.org/grpc/... - -clean: - go clean google.golang.org/grpc/... 
- -coverage: testdeps - ./coverage.sh --coveralls diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/PATENTS b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/PATENTS deleted file mode 100644 index 619f9dbf..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the GRPC project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of GRPC, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of GRPC. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of GRPC or any code incorporated within this -implementation of GRPC constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of GRPC -shall terminate as of the date such litigation is filed. diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/README.md b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/README.md deleted file mode 100644 index 37b05f09..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/README.md +++ /dev/null @@ -1,32 +0,0 @@ -#gRPC-Go - -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) - -The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide. - -Installation ------------- - -To install this package, you need to install Go 1.4 or above and setup your Go workspace on your computer. The simplest way to install the library is to run: - -``` -$ go get google.golang.org/grpc -``` - -Prerequisites -------------- - -This requires Go 1.4 or above. - -Constraints ------------ -The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants. - -Documentation -------------- -See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/). 
- -Status ------- -Beta release - diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/call.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/call.go deleted file mode 100644 index 504a6e18..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/call.go +++ /dev/null @@ -1,190 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "bytes" - "io" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/trace" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/transport" -) - -// recvResponse receives and parses an RPC response. -// On error, it returns the error and indicates whether the call should be retried. -// -// TODO(zhaoq): Check whether the received message sequence is valid. -func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error { - // Try to acquire header metadata from the server if there is any. - var err error - c.headerMD, err = stream.Header() - if err != nil { - return err - } - p := &parser{r: stream} - for { - if err = recv(p, dopts.codec, stream, dopts.dc, reply); err != nil { - if err == io.EOF { - break - } - return err - } - } - c.trailerMD = stream.Trailer() - return nil -} - -// sendRequest writes out various information of an RPC such as Context and Message. 
-func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) { - stream, err := t.NewStream(ctx, callHdr) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - if _, ok := err.(transport.ConnectionError); !ok { - t.CloseStream(stream, err) - } - } - }() - var cbuf *bytes.Buffer - if compressor != nil { - cbuf = new(bytes.Buffer) - } - outBuf, err := encode(codec, args, compressor, cbuf) - if err != nil { - return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err) - } - err = t.Write(stream, outBuf, opts) - if err != nil { - return nil, err - } - // Sent successfully. - return stream, nil -} - -// Invoke is called by the generated code. It sends the RPC request on the -// wire and returns after response is received. -func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) { - var c callInfo - for _, o := range opts { - if err := o.before(&c); err != nil { - return toRPCErr(err) - } - } - defer func() { - for _, o := range opts { - o.after(&c) - } - }() - if EnableTracing { - c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) - defer c.traceInfo.tr.Finish() - c.traceInfo.firstLine.client = true - if deadline, ok := ctx.Deadline(); ok { - c.traceInfo.firstLine.deadline = deadline.Sub(time.Now()) - } - c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false) - // TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set. - defer func() { - if err != nil { - c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - c.traceInfo.tr.SetError() - } - }() - } - topts := &transport.Options{ - Last: true, - Delay: false, - } - var ( - lastErr error // record the error that happened - ) - for { - var ( - err error - t transport.ClientTransport - stream *transport.Stream - ) - // TODO(zhaoq): Need a formal spec of retry strategy for non-failfast rpcs. - if lastErr != nil && c.failFast { - return toRPCErr(lastErr) - } - callHdr := &transport.CallHdr{ - Host: cc.authority, - Method: method, - } - if cc.dopts.cp != nil { - callHdr.SendCompress = cc.dopts.cp.Type() - } - t, err = cc.dopts.picker.Pick(ctx) - if err != nil { - if lastErr != nil { - // This was a retry; return the error from the last attempt. 
- return toRPCErr(lastErr) - } - return toRPCErr(err) - } - if c.traceInfo.tr != nil { - c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true) - } - stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts) - if err != nil { - if _, ok := err.(transport.ConnectionError); ok { - lastErr = err - continue - } - if lastErr != nil { - return toRPCErr(lastErr) - } - return toRPCErr(err) - } - // Receive the response - lastErr = recvResponse(cc.dopts, t, &c, stream, reply) - if _, ok := lastErr.(transport.ConnectionError); ok { - continue - } - if c.traceInfo.tr != nil { - c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true) - } - t.CloseStream(stream, lastErr) - if lastErr != nil { - return toRPCErr(lastErr) - } - return Errorf(stream.StatusCode(), stream.StatusDesc()) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/clientconn.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/clientconn.go deleted file mode 100644 index e2264236..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/clientconn.go +++ /dev/null @@ -1,590 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "errors" - "fmt" - "net" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/trace" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/transport" -) - -var ( - // ErrUnspecTarget indicates that the target address is unspecified. - ErrUnspecTarget = errors.New("grpc: target is unspecified") - // ErrNoTransportSecurity indicates that there is no transport security - // being set for ClientConn. Users should either set one or explicitly - // call WithInsecure DialOption to disable security. 
- ErrNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") - // ErrCredentialsMisuse indicates that users want to transmit security information - // (e.g., oauth2 token) which requires secure connection on an insecure - // connection. - ErrCredentialsMisuse = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportAuthenticator() to set)") - // ErrClientConnClosing indicates that the operation is illegal because - // the session is closing. - ErrClientConnClosing = errors.New("grpc: the client connection is closing") - // ErrClientConnTimeout indicates that the connection could not be - // established or re-established within the specified timeout. - ErrClientConnTimeout = errors.New("grpc: timed out trying to connect") - // minimum time to give a connection to complete - minConnectTimeout = 20 * time.Second -) - -// dialOptions configure a Dial call. dialOptions are set by the DialOption -// values passed to Dial. -type dialOptions struct { - codec Codec - cp Compressor - dc Decompressor - picker Picker - block bool - insecure bool - copts transport.ConnectOptions -} - -// DialOption configures how we set up the connection. -type DialOption func(*dialOptions) - -// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling. -func WithCodec(c Codec) DialOption { - return func(o *dialOptions) { - o.codec = c - } -} - -// WithCompressor returns a DialOption which sets a CompressorGenerator for generating message -// compressor. -func WithCompressor(cp Compressor) DialOption { - return func(o *dialOptions) { - o.cp = cp - } -} - -// WithDecompressor returns a DialOption which sets a DecompressorGenerator for generating -// message decompressor. -func WithDecompressor(dc Decompressor) DialOption { - return func(o *dialOptions) { - o.dc = dc - } -} - -// WithPicker returns a DialOption which sets a picker for connection selection. -func WithPicker(p Picker) DialOption { - return func(o *dialOptions) { - o.picker = p - } -} - -// WithBlock returns a DialOption which makes caller of Dial blocks until the underlying -// connection is up. Without this, Dial returns immediately and connecting the server -// happens in background. -func WithBlock() DialOption { - return func(o *dialOptions) { - o.block = true - } -} - -// WithInsecure returns a DialOption which disables transport security for this ClientConn. -// Note that transport security is required unless WithInsecure is set. -func WithInsecure() DialOption { - return func(o *dialOptions) { - o.insecure = true - } -} - -// WithTransportCredentials returns a DialOption which configures a -// connection level security credentials (e.g., TLS/SSL). -func WithTransportCredentials(creds credentials.TransportAuthenticator) DialOption { - return func(o *dialOptions) { - o.copts.AuthOptions = append(o.copts.AuthOptions, creds) - } -} - -// WithPerRPCCredentials returns a DialOption which sets -// credentials which will place auth state on each outbound RPC. -func WithPerRPCCredentials(creds credentials.Credentials) DialOption { - return func(o *dialOptions) { - o.copts.AuthOptions = append(o.copts.AuthOptions, creds) - } -} - -// WithTimeout returns a DialOption that configures a timeout for dialing a client connection. 
-func WithTimeout(d time.Duration) DialOption { - return func(o *dialOptions) { - o.copts.Timeout = d - } -} - -// WithDialer returns a DialOption that specifies a function to use for dialing network addresses. -func WithDialer(f func(addr string, timeout time.Duration) (net.Conn, error)) DialOption { - return func(o *dialOptions) { - o.copts.Dialer = f - } -} - -// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs. -func WithUserAgent(s string) DialOption { - return func(o *dialOptions) { - o.copts.UserAgent = s - } -} - -// Dial creates a client connection the given target. -func Dial(target string, opts ...DialOption) (*ClientConn, error) { - cc := &ClientConn{ - target: target, - } - for _, opt := range opts { - opt(&cc.dopts) - } - if cc.dopts.codec == nil { - // Set the default codec. - cc.dopts.codec = protoCodec{} - } - if cc.dopts.picker == nil { - cc.dopts.picker = &unicastPicker{ - target: target, - } - } - if err := cc.dopts.picker.Init(cc); err != nil { - return nil, err - } - colonPos := strings.LastIndex(target, ":") - if colonPos == -1 { - colonPos = len(target) - } - cc.authority = target[:colonPos] - return cc, nil -} - -// ConnectivityState indicates the state of a client connection. -type ConnectivityState int - -const ( - // Idle indicates the ClientConn is idle. - Idle ConnectivityState = iota - // Connecting indicates the ClienConn is connecting. - Connecting - // Ready indicates the ClientConn is ready for work. - Ready - // TransientFailure indicates the ClientConn has seen a failure but expects to recover. - TransientFailure - // Shutdown indicates the ClientConn has started shutting down. - Shutdown -) - -func (s ConnectivityState) String() string { - switch s { - case Idle: - return "IDLE" - case Connecting: - return "CONNECTING" - case Ready: - return "READY" - case TransientFailure: - return "TRANSIENT_FAILURE" - case Shutdown: - return "SHUTDOWN" - default: - panic(fmt.Sprintf("unknown connectivity state: %d", s)) - } -} - -// ClientConn represents a client connection to an RPC service. -type ClientConn struct { - target string - authority string - dopts dialOptions -} - -// State returns the connectivity state of cc. -// This is EXPERIMENTAL API. -func (cc *ClientConn) State() (ConnectivityState, error) { - return cc.dopts.picker.State() -} - -// WaitForStateChange blocks until the state changes to something other than the sourceState. -// It returns the new state or error. -// This is EXPERIMENTAL API. -func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { - return cc.dopts.picker.WaitForStateChange(ctx, sourceState) -} - -// Close starts to tear down the ClientConn. -func (cc *ClientConn) Close() error { - return cc.dopts.picker.Close() -} - -// Conn is a client connection to a single destination. -type Conn struct { - target string - dopts dialOptions - resetChan chan int - shutdownChan chan struct{} - events trace.EventLog - - mu sync.Mutex - state ConnectivityState - stateCV *sync.Cond - // ready is closed and becomes nil when a new transport is up or failed - // due to timeout. - ready chan struct{} - transport transport.ClientTransport -} - -// NewConn creates a Conn. 
-func NewConn(cc *ClientConn) (*Conn, error) { - if cc.target == "" { - return nil, ErrUnspecTarget - } - c := &Conn{ - target: cc.target, - dopts: cc.dopts, - resetChan: make(chan int, 1), - shutdownChan: make(chan struct{}), - } - if EnableTracing { - c.events = trace.NewEventLog("grpc.ClientConn", c.target) - } - if !c.dopts.insecure { - var ok bool - for _, cd := range c.dopts.copts.AuthOptions { - if _, ok := cd.(credentials.TransportAuthenticator); !ok { - continue - } - ok = true - } - if !ok { - return nil, ErrNoTransportSecurity - } - } else { - for _, cd := range c.dopts.copts.AuthOptions { - if cd.RequireTransportSecurity() { - return nil, ErrCredentialsMisuse - } - } - } - c.stateCV = sync.NewCond(&c.mu) - if c.dopts.block { - if err := c.resetTransport(false); err != nil { - c.Close() - return nil, err - } - // Start to monitor the error status of transport. - go c.transportMonitor() - } else { - // Start a goroutine connecting to the server asynchronously. - go func() { - if err := c.resetTransport(false); err != nil { - grpclog.Printf("Failed to dial %s: %v; please retry.", c.target, err) - c.Close() - return - } - c.transportMonitor() - }() - } - return c, nil -} - -// printf records an event in cc's event log, unless cc has been closed. -// REQUIRES cc.mu is held. -func (cc *Conn) printf(format string, a ...interface{}) { - if cc.events != nil { - cc.events.Printf(format, a...) - } -} - -// errorf records an error in cc's event log, unless cc has been closed. -// REQUIRES cc.mu is held. -func (cc *Conn) errorf(format string, a ...interface{}) { - if cc.events != nil { - cc.events.Errorf(format, a...) - } -} - -// State returns the connectivity state of the Conn -func (cc *Conn) State() ConnectivityState { - cc.mu.Lock() - defer cc.mu.Unlock() - return cc.state -} - -// WaitForStateChange blocks until the state changes to something other than the sourceState. -func (cc *Conn) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { - cc.mu.Lock() - defer cc.mu.Unlock() - if sourceState != cc.state { - return cc.state, nil - } - done := make(chan struct{}) - var err error - go func() { - select { - case <-ctx.Done(): - cc.mu.Lock() - err = ctx.Err() - cc.stateCV.Broadcast() - cc.mu.Unlock() - case <-done: - } - }() - defer close(done) - for sourceState == cc.state { - cc.stateCV.Wait() - if err != nil { - return cc.state, err - } - } - return cc.state, nil -} - -// NotifyReset tries to signal the underlying transport needs to be reset due to -// for example a name resolution change in flight. -func (cc *Conn) NotifyReset() { - select { - case cc.resetChan <- 0: - default: - } -} - -func (cc *Conn) resetTransport(closeTransport bool) error { - var retries int - start := time.Now() - for { - cc.mu.Lock() - cc.printf("connecting") - if cc.state == Shutdown { - // cc.Close() has been invoked. - cc.mu.Unlock() - return ErrClientConnClosing - } - cc.state = Connecting - cc.stateCV.Broadcast() - cc.mu.Unlock() - if closeTransport { - cc.transport.Close() - } - // Adjust timeout for the current try. 
- copts := cc.dopts.copts - if copts.Timeout < 0 { - cc.Close() - return ErrClientConnTimeout - } - if copts.Timeout > 0 { - copts.Timeout -= time.Since(start) - if copts.Timeout <= 0 { - cc.Close() - return ErrClientConnTimeout - } - } - sleepTime := backoff(retries) - timeout := sleepTime - if timeout < minConnectTimeout { - timeout = minConnectTimeout - } - if copts.Timeout == 0 || copts.Timeout > timeout { - copts.Timeout = timeout - } - connectTime := time.Now() - addr, err := cc.dopts.picker.PickAddr() - var newTransport transport.ClientTransport - if err == nil { - newTransport, err = transport.NewClientTransport(addr, &copts) - } - if err != nil { - cc.mu.Lock() - if cc.state == Shutdown { - // cc.Close() has been invoked. - cc.mu.Unlock() - return ErrClientConnClosing - } - cc.errorf("transient failure: %v", err) - cc.state = TransientFailure - cc.stateCV.Broadcast() - if cc.ready != nil { - close(cc.ready) - cc.ready = nil - } - cc.mu.Unlock() - sleepTime -= time.Since(connectTime) - if sleepTime < 0 { - sleepTime = 0 - } - // Fail early before falling into sleep. - if cc.dopts.copts.Timeout > 0 && cc.dopts.copts.Timeout < sleepTime+time.Since(start) { - cc.mu.Lock() - cc.errorf("connection timeout") - cc.mu.Unlock() - cc.Close() - return ErrClientConnTimeout - } - closeTransport = false - time.Sleep(sleepTime) - retries++ - grpclog.Printf("grpc: Conn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, cc.target) - continue - } - cc.mu.Lock() - cc.printf("ready") - if cc.state == Shutdown { - // cc.Close() has been invoked. - cc.mu.Unlock() - newTransport.Close() - return ErrClientConnClosing - } - cc.state = Ready - cc.stateCV.Broadcast() - cc.transport = newTransport - if cc.ready != nil { - close(cc.ready) - cc.ready = nil - } - cc.mu.Unlock() - return nil - } -} - -func (cc *Conn) reconnect() bool { - cc.mu.Lock() - if cc.state == Shutdown { - // cc.Close() has been invoked. - cc.mu.Unlock() - return false - } - cc.state = TransientFailure - cc.stateCV.Broadcast() - cc.mu.Unlock() - if err := cc.resetTransport(true); err != nil { - // The ClientConn is closing. - cc.mu.Lock() - cc.printf("transport exiting: %v", err) - cc.mu.Unlock() - grpclog.Printf("grpc: Conn.transportMonitor exits due to: %v", err) - return false - } - return true -} - -// Run in a goroutine to track the error in transport and create the -// new transport if an error happens. It returns when the channel is closing. -func (cc *Conn) transportMonitor() { - for { - select { - // shutdownChan is needed to detect the teardown when - // the ClientConn is idle (i.e., no RPC in flight). - case <-cc.shutdownChan: - return - case <-cc.resetChan: - if !cc.reconnect() { - return - } - case <-cc.transport.Error(): - if !cc.reconnect() { - return - } - // Tries to drain reset signal if there is any since it is out-dated. - select { - case <-cc.resetChan: - default: - } - } - } -} - -// Wait blocks until i) the new transport is up or ii) ctx is done or iii) cc is closed. -func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) { - for { - cc.mu.Lock() - switch { - case cc.state == Shutdown: - cc.mu.Unlock() - return nil, ErrClientConnClosing - case cc.state == Ready: - ct := cc.transport - cc.mu.Unlock() - return ct, nil - default: - ready := cc.ready - if ready == nil { - ready = make(chan struct{}) - cc.ready = ready - } - cc.mu.Unlock() - select { - case <-ctx.Done(): - return nil, transport.ContextErr(ctx.Err()) - // Wait until the new transport is ready or failed. 
- case <-ready: - } - } - } -} - -// Close starts to tear down the Conn. Returns ErrClientConnClosing if -// it has been closed (mostly due to dial time-out). -// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in -// some edge cases (e.g., the caller opens and closes many ClientConn's in a -// tight loop. -func (cc *Conn) Close() error { - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.state == Shutdown { - return ErrClientConnClosing - } - cc.state = Shutdown - cc.stateCV.Broadcast() - if cc.events != nil { - cc.events.Finish() - cc.events = nil - } - if cc.ready != nil { - close(cc.ready) - cc.ready = nil - } - if cc.transport != nil { - cc.transport.Close() - } - if cc.shutdownChan != nil { - close(cc.shutdownChan) - } - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codegen.sh b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codegen.sh deleted file mode 100644 index b0094888..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codegen.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -# This script serves as an example to demonstrate how to generate the gRPC-Go -# interface and the related messages from .proto file. -# -# It assumes the installation of i) Google proto buffer compiler at -# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen -# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have -# not, please install them first. -# -# We recommend running this script at $GOPATH/src. -# -# If this is not what you need, feel free to make your own scripts. Again, this -# script is for demonstration purpose. -# -proto=$1 -protoc --go_out=plugins=grpc:. $proto diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codes/code_string.go deleted file mode 100644 index e6762d08..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codes/code_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// generated by stringer -type=Code; DO NOT EDIT - -package codes - -import "fmt" - -const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated" - -var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192} - -func (i Code) String() string { - if i+1 >= Code(len(_Code_index)) { - return fmt.Sprintf("Code(%d)", i) - } - return _Code_name[_Code_index[i]:_Code_index[i+1]] -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codes/codes.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codes/codes.go deleted file mode 100644 index 37c5b860..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/codes/codes.go +++ /dev/null @@ -1,159 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -// Package codes defines the canonical error codes used by gRPC. It is -// consistent across various languages. -package codes - -// A Code is an unsigned 32-bit error code as defined in the gRPC spec. -type Code uint32 - -//go:generate stringer -type=Code - -const ( - // OK is returned on success. - OK Code = 0 - - // Canceled indicates the operation was cancelled (typically by the caller). - Canceled Code = 1 - - // Unknown error. An example of where this error may be returned is - // if a Status value received from another address space belongs to - // an error-space that is not known in this address space. Also - // errors raised by APIs that do not return enough error information - // may be converted to this error. - Unknown Code = 2 - - // InvalidArgument indicates client specified an invalid argument. - // Note that this differs from FailedPrecondition. It indicates arguments - // that are problematic regardless of the state of the system - // (e.g., a malformed file name). - InvalidArgument Code = 3 - - // DeadlineExceeded means operation expired before completion. - // For operations that change the state of the system, this error may be - // returned even if the operation has completed successfully. For - // example, a successful response from a server could have been delayed - // long enough for the deadline to expire. - DeadlineExceeded Code = 4 - - // NotFound means some requested entity (e.g., file or directory) was - // not found. - NotFound Code = 5 - - // AlreadyExists means an attempt to create an entity failed because one - // already exists. - AlreadyExists Code = 6 - - // PermissionDenied indicates the caller does not have permission to - // execute the specified operation. It must not be used for rejections - // caused by exhausting some resource (use ResourceExhausted - // instead for those errors). It must not be - // used if the caller cannot be identified (use Unauthenticated - // instead for those errors). - PermissionDenied Code = 7 - - // Unauthenticated indicates the request does not have valid - // authentication credentials for the operation. - Unauthenticated Code = 16 - - // ResourceExhausted indicates some resource has been exhausted, perhaps - // a per-user quota, or perhaps the entire file system is out of space. 
- ResourceExhausted Code = 8 - - // FailedPrecondition indicates operation was rejected because the - // system is not in a state required for the operation's execution. - // For example, directory to be deleted may be non-empty, an rmdir - // operation is applied to a non-directory, etc. - // - // A litmus test that may help a service implementor in deciding - // between FailedPrecondition, Aborted, and Unavailable: - // (a) Use Unavailable if the client can retry just the failing call. - // (b) Use Aborted if the client should retry at a higher-level - // (e.g., restarting a read-modify-write sequence). - // (c) Use FailedPrecondition if the client should not retry until - // the system state has been explicitly fixed. E.g., if an "rmdir" - // fails because the directory is non-empty, FailedPrecondition - // should be returned since the client should not retry unless - // they have first fixed up the directory by deleting files from it. - // (d) Use FailedPrecondition if the client performs conditional - // REST Get/Update/Delete on a resource and the resource on the - // server does not match the condition. E.g., conflicting - // read-modify-write on the same resource. - FailedPrecondition Code = 9 - - // Aborted indicates the operation was aborted, typically due to a - // concurrency issue like sequencer check failures, transaction aborts, - // etc. - // - // See litmus test above for deciding between FailedPrecondition, - // Aborted, and Unavailable. - Aborted Code = 10 - - // OutOfRange means operation was attempted past the valid range. - // E.g., seeking or reading past end of file. - // - // Unlike InvalidArgument, this error indicates a problem that may - // be fixed if the system state changes. For example, a 32-bit file - // system will generate InvalidArgument if asked to read at an - // offset that is not in the range [0,2^32-1], but it will generate - // OutOfRange if asked to read from an offset past the current - // file size. - // - // There is a fair bit of overlap between FailedPrecondition and - // OutOfRange. We recommend using OutOfRange (the more specific - // error) when it applies so that callers who are iterating through - // a space can easily look for an OutOfRange error to detect when - // they are done. - OutOfRange Code = 11 - - // Unimplemented indicates operation is not implemented or not - // supported/enabled in this service. - Unimplemented Code = 12 - - // Internal errors. Means some invariants expected by underlying - // system has been broken. If you see one of these errors, - // something is very broken. - Internal Code = 13 - - // Unavailable indicates the service is currently unavailable. - // This is a most likely a transient condition and may be corrected - // by retrying with a backoff. - // - // See litmus test above for deciding between FailedPrecondition, - // Aborted, and Unavailable. - Unavailable Code = 14 - - // DataLoss indicates unrecoverable data loss or corruption. 
- DataLoss Code = 15 -) diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/coverage.sh b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/coverage.sh deleted file mode 100644 index 12023537..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/coverage.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash - -set -e - -workdir=.cover -profile="$workdir/cover.out" -mode=set -end2endtest="google.golang.org/grpc/test" - -generate_cover_data() { - rm -rf "$workdir" - mkdir "$workdir" - - for pkg in "$@"; do - if [ $pkg == "google.golang.org/grpc" -o $pkg == "google.golang.org/grpc/transport" -o $pkg == "google.golang.org/grpc/metadata" -o $pkg == "google.golang.org/grpc/credentials" ] - then - f="$workdir/$(echo $pkg | tr / -)" - go test -covermode="$mode" -coverprofile="$f.cover" "$pkg" - go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest" - fi - done - - echo "mode: $mode" >"$profile" - grep -h -v "^mode:" "$workdir"/*.cover >>"$profile" -} - -show_cover_report() { - go tool cover -${1}="$profile" -} - -push_to_coveralls() { - goveralls -coverprofile="$profile" -} - -generate_cover_data $(go list ./...) -show_cover_report func -case "$1" in -"") - ;; ---html) - show_cover_report html ;; ---coveralls) - push_to_coveralls ;; -*) - echo >&2 "error: invalid option: $1" ;; -esac -rm -rf "$workdir" diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/credentials/credentials.go deleted file mode 100644 index 0b0b89b6..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/credentials/credentials.go +++ /dev/null @@ -1,226 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - */ - -// Package credentials implements various credentials supported by gRPC library, -// which encapsulate all the state needed by a client to authenticate with a -// server and make various assertions, e.g., about the client's identity, role, -// or whether it is authorized to make a particular call. -package credentials - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net" - "strings" - "time" - - "golang.org/x/net/context" -) - -var ( - // alpnProtoStr are the specified application level protocols for gRPC. - alpnProtoStr = []string{"h2"} -) - -// Credentials defines the common interface all supported credentials must -// implement. -type Credentials interface { - // GetRequestMetadata gets the current request metadata, refreshing - // tokens if required. This should be called by the transport layer on - // each request, and the data should be populated in headers or other - // context. uri is the URI of the entry point for the request. When - // supported by the underlying implementation, ctx can be used for - // timeout and cancellation. - // TODO(zhaoq): Define the set of the qualified keys instead of leaving - // it as an arbitrary string. - GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) - // RequireTransportSecurity indicates whether the credentails requires - // transport security. - RequireTransportSecurity() bool -} - -// ProtocolInfo provides information regarding the gRPC wire protocol version, -// security protocol, security protocol version in use, etc. -type ProtocolInfo struct { - // ProtocolVersion is the gRPC wire protocol version. - ProtocolVersion string - // SecurityProtocol is the security protocol in use. - SecurityProtocol string - // SecurityVersion is the security protocol version. - SecurityVersion string -} - -// AuthInfo defines the common interface for the auth information the users are interested in. -type AuthInfo interface { - AuthType() string -} - -// TransportAuthenticator defines the common interface for all the live gRPC wire -// protocols and supported transport security protocols (e.g., TLS, SSL). -type TransportAuthenticator interface { - // ClientHandshake does the authentication handshake specified by the corresponding - // authentication protocol on rawConn for clients. It returns the authenticated - // connection and the corresponding auth information about the connection. - ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, AuthInfo, error) - // ServerHandshake does the authentication handshake for servers. It returns - // the authenticated connection and the corresponding auth information about - // the connection. - ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) - // Info provides the ProtocolInfo of this TransportAuthenticator. - Info() ProtocolInfo - Credentials -} - -// TLSInfo contains the auth information for a TLS authenticated connection. -// It implements the AuthInfo interface. -type TLSInfo struct { - State tls.ConnectionState -} - -func (t TLSInfo) AuthType() string { - return "tls" -} - -// tlsCreds is the credentials required for authenticating a connection using TLS. -type tlsCreds struct { - // TLS configuration - config tls.Config -} - -func (c tlsCreds) Info() ProtocolInfo { - return ProtocolInfo{ - SecurityProtocol: "tls", - SecurityVersion: "1.2", - } -} - -// GetRequestMetadata returns nil, nil since TLS credentials does not have -// metadata. 
-func (c *tlsCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - return nil, nil -} - -func (c *tlsCreds) RequireTransportSecurity() bool { - return true -} - -type timeoutError struct{} - -func (timeoutError) Error() string { return "credentials: Dial timed out" } -func (timeoutError) Timeout() bool { return true } -func (timeoutError) Temporary() bool { return true } - -func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (_ net.Conn, _ AuthInfo, err error) { - // borrow some code from tls.DialWithDialer - var errChannel chan error - if timeout != 0 { - errChannel = make(chan error, 2) - time.AfterFunc(timeout, func() { - errChannel <- timeoutError{} - }) - } - if c.config.ServerName == "" { - colonPos := strings.LastIndex(addr, ":") - if colonPos == -1 { - colonPos = len(addr) - } - c.config.ServerName = addr[:colonPos] - } - conn := tls.Client(rawConn, &c.config) - if timeout == 0 { - err = conn.Handshake() - } else { - go func() { - errChannel <- conn.Handshake() - }() - err = <-errChannel - } - if err != nil { - rawConn.Close() - return nil, nil, err - } - // TODO(zhaoq): Omit the auth info for client now. It is more for - // information than anything else. - return conn, nil, nil -} - -func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { - conn := tls.Server(rawConn, &c.config) - if err := conn.Handshake(); err != nil { - rawConn.Close() - return nil, nil, err - } - return conn, TLSInfo{conn.ConnectionState()}, nil -} - -// NewTLS uses c to construct a TransportAuthenticator based on TLS. -func NewTLS(c *tls.Config) TransportAuthenticator { - tc := &tlsCreds{*c} - tc.config.NextProtos = alpnProtoStr - return tc -} - -// NewClientTLSFromCert constructs a TLS from the input certificate for client. -func NewClientTLSFromCert(cp *x509.CertPool, serverName string) TransportAuthenticator { - return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}) -} - -// NewClientTLSFromFile constructs a TLS from the input certificate file for client. -func NewClientTLSFromFile(certFile, serverName string) (TransportAuthenticator, error) { - b, err := ioutil.ReadFile(certFile) - if err != nil { - return nil, err - } - cp := x509.NewCertPool() - if !cp.AppendCertsFromPEM(b) { - return nil, fmt.Errorf("credentials: failed to append certificates") - } - return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}), nil -} - -// NewServerTLSFromCert constructs a TLS from the input certificate for server. -func NewServerTLSFromCert(cert *tls.Certificate) TransportAuthenticator { - return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) -} - -// NewServerTLSFromFile constructs a TLS from the input certificate file and key -// file for server. -func NewServerTLSFromFile(certFile, keyFile string) (TransportAuthenticator, error) { - cert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return nil, err - } - return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/doc.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/doc.go deleted file mode 100644 index b4c0e740..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -/* -Package grpc implements an RPC system called gRPC. - -See www.grpc.io for more information about gRPC. 
-*/ -package grpc diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/grpclog/logger.go deleted file mode 100644 index 2cc09be4..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/grpclog/logger.go +++ /dev/null @@ -1,93 +0,0 @@ -/* - * - * Copyright 2015, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -/* -Package grpclog defines logging for grpc. -*/ -package grpclog - -import ( - "log" - "os" -) - -// Use golang's standard logger by default. -// Access is not mutex-protected: do not modify except in init() -// functions. -var logger Logger = log.New(os.Stderr, "", log.LstdFlags) - -// Logger mimics golang's standard Logger as an interface. -type Logger interface { - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Fatalln(args ...interface{}) - Print(args ...interface{}) - Printf(format string, args ...interface{}) - Println(args ...interface{}) -} - -// SetLogger sets the logger that is used in grpc. Call only from -// init() functions. -func SetLogger(l Logger) { - logger = l -} - -// Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code. -func Fatal(args ...interface{}) { - logger.Fatal(args...) -} - -// Fatalf is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code. -func Fatalf(format string, args ...interface{}) { - logger.Fatalf(format, args...) -} - -// Fatalln is equivalent to Println() followed by a call to os.Exit()) with a non-zero exit code. -func Fatalln(args ...interface{}) { - logger.Fatalln(args...) -} - -// Print prints to the logger. Arguments are handled in the manner of fmt.Print. -func Print(args ...interface{}) { - logger.Print(args...) -} - -// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. -func Printf(format string, args ...interface{}) { - logger.Printf(format, args...) -} - -// Println prints to the logger. 
Arguments are handled in the manner of fmt.Println. -func Println(args ...interface{}) { - logger.Println(args...) -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/internal/internal.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/internal/internal.go deleted file mode 100644 index 5489143a..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/internal/internal.go +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -// Package internal contains gRPC-internal code for testing, to avoid polluting -// the godoc of the top-level grpc package. -package internal - -// TestingCloseConns closes all existing transports but keeps -// grpcServer.lis accepting new connections. -// -// The provided grpcServer must be of type *grpc.Server. It is untyped -// for circular dependency reasons. -var TestingCloseConns func(grpcServer interface{}) - -// TestingUseHandlerImpl enables the http.Handler-based server implementation. -// It must be called before Serve and requires TLS credentials. -// -// The provided grpcServer must be of type *grpc.Server. It is untyped -// for circular dependency reasons. -var TestingUseHandlerImpl func(grpcServer interface{}) diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/metadata/metadata.go deleted file mode 100644 index 58469ddd..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/metadata/metadata.go +++ /dev/null @@ -1,134 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -// Package metadata define the structure of the metadata supported by gRPC library. -package metadata - -import ( - "encoding/base64" - "fmt" - "strings" - - "golang.org/x/net/context" -) - -const ( - binHdrSuffix = "-bin" -) - -// encodeKeyValue encodes key and value qualified for transmission via gRPC. -// Transmitting binary headers violates HTTP/2 spec. -// TODO(zhaoq): Maybe check if k is ASCII also. -func encodeKeyValue(k, v string) (string, string) { - k = strings.ToLower(k) - if strings.HasSuffix(k, binHdrSuffix) { - val := base64.StdEncoding.EncodeToString([]byte(v)) - v = string(val) - } - return k, v -} - -// DecodeKeyValue returns the original key and value corresponding to the -// encoded data in k, v. -func DecodeKeyValue(k, v string) (string, string, error) { - if !strings.HasSuffix(k, binHdrSuffix) { - return k, v, nil - } - val, err := base64.StdEncoding.DecodeString(v) - if err != nil { - return "", "", err - } - return k, string(val), nil -} - -// MD is a mapping from metadata keys to values. Users should use the following -// two convenience functions New and Pairs to generate MD. -type MD map[string][]string - -// New creates a MD from given key-value map. -func New(m map[string]string) MD { - md := MD{} - for k, v := range m { - key, val := encodeKeyValue(k, v) - md[key] = append(md[key], val) - } - return md -} - -// Pairs returns an MD formed by the mapping of key, value ... -// Pairs panics if len(kv) is odd. -func Pairs(kv ...string) MD { - if len(kv)%2 == 1 { - panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) - } - md := MD{} - var k string - for i, s := range kv { - if i%2 == 0 { - k = s - continue - } - key, val := encodeKeyValue(k, s) - md[key] = append(md[key], val) - } - return md -} - -// Len returns the number of items in md. -func (md MD) Len() int { - return len(md) -} - -// Copy returns a copy of md. -func (md MD) Copy() MD { - out := MD{} - for k, v := range md { - for _, i := range v { - out[k] = append(out[k], i) - } - } - return out -} - -type mdKey struct{} - -// NewContext creates a new context with md attached. -func NewContext(ctx context.Context, md MD) context.Context { - return context.WithValue(ctx, mdKey{}, md) -} - -// FromContext returns the MD in ctx if it exists. 
-func FromContext(ctx context.Context) (md MD, ok bool) { - md, ok = ctx.Value(mdKey{}).(MD) - return -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/naming/naming.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/naming/naming.go deleted file mode 100644 index 06605607..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/naming/naming.go +++ /dev/null @@ -1,73 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -// Package naming defines the naming API and related data structures for gRPC. -// The interface is EXPERIMENTAL and may be suject to change. -package naming - -// Operation defines the corresponding operations for a name resolution change. -type Operation uint8 - -const ( - // Add indicates a new address is added. - Add Operation = iota - // Delete indicates an exisiting address is deleted. - Delete -) - -// Update defines a name resolution update. Notice that it is not valid having both -// empty string Addr and nil Metadata in an Update. -type Update struct { - // Op indicates the operation of the update. - Op Operation - // Addr is the updated address. It is empty string if there is no address update. - Addr string - // Metadata is the updated metadata. It is nil if there is no metadata update. - // Metadata is not required for a custom naming implementation. - Metadata interface{} -} - -// Resolver creates a Watcher for a target to track its resolution changes. -type Resolver interface { - // Resolve creates a Watcher for target. - Resolve(target string) (Watcher, error) -} - -// Watcher watches for the updates on the specified target. -type Watcher interface { - // Next blocks until an update or error happens. It may return one or more - // updates. The first call should get the full set of the results. - Next() ([]*Update, error) - // Close closes the Watcher. 
- Close() -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/peer/peer.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/peer/peer.go deleted file mode 100644 index bfa6205b..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/peer/peer.go +++ /dev/null @@ -1,65 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -// Package peer defines various peer information associated with RPCs and -// corresponding utils. -package peer - -import ( - "net" - - "golang.org/x/net/context" - "google.golang.org/grpc/credentials" -) - -// Peer contains the information of the peer for an RPC. -type Peer struct { - // Addr is the peer address. - Addr net.Addr - // AuthInfo is the authentication information of the transport. - // It is nil if there is no transport security being used. - AuthInfo credentials.AuthInfo -} - -type peerKey struct{} - -// NewContext creates a new context with peer information attached. -func NewContext(ctx context.Context, p *Peer) context.Context { - return context.WithValue(ctx, peerKey{}, p) -} - -// FromContext returns the peer information in ctx if it exists. -func FromContext(ctx context.Context) (p *Peer, ok bool) { - p, ok = ctx.Value(peerKey{}).(*Peer) - return -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/picker.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/picker.go deleted file mode 100644 index 50f315b4..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/picker.go +++ /dev/null @@ -1,243 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "container/list" - "fmt" - "sync" - - "golang.org/x/net/context" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/naming" - "google.golang.org/grpc/transport" -) - -// Picker picks a Conn for RPC requests. -// This is EXPERIMENTAL and please do not implement your own Picker for now. -type Picker interface { - // Init does initial processing for the Picker, e.g., initiate some connections. - Init(cc *ClientConn) error - // Pick blocks until either a transport.ClientTransport is ready for the upcoming RPC - // or some error happens. - Pick(ctx context.Context) (transport.ClientTransport, error) - // PickAddr picks a peer address for connecting. This will be called repeated for - // connecting/reconnecting. - PickAddr() (string, error) - // State returns the connectivity state of the underlying connections. - State() (ConnectivityState, error) - // WaitForStateChange blocks until the state changes to something other than - // the sourceState. It returns the new state or error. - WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) - // Close closes all the Conn's owned by this Picker. - Close() error -} - -// unicastPicker is the default Picker which is used when there is no custom Picker -// specified by users. It always picks the same Conn. -type unicastPicker struct { - target string - conn *Conn -} - -func (p *unicastPicker) Init(cc *ClientConn) error { - c, err := NewConn(cc) - if err != nil { - return err - } - p.conn = c - return nil -} - -func (p *unicastPicker) Pick(ctx context.Context) (transport.ClientTransport, error) { - return p.conn.Wait(ctx) -} - -func (p *unicastPicker) PickAddr() (string, error) { - return p.target, nil -} - -func (p *unicastPicker) State() (ConnectivityState, error) { - return p.conn.State(), nil -} - -func (p *unicastPicker) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { - return p.conn.WaitForStateChange(ctx, sourceState) -} - -func (p *unicastPicker) Close() error { - if p.conn != nil { - return p.conn.Close() - } - return nil -} - -// unicastNamingPicker picks an address from a name resolver to set up the connection. 
-type unicastNamingPicker struct { - cc *ClientConn - resolver naming.Resolver - watcher naming.Watcher - mu sync.Mutex - // The list of the addresses are obtained from watcher. - addrs *list.List - // It tracks the current picked addr by PickAddr(). The next PickAddr may - // push it forward on addrs. - pickedAddr *list.Element - conn *Conn -} - -// NewUnicastNamingPicker creates a Picker to pick addresses from a name resolver -// to connect. -func NewUnicastNamingPicker(r naming.Resolver) Picker { - return &unicastNamingPicker{ - resolver: r, - addrs: list.New(), - } -} - -type addrInfo struct { - addr string - // Set to true if this addrInfo needs to be deleted in the next PickAddrr() call. - deleting bool -} - -// processUpdates calls Watcher.Next() once and processes the obtained updates. -func (p *unicastNamingPicker) processUpdates() error { - updates, err := p.watcher.Next() - if err != nil { - return err - } - for _, update := range updates { - switch update.Op { - case naming.Add: - p.mu.Lock() - p.addrs.PushBack(&addrInfo{ - addr: update.Addr, - }) - p.mu.Unlock() - // Initial connection setup - if p.conn == nil { - conn, err := NewConn(p.cc) - if err != nil { - return err - } - p.conn = conn - } - case naming.Delete: - p.mu.Lock() - for e := p.addrs.Front(); e != nil; e = e.Next() { - if update.Addr == e.Value.(*addrInfo).addr { - if e == p.pickedAddr { - // Do not remove the element now if it is the current picked - // one. We leave the deletion to the next PickAddr() call. - e.Value.(*addrInfo).deleting = true - // Notify Conn to close it. All the live RPCs on this connection - // will be aborted. - p.conn.NotifyReset() - } else { - p.addrs.Remove(e) - } - } - } - p.mu.Unlock() - default: - grpclog.Println("Unknown update.Op ", update.Op) - } - } - return nil -} - -// monitor runs in a standalone goroutine to keep watching name resolution updates until the watcher -// is closed. -func (p *unicastNamingPicker) monitor() { - for { - if err := p.processUpdates(); err != nil { - return - } - } -} - -func (p *unicastNamingPicker) Init(cc *ClientConn) error { - w, err := p.resolver.Resolve(cc.target) - if err != nil { - return err - } - p.watcher = w - p.cc = cc - // Get the initial name resolution. 
- if err := p.processUpdates(); err != nil { - return err - } - go p.monitor() - return nil -} - -func (p *unicastNamingPicker) Pick(ctx context.Context) (transport.ClientTransport, error) { - return p.conn.Wait(ctx) -} - -func (p *unicastNamingPicker) PickAddr() (string, error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.pickedAddr == nil { - p.pickedAddr = p.addrs.Front() - } else { - pa := p.pickedAddr - p.pickedAddr = pa.Next() - if pa.Value.(*addrInfo).deleting { - p.addrs.Remove(pa) - } - if p.pickedAddr == nil { - p.pickedAddr = p.addrs.Front() - } - } - if p.pickedAddr == nil { - return "", fmt.Errorf("there is no address available to pick") - } - return p.pickedAddr.Value.(*addrInfo).addr, nil -} - -func (p *unicastNamingPicker) State() (ConnectivityState, error) { - return 0, fmt.Errorf("State() is not supported for unicastNamingPicker") -} - -func (p *unicastNamingPicker) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { - return 0, fmt.Errorf("WaitForStateChange is not supported for unicastNamingPciker") -} - -func (p *unicastNamingPicker) Close() error { - p.watcher.Close() - p.conn.Close() - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/rpc_util.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/rpc_util.go deleted file mode 100644 index 96c790be..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/rpc_util.go +++ /dev/null @@ -1,452 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "bytes" - "compress/gzip" - "encoding/binary" - "fmt" - "io" - "io/ioutil" - "math" - "math/rand" - "os" - "time" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/transport" -) - -// Codec defines the interface gRPC uses to encode and decode messages. 
-type Codec interface { - // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) - // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error - // String returns the name of the Codec implementation. The returned - // string will be used as part of content type in transmission. - String() string -} - -// protoCodec is a Codec implemetation with protobuf. It is the default codec for gRPC. -type protoCodec struct{} - -func (protoCodec) Marshal(v interface{}) ([]byte, error) { - return proto.Marshal(v.(proto.Message)) -} - -func (protoCodec) Unmarshal(data []byte, v interface{}) error { - return proto.Unmarshal(data, v.(proto.Message)) -} - -func (protoCodec) String() string { - return "proto" -} - -// Compressor defines the interface gRPC uses to compress a message. -type Compressor interface { - // Do compresses p into w. - Do(w io.Writer, p []byte) error - // Type returns the compression algorithm the Compressor uses. - Type() string -} - -// NewGZIPCompressor creates a Compressor based on GZIP. -func NewGZIPCompressor() Compressor { - return &gzipCompressor{} -} - -type gzipCompressor struct { -} - -func (c *gzipCompressor) Do(w io.Writer, p []byte) error { - z := gzip.NewWriter(w) - if _, err := z.Write(p); err != nil { - return err - } - return z.Close() -} - -func (c *gzipCompressor) Type() string { - return "gzip" -} - -// Decompressor defines the interface gRPC uses to decompress a message. -type Decompressor interface { - // Do reads the data from r and uncompress them. - Do(r io.Reader) ([]byte, error) - // Type returns the compression algorithm the Decompressor uses. - Type() string -} - -type gzipDecompressor struct { -} - -// NewGZIPDecompressor creates a Decompressor based on GZIP. -func NewGZIPDecompressor() Decompressor { - return &gzipDecompressor{} -} - -func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { - z, err := gzip.NewReader(r) - if err != nil { - return nil, err - } - defer z.Close() - return ioutil.ReadAll(z) -} - -func (d *gzipDecompressor) Type() string { - return "gzip" -} - -// callInfo contains all related configuration and information about an RPC. -type callInfo struct { - failFast bool - headerMD metadata.MD - trailerMD metadata.MD - traceInfo traceInfo // in trace.go -} - -// CallOption configures a Call before it starts or extracts information from -// a Call after it completes. -type CallOption interface { - // before is called before the call is sent to any server. If before - // returns a non-nil error, the RPC fails with that error. - before(*callInfo) error - - // after is called after the call has completed. after cannot return an - // error, so any failures should be reported via output parameters. - after(*callInfo) -} - -type beforeCall func(c *callInfo) error - -func (o beforeCall) before(c *callInfo) error { return o(c) } -func (o beforeCall) after(c *callInfo) {} - -type afterCall func(c *callInfo) - -func (o afterCall) before(c *callInfo) error { return nil } -func (o afterCall) after(c *callInfo) { o(c) } - -// Header returns a CallOptions that retrieves the header metadata -// for a unary RPC. -func Header(md *metadata.MD) CallOption { - return afterCall(func(c *callInfo) { - *md = c.headerMD - }) -} - -// Trailer returns a CallOptions that retrieves the trailer metadata -// for a unary RPC. -func Trailer(md *metadata.MD) CallOption { - return afterCall(func(c *callInfo) { - *md = c.trailerMD - }) -} - -// The format of the payload: compressed or not? 
-type payloadFormat uint8 - -const ( - compressionNone payloadFormat = iota // no compression - compressionMade -) - -// parser reads complelete gRPC messages from the underlying reader. -type parser struct { - // r is the underlying reader. - // See the comment on recvMsg for the permissible - // error types. - r io.Reader - - // The header of a gRPC message. Find more detail - // at http://www.grpc.io/docs/guides/wire.html. - header [5]byte -} - -// recvMsg reads a complete gRPC message from the stream. -// -// It returns the message and its payload (compression/encoding) -// format. The caller owns the returned msg memory. -// -// If there is an error, possible values are: -// * io.EOF, when no messages remain -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * of type transport.StreamError -// No other error values or types must be returned, which also means -// that the underlying io.Reader must not return an incompatible -// error. -func (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) { - if _, err := io.ReadFull(p.r, p.header[:]); err != nil { - return 0, nil, err - } - - pf = payloadFormat(p.header[0]) - length := binary.BigEndian.Uint32(p.header[1:]) - - if length == 0 { - return pf, nil, nil - } - // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead - // of making it for each message: - msg = make([]byte, int(length)) - if _, err := io.ReadFull(p.r, msg); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return 0, nil, err - } - return pf, msg, nil -} - -// encode serializes msg and prepends the message header. If msg is nil, it -// generates the message header of 0 message length. -func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer) ([]byte, error) { - var b []byte - var length uint - if msg != nil { - var err error - // TODO(zhaoq): optimize to reduce memory alloc and copying. 
- b, err = c.Marshal(msg) - if err != nil { - return nil, err - } - if cp != nil { - if err := cp.Do(cbuf, b); err != nil { - return nil, err - } - b = cbuf.Bytes() - } - length = uint(len(b)) - } - if length > math.MaxUint32 { - return nil, Errorf(codes.InvalidArgument, "grpc: message too large (%d bytes)", length) - } - - const ( - payloadLen = 1 - sizeLen = 4 - ) - - var buf = make([]byte, payloadLen+sizeLen+len(b)) - - // Write payload format - if cp == nil { - buf[0] = byte(compressionNone) - } else { - buf[0] = byte(compressionMade) - } - // Write length of b into buf - binary.BigEndian.PutUint32(buf[1:], uint32(length)) - // Copy encoded msg to buf - copy(buf[5:], b) - - return buf, nil -} - -func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error { - switch pf { - case compressionNone: - case compressionMade: - if recvCompress == "" { - return transport.StreamErrorf(codes.InvalidArgument, "grpc: invalid grpc-encoding %q with compression enabled", recvCompress) - } - if dc == nil || recvCompress != dc.Type() { - return transport.StreamErrorf(codes.InvalidArgument, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) - } - default: - return transport.StreamErrorf(codes.InvalidArgument, "grpc: received unexpected payload format %d", pf) - } - return nil -} - -func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}) error { - pf, d, err := p.recvMsg() - if err != nil { - return err - } - if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil { - return err - } - if pf == compressionMade { - d, err = dc.Do(bytes.NewReader(d)) - if err != nil { - return transport.StreamErrorf(codes.Internal, "grpc: failed to decompress the received message %v", err) - } - } - if err := c.Unmarshal(d, m); err != nil { - return transport.StreamErrorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) - } - return nil -} - -// rpcError defines the status from an RPC. -type rpcError struct { - code codes.Code - desc string -} - -func (e rpcError) Error() string { - return fmt.Sprintf("rpc error: code = %d desc = %q", e.code, e.desc) -} - -// Code returns the error code for err if it was produced by the rpc system. -// Otherwise, it returns codes.Unknown. -func Code(err error) codes.Code { - if err == nil { - return codes.OK - } - if e, ok := err.(rpcError); ok { - return e.code - } - return codes.Unknown -} - -// ErrorDesc returns the error description of err if it was produced by the rpc system. -// Otherwise, it returns err.Error() or empty string when err is nil. -func ErrorDesc(err error) string { - if err == nil { - return "" - } - if e, ok := err.(rpcError); ok { - return e.desc - } - return err.Error() -} - -// Errorf returns an error containing an error code and a description; -// Errorf returns nil if c is OK. -func Errorf(c codes.Code, format string, a ...interface{}) error { - if c == codes.OK { - return nil - } - return rpcError{ - code: c, - desc: fmt.Sprintf(format, a...), - } -} - -// toRPCErr converts an error into a rpcError. -func toRPCErr(err error) error { - switch e := err.(type) { - case rpcError: - return err - case transport.StreamError: - return rpcError{ - code: e.Code, - desc: e.Desc, - } - case transport.ConnectionError: - return rpcError{ - code: codes.Internal, - desc: e.Desc, - } - } - return Errorf(codes.Unknown, "%v", err) -} - -// convertCode converts a standard Go error into its canonical code. 
Note that -// this is only used to translate the error returned by the server applications. -func convertCode(err error) codes.Code { - switch err { - case nil: - return codes.OK - case io.EOF: - return codes.OutOfRange - case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: - return codes.FailedPrecondition - case os.ErrInvalid: - return codes.InvalidArgument - case context.Canceled: - return codes.Canceled - case context.DeadlineExceeded: - return codes.DeadlineExceeded - } - switch { - case os.IsExist(err): - return codes.AlreadyExists - case os.IsNotExist(err): - return codes.NotFound - case os.IsPermission(err): - return codes.PermissionDenied - } - return codes.Unknown -} - -const ( - // how long to wait after the first failure before retrying - baseDelay = 1.0 * time.Second - // upper bound of backoff delay - maxDelay = 120 * time.Second - // backoff increases by this factor on each retry - backoffFactor = 1.6 - // backoff is randomized downwards by this factor - backoffJitter = 0.2 -) - -func backoff(retries int) (t time.Duration) { - if retries == 0 { - return baseDelay - } - backoff, max := float64(baseDelay), float64(maxDelay) - for backoff < max && retries > 0 { - backoff *= backoffFactor - retries-- - } - if backoff > max { - backoff = max - } - // Randomize backoff delays so that if a cluster of requests start at - // the same time, they won't operate in lockstep. - backoff *= 1 + backoffJitter*(rand.Float64()*2-1) - if backoff < 0 { - return 0 - } - return time.Duration(backoff) -} - -// SupportPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the grpc package. -// -// This constant may be renamed in the future if a change in the generated code -// requires a synchronised update of grpc-go and protoc-gen-go. This constant -// should not be referenced from any other code. -const SupportPackageIsVersion1 = true diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/server.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/server.go deleted file mode 100644 index bdf68a0f..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/server.go +++ /dev/null @@ -1,746 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "net/http" - "reflect" - "runtime" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/http2" - "golang.org/x/net/trace" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/transport" -) - -type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) - -// MethodDesc represents an RPC service's method specification. -type MethodDesc struct { - MethodName string - Handler methodHandler -} - -// ServiceDesc represents an RPC service's specification. -type ServiceDesc struct { - ServiceName string - // The pointer to the service interface. Used to check whether the user - // provided implementation satisfies the interface requirements. - HandlerType interface{} - Methods []MethodDesc - Streams []StreamDesc -} - -// service consists of the information of the server serving this service and -// the methods in this service. -type service struct { - server interface{} // the server for service methods - md map[string]*MethodDesc - sd map[string]*StreamDesc -} - -// Server is a gRPC server to serve RPC requests. -type Server struct { - opts options - - mu sync.Mutex // guards following - lis map[net.Listener]bool - conns map[io.Closer]bool - m map[string]*service // service name -> service info - events trace.EventLog -} - -type options struct { - creds credentials.Credentials - codec Codec - cp Compressor - dc Decompressor - maxConcurrentStreams uint32 - useHandlerImpl bool // use http.Handler-based server -} - -// A ServerOption sets options. -type ServerOption func(*options) - -// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. -func CustomCodec(codec Codec) ServerOption { - return func(o *options) { - o.codec = codec - } -} - -func RPCCompressor(cp Compressor) ServerOption { - return func(o *options) { - o.cp = cp - } -} - -func RPCDecompressor(dc Decompressor) ServerOption { - return func(o *options) { - o.dc = dc - } -} - -// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number -// of concurrent streams to each ServerTransport. -func MaxConcurrentStreams(n uint32) ServerOption { - return func(o *options) { - o.maxConcurrentStreams = n - } -} - -// Creds returns a ServerOption that sets credentials for server connections. -func Creds(c credentials.Credentials) ServerOption { - return func(o *options) { - o.creds = c - } -} - -// NewServer creates a gRPC server which has no service registered and has not -// started to accept requests yet. -func NewServer(opt ...ServerOption) *Server { - var opts options - for _, o := range opt { - o(&opts) - } - if opts.codec == nil { - // Set the default codec. 
- opts.codec = protoCodec{} - } - s := &Server{ - lis: make(map[net.Listener]bool), - opts: opts, - conns: make(map[io.Closer]bool), - m: make(map[string]*service), - } - if EnableTracing { - _, file, line, _ := runtime.Caller(1) - s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) - } - return s -} - -// printf records an event in s's event log, unless s has been stopped. -// REQUIRES s.mu is held. -func (s *Server) printf(format string, a ...interface{}) { - if s.events != nil { - s.events.Printf(format, a...) - } -} - -// errorf records an error in s's event log, unless s has been stopped. -// REQUIRES s.mu is held. -func (s *Server) errorf(format string, a ...interface{}) { - if s.events != nil { - s.events.Errorf(format, a...) - } -} - -// RegisterService register a service and its implementation to the gRPC -// server. Called from the IDL generated code. This must be called before -// invoking Serve. -func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { - ht := reflect.TypeOf(sd.HandlerType).Elem() - st := reflect.TypeOf(ss) - if !st.Implements(ht) { - grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) - } - s.register(sd, ss) -} - -func (s *Server) register(sd *ServiceDesc, ss interface{}) { - s.mu.Lock() - defer s.mu.Unlock() - s.printf("RegisterService(%q)", sd.ServiceName) - if _, ok := s.m[sd.ServiceName]; ok { - grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) - } - srv := &service{ - server: ss, - md: make(map[string]*MethodDesc), - sd: make(map[string]*StreamDesc), - } - for i := range sd.Methods { - d := &sd.Methods[i] - srv.md[d.MethodName] = d - } - for i := range sd.Streams { - d := &sd.Streams[i] - srv.sd[d.StreamName] = d - } - s.m[sd.ServiceName] = srv -} - -var ( - // ErrServerStopped indicates that the operation is now illegal because of - // the server being stopped. - ErrServerStopped = errors.New("grpc: the server has been stopped") -) - -func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - creds, ok := s.opts.creds.(credentials.TransportAuthenticator) - if !ok { - return rawConn, nil, nil - } - return creds.ServerHandshake(rawConn) -} - -// Serve accepts incoming connections on the listener lis, creating a new -// ServerTransport and service goroutine for each. The service goroutines -// read gRPC requests and then call the registered handlers to reply to them. -// Service returns when lis.Accept fails. -func (s *Server) Serve(lis net.Listener) error { - s.mu.Lock() - s.printf("serving") - if s.lis == nil { - s.mu.Unlock() - return ErrServerStopped - } - s.lis[lis] = true - s.mu.Unlock() - defer func() { - lis.Close() - s.mu.Lock() - delete(s.lis, lis) - s.mu.Unlock() - }() - for { - rawConn, err := lis.Accept() - if err != nil { - s.mu.Lock() - s.printf("done serving; Accept = %v", err) - s.mu.Unlock() - return err - } - // Start a new goroutine to deal with rawConn - // so we don't stall this Accept loop goroutine. - go s.handleRawConn(rawConn) - } -} - -// handleRawConn is run in its own goroutine and handles a just-accepted -// connection that has not had any I/O performed on it yet. 
-func (s *Server) handleRawConn(rawConn net.Conn) { - conn, authInfo, err := s.useTransportAuthenticator(rawConn) - if err != nil { - s.mu.Lock() - s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) - s.mu.Unlock() - grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) - rawConn.Close() - return - } - - s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - conn.Close() - return - } - s.mu.Unlock() - - if s.opts.useHandlerImpl { - s.serveUsingHandler(conn) - } else { - s.serveNewHTTP2Transport(conn, authInfo) - } -} - -// serveNewHTTP2Transport sets up a new http/2 transport (using the -// gRPC http2 server transport in transport/http2_server.go) and -// serves streams on it. -// This is run in its own goroutine (it does network I/O in -// transport.NewServerTransport). -func (s *Server) serveNewHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) { - st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo) - if err != nil { - s.mu.Lock() - s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) - s.mu.Unlock() - c.Close() - grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err) - return - } - if !s.addConn(st) { - st.Close() - return - } - s.serveStreams(st) -} - -func (s *Server) serveStreams(st transport.ServerTransport) { - defer s.removeConn(st) - defer st.Close() - var wg sync.WaitGroup - st.HandleStreams(func(stream *transport.Stream) { - wg.Add(1) - go func() { - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - }() - }) - wg.Wait() -} - -var _ http.Handler = (*Server)(nil) - -// serveUsingHandler is called from handleRawConn when s is configured -// to handle requests via the http.Handler interface. It sets up a -// net/http.Server to handle the just-accepted conn. The http.Server -// is configured to route all incoming requests (all HTTP/2 streams) -// to ServeHTTP, which creates a new ServerTransport for each stream. -// serveUsingHandler blocks until conn closes. -// -// This codepath is only used when Server.TestingUseHandlerImpl has -// been configured. This lets the end2end tests exercise the ServeHTTP -// method as one of the environment types. -// -// conn is the *tls.Conn that's already been authenticated. -func (s *Server) serveUsingHandler(conn net.Conn) { - if !s.addConn(conn) { - conn.Close() - return - } - defer s.removeConn(conn) - h2s := &http2.Server{ - MaxConcurrentStreams: s.opts.maxConcurrentStreams, - } - h2s.ServeConn(conn, &http2.ServeConnOpts{ - Handler: s, - }) -} - -func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - if !s.addConn(st) { - st.Close() - return - } - defer s.removeConn(st) - s.serveStreams(st) -} - -// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. -// If tracing is not enabled, it returns nil. 
-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { - if !EnableTracing { - return nil - } - trInfo = &traceInfo{ - tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()), - } - trInfo.firstLine.client = false - trInfo.firstLine.remoteAddr = st.RemoteAddr() - stream.TraceContext(trInfo.tr) - if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = dl.Sub(time.Now()) - } - return trInfo -} - -func (s *Server) addConn(c io.Closer) bool { - s.mu.Lock() - defer s.mu.Unlock() - if s.conns == nil { - return false - } - s.conns[c] = true - return true -} - -func (s *Server) removeConn(c io.Closer) { - s.mu.Lock() - defer s.mu.Unlock() - if s.conns != nil { - delete(s.conns, c) - } -} - -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error { - var cbuf *bytes.Buffer - if cp != nil { - cbuf = new(bytes.Buffer) - } - p, err := encode(s.opts.codec, msg, cp, cbuf) - if err != nil { - // This typically indicates a fatal issue (e.g., memory - // corruption or hardware faults) the application program - // cannot handle. - // - // TODO(zhaoq): There exist other options also such as only closing the - // faulty stream locally and remotely (Other streams can keep going). Find - // the optimal option. - grpclog.Fatalf("grpc: Server failed to encode response %v", err) - } - return t.Write(stream, p, opts) -} - -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { - if trInfo != nil { - defer trInfo.tr.Finish() - trInfo.firstLine.client = false - trInfo.tr.LazyLog(&trInfo.firstLine, false) - defer func() { - if err != nil && err != io.EOF { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() - } - }() - } - p := &parser{r: stream} - for { - pf, req, err := p.recvMsg() - if err == io.EOF { - // The entire stream is done (for unary RPC only). - return err - } - if err == io.ErrUnexpectedEOF { - err = transport.StreamError{Code: codes.Internal, Desc: "io.ErrUnexpectedEOF"} - } - if err != nil { - switch err := err.(type) { - case transport.ConnectionError: - // Nothing to do here. 
- case transport.StreamError: - if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) - } - default: - panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err)) - } - return err - } - - if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { - switch err := err.(type) { - case transport.StreamError: - if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) - } - default: - if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) - } - - } - return err - } - statusCode := codes.OK - statusDesc := "" - df := func(v interface{}) error { - if pf == compressionMade { - var err error - req, err = s.opts.dc.Do(bytes.NewReader(req)) - if err != nil { - if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) - } - return err - } - } - if err := s.opts.codec.Unmarshal(req, v); err != nil { - return err - } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) - } - return nil - } - reply, appErr := md.Handler(srv.server, stream.Context(), df) - if appErr != nil { - if err, ok := appErr.(rpcError); ok { - statusCode = err.code - statusDesc = err.desc - } else { - statusCode = convertCode(appErr) - statusDesc = appErr.Error() - } - if trInfo != nil && statusCode != codes.OK { - trInfo.tr.LazyLog(stringer(statusDesc), true) - trInfo.tr.SetError() - } - if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) - return err - } - return nil - } - if trInfo != nil { - trInfo.tr.LazyLog(stringer("OK"), false) - } - opts := &transport.Options{ - Last: true, - Delay: false, - } - if s.opts.cp != nil { - stream.SetSendCompress(s.opts.cp.Type()) - } - if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { - switch err := err.(type) { - case transport.ConnectionError: - // Nothing to do here. 
- case transport.StreamError: - statusCode = err.Code - statusDesc = err.Desc - default: - statusCode = codes.Unknown - statusDesc = err.Error() - } - return err - } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) - } - return t.WriteStatus(stream, statusCode, statusDesc) - } -} - -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { - if s.opts.cp != nil { - stream.SetSendCompress(s.opts.cp.Type()) - } - ss := &serverStream{ - t: t, - s: stream, - p: &parser{r: stream}, - codec: s.opts.codec, - cp: s.opts.cp, - dc: s.opts.dc, - trInfo: trInfo, - } - if ss.cp != nil { - ss.cbuf = new(bytes.Buffer) - } - if trInfo != nil { - trInfo.tr.LazyLog(&trInfo.firstLine, false) - defer func() { - ss.mu.Lock() - if err != nil && err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - ss.trInfo.tr.SetError() - } - ss.trInfo.tr.Finish() - ss.trInfo.tr = nil - ss.mu.Unlock() - }() - } - if appErr := sd.Handler(srv.server, ss); appErr != nil { - if err, ok := appErr.(rpcError); ok { - ss.statusCode = err.code - ss.statusDesc = err.desc - } else if err, ok := appErr.(transport.StreamError); ok { - ss.statusCode = err.Code - ss.statusDesc = err.Desc - } else { - ss.statusCode = convertCode(appErr) - ss.statusDesc = appErr.Error() - } - } - if trInfo != nil { - ss.mu.Lock() - if ss.statusCode != codes.OK { - ss.trInfo.tr.LazyLog(stringer(ss.statusDesc), true) - ss.trInfo.tr.SetError() - } else { - ss.trInfo.tr.LazyLog(stringer("OK"), false) - } - ss.mu.Unlock() - } - return t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc) - -} - -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { - sm := stream.Method() - if sm != "" && sm[0] == '/' { - sm = sm[1:] - } - pos := strings.LastIndex(sm, "/") - if pos == -1 { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) - trInfo.tr.SetError() - } - if err := t.WriteStatus(stream, codes.InvalidArgument, fmt.Sprintf("malformed method name: %q", stream.Method())); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() - } - grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) - } - if trInfo != nil { - trInfo.tr.Finish() - } - return - } - service := sm[:pos] - method := sm[pos+1:] - srv, ok := s.m[service] - if !ok { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) - trInfo.tr.SetError() - } - if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown service %v", service)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() - } - grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) - } - if trInfo != nil { - trInfo.tr.Finish() - } - return - } - // Unary RPC or Streaming RPC? 
- if md, ok := srv.md[method]; ok { - s.processUnaryRPC(t, stream, srv, md, trInfo) - return - } - if sd, ok := srv.sd[method]; ok { - s.processStreamingRPC(t, stream, srv, sd, trInfo) - return - } - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true) - trInfo.tr.SetError() - } - if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown method %v", method)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() - } - grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) - } - if trInfo != nil { - trInfo.tr.Finish() - } -} - -// Stop stops the gRPC server. It immediately closes all open -// connections and listeners. -// It cancels all active RPCs on the server side and the corresponding -// pending RPCs on the client side will get notified by connection -// errors. -func (s *Server) Stop() { - s.mu.Lock() - listeners := s.lis - s.lis = nil - cs := s.conns - s.conns = nil - s.mu.Unlock() - - for lis := range listeners { - lis.Close() - } - for c := range cs { - c.Close() - } - - s.mu.Lock() - if s.events != nil { - s.events.Finish() - s.events = nil - } - s.mu.Unlock() -} - -func init() { - internal.TestingCloseConns = func(arg interface{}) { - arg.(*Server).testingCloseConns() - } - internal.TestingUseHandlerImpl = func(arg interface{}) { - arg.(*Server).opts.useHandlerImpl = true - } -} - -// testingCloseConns closes all existing transports but keeps s.lis -// accepting new connections. -func (s *Server) testingCloseConns() { - s.mu.Lock() - for c := range s.conns { - c.Close() - delete(s.conns, c) - } - s.mu.Unlock() -} - -// SendHeader sends header metadata. It may be called at most once from a unary -// RPC handler. The ctx is the RPC handler's Context or one derived from it. -func SendHeader(ctx context.Context, md metadata.MD) error { - if md.Len() == 0 { - return nil - } - stream, ok := transport.StreamFromContext(ctx) - if !ok { - return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx) - } - t := stream.ServerTransport() - if t == nil { - grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream) - } - return t.WriteHeader(stream, md) -} - -// SetTrailer sets the trailer metadata that will be sent when an RPC returns. -// It may be called at most once from a unary RPC handler. The ctx is the RPC -// handler's Context or one derived from it. -func SetTrailer(ctx context.Context, md metadata.MD) error { - if md.Len() == 0 { - return nil - } - stream, ok := transport.StreamFromContext(ctx) - if !ok { - return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx) - } - return stream.SetTrailer(md) -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/stream.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/stream.go deleted file mode 100644 index dba7f6c4..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/stream.go +++ /dev/null @@ -1,411 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "bytes" - "errors" - "io" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/trace" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/transport" -) - -type streamHandler func(srv interface{}, stream ServerStream) error - -// StreamDesc represents a streaming RPC service's method specification. -type StreamDesc struct { - StreamName string - Handler streamHandler - - // At least one of these is true. - ServerStreams bool - ClientStreams bool -} - -// Stream defines the common interface a client or server stream has to satisfy. -type Stream interface { - // Context returns the context for this stream. - Context() context.Context - // SendMsg blocks until it sends m, the stream is done or the stream - // breaks. - // On error, it aborts the stream and returns an RPC status on client - // side. On server side, it simply returns the error to the caller. - // SendMsg is called by generated code. - SendMsg(m interface{}) error - // RecvMsg blocks until it receives a message or the stream is - // done. On client side, it returns io.EOF when the stream is done. On - // any other error, it aborts the stream and returns an RPC status. On - // server side, it simply returns the error to the caller. - RecvMsg(m interface{}) error -} - -// ClientStream defines the interface a client stream has to satify. -type ClientStream interface { - // Header returns the header metedata received from the server if there - // is any. It blocks if the metadata is not ready to read. - Header() (metadata.MD, error) - // Trailer returns the trailer metadata from the server. It must be called - // after stream.Recv() returns non-nil error (including io.EOF) for - // bi-directional streaming and server streaming or stream.CloseAndRecv() - // returns for client streaming in order to receive trailer metadata if - // present. Otherwise, it could returns an empty MD even though trailer - // is present. - Trailer() metadata.MD - // CloseSend closes the send direction of the stream. It closes the stream - // when non-nil error is met. - CloseSend() error - Stream -} - -// NewClientStream creates a new Stream for the client side. This is called -// by generated code. 
-func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { - var ( - t transport.ClientTransport - err error - ) - t, err = cc.dopts.picker.Pick(ctx) - if err != nil { - return nil, toRPCErr(err) - } - // TODO(zhaoq): CallOption is omitted. Add support when it is needed. - callHdr := &transport.CallHdr{ - Host: cc.authority, - Method: method, - Flush: desc.ServerStreams && desc.ClientStreams, - } - if cc.dopts.cp != nil { - callHdr.SendCompress = cc.dopts.cp.Type() - } - cs := &clientStream{ - desc: desc, - codec: cc.dopts.codec, - cp: cc.dopts.cp, - dc: cc.dopts.dc, - tracing: EnableTracing, - } - if cc.dopts.cp != nil { - callHdr.SendCompress = cc.dopts.cp.Type() - cs.cbuf = new(bytes.Buffer) - } - if cs.tracing { - cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) - cs.trInfo.firstLine.client = true - if deadline, ok := ctx.Deadline(); ok { - cs.trInfo.firstLine.deadline = deadline.Sub(time.Now()) - } - cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false) - ctx = trace.NewContext(ctx, cs.trInfo.tr) - } - s, err := t.NewStream(ctx, callHdr) - if err != nil { - cs.finish(err) - return nil, toRPCErr(err) - } - cs.t = t - cs.s = s - cs.p = &parser{r: s} - // Listen on ctx.Done() to detect cancellation when there is no pending - // I/O operations on this stream. - go func() { - select { - case <-t.Error(): - // Incur transport error, simply exit. - case <-s.Context().Done(): - err := s.Context().Err() - cs.finish(err) - cs.closeTransportStream(transport.ContextErr(err)) - } - }() - return cs, nil -} - -// clientStream implements a client side Stream. -type clientStream struct { - t transport.ClientTransport - s *transport.Stream - p *parser - desc *StreamDesc - codec Codec - cp Compressor - cbuf *bytes.Buffer - dc Decompressor - - tracing bool // set to EnableTracing when the clientStream is created. - - mu sync.Mutex - closed bool - // trInfo.tr is set when the clientStream is created (if EnableTracing is true), - // and is set to nil when the clientStream's finish method is called. - trInfo traceInfo -} - -func (cs *clientStream) Context() context.Context { - return cs.s.Context() -} - -func (cs *clientStream) Header() (metadata.MD, error) { - m, err := cs.s.Header() - if err != nil { - if _, ok := err.(transport.ConnectionError); !ok { - cs.closeTransportStream(err) - } - } - return m, err -} - -func (cs *clientStream) Trailer() metadata.MD { - return cs.s.Trailer() -} - -func (cs *clientStream) SendMsg(m interface{}) (err error) { - if cs.tracing { - cs.mu.Lock() - if cs.trInfo.tr != nil { - cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) - } - cs.mu.Unlock() - } - defer func() { - if err != nil { - cs.finish(err) - } - if err == nil || err == io.EOF { - return - } - if _, ok := err.(transport.ConnectionError); !ok { - cs.closeTransportStream(err) - } - err = toRPCErr(err) - }() - out, err := encode(cs.codec, m, cs.cp, cs.cbuf) - defer func() { - if cs.cbuf != nil { - cs.cbuf.Reset() - } - }() - if err != nil { - return transport.StreamErrorf(codes.Internal, "grpc: %v", err) - } - return cs.t.Write(cs.s, out, &transport.Options{Last: false}) -} - -func (cs *clientStream) RecvMsg(m interface{}) (err error) { - err = recv(cs.p, cs.codec, cs.s, cs.dc, m) - defer func() { - // err != nil indicates the termination of the stream. 
-		if err != nil {
-			cs.finish(err)
-		}
-	}()
-	if err == nil {
-		if cs.tracing {
-			cs.mu.Lock()
-			if cs.trInfo.tr != nil {
-				cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
-			}
-			cs.mu.Unlock()
-		}
-		if !cs.desc.ClientStreams || cs.desc.ServerStreams {
-			return
-		}
-		// Special handling for client streaming rpc.
-		err = recv(cs.p, cs.codec, cs.s, cs.dc, m)
-		cs.closeTransportStream(err)
-		if err == nil {
-			return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
-		}
-		if err == io.EOF {
-			if cs.s.StatusCode() == codes.OK {
-				cs.finish(err)
-				return nil
-			}
-			return Errorf(cs.s.StatusCode(), cs.s.StatusDesc())
-		}
-		return toRPCErr(err)
-	}
-	if _, ok := err.(transport.ConnectionError); !ok {
-		cs.closeTransportStream(err)
-	}
-	if err == io.EOF {
-		if cs.s.StatusCode() == codes.OK {
-			// Returns io.EOF to indicate the end of the stream.
-			return
-		}
-		return Errorf(cs.s.StatusCode(), cs.s.StatusDesc())
-	}
-	return toRPCErr(err)
-}
-
-func (cs *clientStream) CloseSend() (err error) {
-	err = cs.t.Write(cs.s, nil, &transport.Options{Last: true})
-	defer func() {
-		if err != nil {
-			cs.finish(err)
-		}
-	}()
-	if err == nil || err == io.EOF {
-		return
-	}
-	if _, ok := err.(transport.ConnectionError); !ok {
-		cs.closeTransportStream(err)
-	}
-	err = toRPCErr(err)
-	return
-}
-
-func (cs *clientStream) closeTransportStream(err error) {
-	cs.mu.Lock()
-	if cs.closed {
-		cs.mu.Unlock()
-		return
-	}
-	cs.closed = true
-	cs.mu.Unlock()
-	cs.t.CloseStream(cs.s, err)
-}
-
-func (cs *clientStream) finish(err error) {
-	if !cs.tracing {
-		return
-	}
-	cs.mu.Lock()
-	defer cs.mu.Unlock()
-	if cs.trInfo.tr != nil {
-		if err == nil || err == io.EOF {
-			cs.trInfo.tr.LazyPrintf("RPC: [OK]")
-		} else {
-			cs.trInfo.tr.LazyPrintf("RPC: [%v]", err)
-			cs.trInfo.tr.SetError()
-		}
-		cs.trInfo.tr.Finish()
-		cs.trInfo.tr = nil
-	}
-}
-
-// ServerStream defines the interface a server stream has to satisfy.
-type ServerStream interface {
-	// SendHeader sends the header metadata. It should not be called
-	// after SendProto. It fails if called multiple times or if
-	// called after SendProto.
-	SendHeader(metadata.MD) error
-	// SetTrailer sets the trailer metadata which will be sent with the
-	// RPC status.
-	SetTrailer(metadata.MD)
-	Stream
-}
-
-// serverStream implements a server side Stream.
-type serverStream struct {
-	t          transport.ServerTransport
-	s          *transport.Stream
-	p          *parser
-	codec      Codec
-	cp         Compressor
-	dc         Decompressor
-	cbuf       *bytes.Buffer
-	statusCode codes.Code
-	statusDesc string
-	trInfo     *traceInfo
-
-	mu sync.Mutex // protects trInfo.tr after the service handler runs.
-} - -func (ss *serverStream) Context() context.Context { - return ss.s.Context() -} - -func (ss *serverStream) SendHeader(md metadata.MD) error { - return ss.t.WriteHeader(ss.s, md) -} - -func (ss *serverStream) SetTrailer(md metadata.MD) { - if md.Len() == 0 { - return - } - ss.s.SetTrailer(md) - return -} - -func (ss *serverStream) SendMsg(m interface{}) (err error) { - defer func() { - if ss.trInfo != nil { - ss.mu.Lock() - if ss.trInfo.tr != nil { - if err == nil { - ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) - } else { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - ss.trInfo.tr.SetError() - } - } - ss.mu.Unlock() - } - }() - out, err := encode(ss.codec, m, ss.cp, ss.cbuf) - defer func() { - if ss.cbuf != nil { - ss.cbuf.Reset() - } - }() - if err != nil { - err = transport.StreamErrorf(codes.Internal, "grpc: %v", err) - return err - } - return ss.t.Write(ss.s, out, &transport.Options{Last: false}) -} - -func (ss *serverStream) RecvMsg(m interface{}) (err error) { - defer func() { - if ss.trInfo != nil { - ss.mu.Lock() - if ss.trInfo.tr != nil { - if err == nil { - ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) - } else if err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - ss.trInfo.tr.SetError() - } - } - ss.mu.Unlock() - } - }() - return recv(ss.p, ss.codec, ss.s, ss.dc, m) -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/trace.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/trace.go deleted file mode 100644 index cde04fbf..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/trace.go +++ /dev/null @@ -1,120 +0,0 @@ -/* - * - * Copyright 2015, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "bytes" - "fmt" - "io" - "net" - "strings" - "time" - - "golang.org/x/net/trace" -) - -// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. 
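-// (Editor's note: e.g. setting grpc.EnableTracing = false in main(), before
-// serving, disables the /debug/requests traces exposed by x/net/trace.)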
-// This should only be set before any RPCs are sent or received by this program. -var EnableTracing = true - -// methodFamily returns the trace family for the given method. -// It turns "/pkg.Service/GetFoo" into "pkg.Service". -func methodFamily(m string) string { - m = strings.TrimPrefix(m, "/") // remove leading slash - if i := strings.Index(m, "/"); i >= 0 { - m = m[:i] // remove everything from second slash - } - if i := strings.LastIndex(m, "."); i >= 0 { - m = m[i+1:] // cut down to last dotted component - } - return m -} - -// traceInfo contains tracing information for an RPC. -type traceInfo struct { - tr trace.Trace - firstLine firstLine -} - -// firstLine is the first line of an RPC trace. -type firstLine struct { - client bool // whether this is a client (outgoing) RPC - remoteAddr net.Addr - deadline time.Duration // may be zero -} - -func (f *firstLine) String() string { - var line bytes.Buffer - io.WriteString(&line, "RPC: ") - if f.client { - io.WriteString(&line, "to") - } else { - io.WriteString(&line, "from") - } - fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) - if f.deadline != 0 { - fmt.Fprint(&line, f.deadline) - } else { - io.WriteString(&line, "none") - } - return line.String() -} - -// payload represents an RPC request or response payload. -type payload struct { - sent bool // whether this is an outgoing payload - msg interface{} // e.g. a proto.Message - // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? -} - -func (p payload) String() string { - if p.sent { - return fmt.Sprintf("sent: %v", p.msg) - } else { - return fmt.Sprintf("recv: %v", p.msg) - } -} - -type fmtStringer struct { - format string - a []interface{} -} - -func (f *fmtStringer) String() string { - return fmt.Sprintf(f.format, f.a...) -} - -type stringer string - -func (s stringer) String() string { return string(s) } diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/control.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/control.go deleted file mode 100644 index f6b38a5a..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/control.go +++ /dev/null @@ -1,260 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package transport - -import ( - "fmt" - "sync" - - "golang.org/x/net/http2" -) - -const ( - // The default value of flow control window size in HTTP2 spec. - defaultWindowSize = 65535 - // The initial window size for flow control. - initialWindowSize = defaultWindowSize // for an RPC - initialConnWindowSize = defaultWindowSize * 16 // for a connection -) - -// The following defines various control items which could flow through -// the control buffer of transport. They represent different aspects of -// control tasks, e.g., flow control, settings, streaming resetting, etc. -type windowUpdate struct { - streamID uint32 - increment uint32 -} - -func (windowUpdate) isItem() bool { - return true -} - -type settings struct { - ack bool - ss []http2.Setting -} - -func (settings) isItem() bool { - return true -} - -type resetStream struct { - streamID uint32 - code http2.ErrCode -} - -func (resetStream) isItem() bool { - return true -} - -type flushIO struct { -} - -func (flushIO) isItem() bool { - return true -} - -type ping struct { - ack bool - data [8]byte -} - -func (ping) isItem() bool { - return true -} - -// quotaPool is a pool which accumulates the quota and sends it to acquire() -// when it is available. -type quotaPool struct { - c chan int - - mu sync.Mutex - quota int -} - -// newQuotaPool creates a quotaPool which has quota q available to consume. -func newQuotaPool(q int) *quotaPool { - qb := "aPool{ - c: make(chan int, 1), - } - if q > 0 { - qb.c <- q - } else { - qb.quota = q - } - return qb -} - -// add adds n to the available quota and tries to send it on acquire. -func (qb *quotaPool) add(n int) { - qb.mu.Lock() - defer qb.mu.Unlock() - qb.quota += n - if qb.quota <= 0 { - return - } - select { - case qb.c <- qb.quota: - qb.quota = 0 - default: - } -} - -// cancel cancels the pending quota sent on acquire, if any. -func (qb *quotaPool) cancel() { - qb.mu.Lock() - defer qb.mu.Unlock() - select { - case n := <-qb.c: - qb.quota += n - default: - } -} - -// reset cancels the pending quota sent on acquired, incremented by v and sends -// it back on acquire. -func (qb *quotaPool) reset(v int) { - qb.mu.Lock() - defer qb.mu.Unlock() - select { - case n := <-qb.c: - qb.quota += n - default: - } - qb.quota += v - if qb.quota <= 0 { - return - } - select { - case qb.c <- qb.quota: - qb.quota = 0 - default: - } -} - -// acquire returns the channel on which available quota amounts are sent. -func (qb *quotaPool) acquire() <-chan int { - return qb.c -} - -// inFlow deals with inbound flow control -type inFlow struct { - // The inbound flow control limit for pending data. - limit uint32 - // conn points to the shared connection-level inFlow that is shared - // by all streams on that conn. It is nil for the inFlow on the conn - // directly. - conn *inFlow - - mu sync.Mutex - // pendingData is the overall data which have been received but not been - // consumed by applications. 
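-	// (Editor's illustration of the accounting below: onData adds n to
-	// pendingData; onRead moves n from pendingData to pendingUpdate and,
-	// once pendingUpdate reaches limit/4 (16383 bytes of a 65535-byte
-	// window), returns it so a single coalesced WINDOW_UPDATE is sent.)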
-	pendingData uint32
-	// The amount of data the application has consumed but grpc has not sent
-	// window update for them. Used to reduce window update frequency.
-	pendingUpdate uint32
-}
-
-// onData is invoked when some data frame is received. It increments not only its
-// own pendingData but also that of the associated connection-level flow.
-func (f *inFlow) onData(n uint32) error {
-	if n == 0 {
-		return nil
-	}
-	f.mu.Lock()
-	defer f.mu.Unlock()
-	if f.pendingData+f.pendingUpdate+n > f.limit {
-		return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate+n, f.limit)
-	}
-	if f.conn != nil {
-		if err := f.conn.onData(n); err != nil {
-			return ConnectionErrorf("%v", err)
-		}
-	}
-	f.pendingData += n
-	return nil
-}
-
-// connOnRead updates the connection level states when the application consumes data.
-func (f *inFlow) connOnRead(n uint32) uint32 {
-	if n == 0 || f.conn != nil {
-		return 0
-	}
-	f.mu.Lock()
-	defer f.mu.Unlock()
-	f.pendingData -= n
-	f.pendingUpdate += n
-	if f.pendingUpdate >= f.limit/4 {
-		ret := f.pendingUpdate
-		f.pendingUpdate = 0
-		return ret
-	}
-	return 0
-}
-
-// onRead is invoked when the application reads the data. It returns the window updates
-// for both stream and connection level.
-func (f *inFlow) onRead(n uint32) (swu, cwu uint32) {
-	if n == 0 {
-		return
-	}
-	f.mu.Lock()
-	defer f.mu.Unlock()
-	if f.pendingData == 0 {
-		// pendingData has been adjusted by restoreConn.
-		return
-	}
-	f.pendingData -= n
-	f.pendingUpdate += n
-	if f.pendingUpdate >= f.limit/4 {
-		swu = f.pendingUpdate
-		f.pendingUpdate = 0
-	}
-	cwu = f.conn.connOnRead(n)
-	return
-}
-
-// restoreConn is invoked when a stream is terminated. It removes its stake in
-// the connection-level flow and resets its own state.
-func (f *inFlow) restoreConn() uint32 {
-	if f.conn == nil {
-		return 0
-	}
-	f.mu.Lock()
-	defer f.mu.Unlock()
-	n := f.pendingData
-	f.pendingData = 0
-	f.pendingUpdate = 0
-	return f.conn.connOnRead(n)
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/handler_server.go
deleted file mode 100644
index d7e18a0b..00000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/handler_server.go
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- * Copyright 2016, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -// This file is the implementation of a gRPC server using HTTP/2 which -// uses the standard Go http2 Server implementation (via the -// http.Handler interface), rather than speaking low-level HTTP/2 -// frames itself. It is the implementation of *grpc.Server.ServeHTTP. - -package transport - -import ( - "errors" - "fmt" - "io" - "net" - "net/http" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/http2" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" -) - -// NewServerHandlerTransport returns a ServerTransport handling gRPC -// from inside an http.Handler. It requires that the http Server -// supports HTTP/2. -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) { - if r.ProtoMajor != 2 { - return nil, errors.New("gRPC requires HTTP/2") - } - if r.Method != "POST" { - return nil, errors.New("invalid gRPC request method") - } - if !strings.Contains(r.Header.Get("Content-Type"), "application/grpc") { - return nil, errors.New("invalid gRPC request content-type") - } - if _, ok := w.(http.Flusher); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") - } - if _, ok := w.(http.CloseNotifier); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier") - } - - st := &serverHandlerTransport{ - rw: w, - req: r, - closedCh: make(chan struct{}), - writes: make(chan func()), - } - - if v := r.Header.Get("grpc-timeout"); v != "" { - to, err := timeoutDecode(v) - if err != nil { - return nil, StreamErrorf(codes.Internal, "malformed time-out: %v", err) - } - st.timeoutSet = true - st.timeout = to - } - - var metakv []string - for k, vv := range r.Header { - k = strings.ToLower(k) - if isReservedHeader(k) { - continue - } - for _, v := range vv { - if k == "user-agent" { - // user-agent is special. Copying logic of http_util.go. - if i := strings.LastIndex(v, " "); i == -1 { - // There is no application user agent string being set - continue - } else { - v = v[:i] - } - } - metakv = append(metakv, k, v) - - } - } - st.headerMD = metadata.Pairs(metakv...) - - return st, nil -} - -// serverHandlerTransport is an implementation of ServerTransport -// which replies to exactly one gRPC request (exactly one HTTP request), -// using the net/http.Handler interface. This http.Handler is guaranteed -// at this point to be speaking over HTTP/2, so it's able to speak valid -// gRPC. -type serverHandlerTransport struct { - rw http.ResponseWriter - req *http.Request - timeoutSet bool - timeout time.Duration - didCommonHeaders bool - - headerMD metadata.MD - - closeOnce sync.Once - closedCh chan struct{} // closed on Close - - // writes is a channel of code to run serialized in the - // ServeHTTP (HandleStreams) goroutine. The channel is closed - // when WriteStatus is called. 
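-	// (Editor's note: do() below implements this by
-	//	select { case ht.writes <- fn: ...; case <-ht.closedCh: ... }
-	// so callers never block once the transport has been closed.)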
- writes chan func() -} - -func (ht *serverHandlerTransport) Close() error { - ht.closeOnce.Do(ht.closeCloseChanOnce) - return nil -} - -func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } - -func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } - -// strAddr is a net.Addr backed by either a TCP "ip:port" string, or -// the empty string if unknown. -type strAddr string - -func (a strAddr) Network() string { - if a != "" { - // Per the documentation on net/http.Request.RemoteAddr, if this is - // set, it's set to the IP:port of the peer (hence, TCP): - // https://golang.org/pkg/net/http/#Request - // - // If we want to support Unix sockets later, we can - // add our own grpc-specific convention within the - // grpc codebase to set RemoteAddr to a different - // format, or probably better: we can attach it to the - // context and use that from serverHandlerTransport.RemoteAddr. - return "tcp" - } - return "" -} - -func (a strAddr) String() string { return string(a) } - -// do runs fn in the ServeHTTP goroutine. -func (ht *serverHandlerTransport) do(fn func()) error { - select { - case ht.writes <- fn: - return nil - case <-ht.closedCh: - return ErrConnClosing - } -} - -func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error { - err := ht.do(func() { - ht.writeCommonHeaders(s) - - // And flush, in case no header or body has been sent yet. - // This forces a separation of headers and trailers if this is the - // first call (for example, in end2end tests's TestNoService). - ht.rw.(http.Flusher).Flush() - - h := ht.rw.Header() - h.Set("Grpc-Status", fmt.Sprintf("%d", statusCode)) - if statusDesc != "" { - h.Set("Grpc-Message", statusDesc) - } - if md := s.Trailer(); len(md) > 0 { - for k, vv := range md { - for _, v := range vv { - // http2 ResponseWriter mechanism to - // send undeclared Trailers after the - // headers have possibly been written. - h.Add(http2.TrailerPrefix+k, v) - } - } - } - }) - close(ht.writes) - return err -} - -// writeCommonHeaders sets common headers on the first write -// call (Write, WriteHeader, or WriteStatus). -func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { - if ht.didCommonHeaders { - return - } - ht.didCommonHeaders = true - - h := ht.rw.Header() - h["Date"] = nil // suppress Date to make tests happy; TODO: restore - h.Set("Content-Type", "application/grpc") - - // Predeclare trailers we'll set later in WriteStatus (after the body). - // This is a SHOULD in the HTTP RFC, and the way you add (known) - // Trailers per the net/http.ResponseWriter contract. 
- // See https://golang.org/pkg/net/http/#ResponseWriter - // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers - h.Add("Trailer", "Grpc-Status") - h.Add("Trailer", "Grpc-Message") - - if s.sendCompress != "" { - h.Set("Grpc-Encoding", s.sendCompress) - } -} - -func (ht *serverHandlerTransport) Write(s *Stream, data []byte, opts *Options) error { - return ht.do(func() { - ht.writeCommonHeaders(s) - ht.rw.Write(data) - if !opts.Delay { - ht.rw.(http.Flusher).Flush() - } - }) -} - -func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { - return ht.do(func() { - ht.writeCommonHeaders(s) - h := ht.rw.Header() - for k, vv := range md { - for _, v := range vv { - h.Add(k, v) - } - } - ht.rw.WriteHeader(200) - ht.rw.(http.Flusher).Flush() - }) -} - -func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { - // With this transport type there will be exactly 1 stream: this HTTP request. - - var ctx context.Context - var cancel context.CancelFunc - if ht.timeoutSet { - ctx, cancel = context.WithTimeout(context.Background(), ht.timeout) - } else { - ctx, cancel = context.WithCancel(context.Background()) - } - - // requestOver is closed when either the request's context is done - // or the status has been written via WriteStatus. - requestOver := make(chan struct{}) - - // clientGone receives a single value if peer is gone, either - // because the underlying connection is dead or because the - // peer sends an http2 RST_STREAM. - clientGone := ht.rw.(http.CloseNotifier).CloseNotify() - go func() { - select { - case <-requestOver: - return - case <-ht.closedCh: - case <-clientGone: - } - cancel() - }() - - req := ht.req - - s := &Stream{ - id: 0, // irrelevant - windowHandler: func(int) {}, // nothing - cancel: cancel, - buf: newRecvBuffer(), - st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - } - pr := &peer.Peer{ - Addr: ht.RemoteAddr(), - } - if req.TLS != nil { - pr.AuthInfo = credentials.TLSInfo{*req.TLS} - } - ctx = metadata.NewContext(ctx, ht.headerMD) - ctx = peer.NewContext(ctx, pr) - s.ctx = newContextWithStream(ctx, s) - s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf} - - // readerDone is closed when the Body.Read-ing goroutine exits. - readerDone := make(chan struct{}) - go func() { - defer close(readerDone) - for { - buf := make([]byte, 1024) // TODO: minimize garbage, optimize recvBuffer code/ownership - n, err := req.Body.Read(buf) - if n > 0 { - s.buf.put(&recvMsg{data: buf[:n]}) - } - if err != nil { - s.buf.put(&recvMsg{err: mapRecvMsgError(err)}) - return - } - } - }() - - // startStream is provided by the *grpc.Server's serveStreams. - // It starts a goroutine serving s and exits immediately. - // The goroutine that is started is the one that then calls - // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. - startStream(s) - - ht.runStream() - close(requestOver) - - // Wait for reading goroutine to finish. - req.Body.Close() - <-readerDone -} - -func (ht *serverHandlerTransport) runStream() { - for { - select { - case fn, ok := <-ht.writes: - if !ok { - return - } - fn() - case <-ht.closedCh: - return - } - } -} - -// mapRecvMsgError returns the non-nil err into the appropriate -// error value as expected by callers of *grpc.parser.recvMsg. 
-// In particular, it can only be:
-//   * io.EOF
-//   * io.ErrUnexpectedEOF
-//   * of type transport.ConnectionError
-//   * of type transport.StreamError
-func mapRecvMsgError(err error) error {
-	if err == io.EOF || err == io.ErrUnexpectedEOF {
-		return err
-	}
-	if se, ok := err.(http2.StreamError); ok {
-		if code, ok := http2ErrConvTab[se.Code]; ok {
-			return StreamError{
-				Code: code,
-				Desc: se.Error(),
-			}
-		}
-	}
-	return ConnectionError{Desc: err.Error()}
-}
diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_client.go
deleted file mode 100644
index 66fabbba..00000000
--- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_client.go
+++ /dev/null
@@ -1,879 +0,0 @@
-/*
- *
- * Copyright 2014, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-package transport
-
-import (
-	"bytes"
-	"errors"
-	"io"
-	"math"
-	"net"
-	"strings"
-	"sync"
-	"time"
-
-	"golang.org/x/net/context"
-	"golang.org/x/net/http2"
-	"golang.org/x/net/http2/hpack"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/metadata"
-	"google.golang.org/grpc/peer"
-)
-
-// http2Client implements the ClientTransport interface with HTTP2.
-type http2Client struct {
-	target    string // server name/addr
-	userAgent string
-	conn      net.Conn             // underlying communication channel
-	authInfo  credentials.AuthInfo // auth info about the connection
-	nextID    uint32               // the next stream ID to be used
-
-	// writableChan synchronizes write access to the transport.
-	// A writer acquires the write lock by receiving a value on writableChan
-	// and releases it by sending on writableChan.
-	writableChan chan int
-	// shutdownChan is closed when Close is called.
-	// Blocking operations should select on shutdownChan to avoid
-	// blocking forever after Close.
-	// TODO(zhaoq): Maybe have a channel context?
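-	// (Editor's illustration: the blocking helpers below follow the pattern
-	//	if _, err := wait(ctx, t.shutdownChan, t.writableChan); err != nil { ... } // acquire
-	//	t.writableChan <- 0                                                       // release
-	// so they unblock promptly once Close closes shutdownChan.)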
- shutdownChan chan struct{} - // errorChan is closed to notify the I/O error to the caller. - errorChan chan struct{} - - framer *framer - hBuf *bytes.Buffer // the buffer for HPACK encoding - hEnc *hpack.Encoder // HPACK encoder - - // controlBuf delivers all the control related tasks (e.g., window - // updates, reset streams, and various settings) to the controller. - controlBuf *recvBuffer - fc *inFlow - // sendQuotaPool provides flow control to outbound message. - sendQuotaPool *quotaPool - // streamsQuota limits the max number of concurrent streams. - streamsQuota *quotaPool - - // The scheme used: https if TLS is on, http otherwise. - scheme string - - authCreds []credentials.Credentials - - mu sync.Mutex // guard the following variables - state transportState // the state of underlying connection - activeStreams map[uint32]*Stream - // The max number of concurrent streams - maxStreams int - // the per-stream outbound flow control window size set by the peer. - streamSendQuota uint32 -} - -// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 -// and starts to receive messages on it. Non-nil error returns if construction -// fails. -func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err error) { - if opts.Dialer == nil { - // Set the default Dialer. - opts.Dialer = func(addr string, timeout time.Duration) (net.Conn, error) { - return net.DialTimeout("tcp", addr, timeout) - } - } - scheme := "http" - startT := time.Now() - timeout := opts.Timeout - conn, connErr := opts.Dialer(addr, timeout) - if connErr != nil { - return nil, ConnectionErrorf("transport: %v", connErr) - } - var authInfo credentials.AuthInfo - for _, c := range opts.AuthOptions { - if ccreds, ok := c.(credentials.TransportAuthenticator); ok { - scheme = "https" - // TODO(zhaoq): Now the first TransportAuthenticator is used if there are - // multiple ones provided. Revisit this if it is not appropriate. Probably - // place the ClientTransport construction into a separate function to make - // things clear. - if timeout > 0 { - timeout -= time.Since(startT) - } - conn, authInfo, connErr = ccreds.ClientHandshake(addr, conn, timeout) - break - } - } - if connErr != nil { - return nil, ConnectionErrorf("transport: %v", connErr) - } - defer func() { - if err != nil { - conn.Close() - } - }() - // Send connection preface to server. - n, err := conn.Write(clientPreface) - if err != nil { - return nil, ConnectionErrorf("transport: %v", err) - } - if n != len(clientPreface) { - return nil, ConnectionErrorf("transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) - } - framer := newFramer(conn) - if initialWindowSize != defaultWindowSize { - err = framer.writeSettings(true, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)}) - } else { - err = framer.writeSettings(true) - } - if err != nil { - return nil, ConnectionErrorf("transport: %v", err) - } - // Adjust the connection flow control window if needed. - if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { - if err := framer.writeWindowUpdate(true, 0, delta); err != nil { - return nil, ConnectionErrorf("transport: %v", err) - } - } - ua := primaryUA - if opts.UserAgent != "" { - ua = opts.UserAgent + " " + ua - } - var buf bytes.Buffer - t := &http2Client{ - target: addr, - userAgent: ua, - conn: conn, - authInfo: authInfo, - // The client initiated stream id is odd starting from 1. 
- nextID: 1, - writableChan: make(chan int, 1), - shutdownChan: make(chan struct{}), - errorChan: make(chan struct{}), - framer: framer, - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - controlBuf: newRecvBuffer(), - fc: &inFlow{limit: initialConnWindowSize}, - sendQuotaPool: newQuotaPool(defaultWindowSize), - scheme: scheme, - state: reachable, - activeStreams: make(map[uint32]*Stream), - authCreds: opts.AuthOptions, - maxStreams: math.MaxInt32, - streamSendQuota: defaultWindowSize, - } - go t.controller() - t.writableChan <- 0 - // Start the reader goroutine for incoming message. The threading model - // on receiving is that each transport has a dedicated goroutine which - // reads HTTP2 frame from network. Then it dispatches the frame to the - // corresponding stream entity. - go t.reader() - return t, nil -} - -func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { - fc := &inFlow{ - limit: initialWindowSize, - conn: t.fc, - } - // TODO(zhaoq): Handle uint32 overflow of Stream.id. - s := &Stream{ - id: t.nextID, - method: callHdr.Method, - sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), - fc: fc, - sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), - headerChan: make(chan struct{}), - } - t.nextID += 2 - s.windowHandler = func(n int) { - t.updateWindow(s, uint32(n)) - } - // Make a stream be able to cancel the pending operations by itself. - s.ctx, s.cancel = context.WithCancel(ctx) - s.dec = &recvBufferReader{ - ctx: s.ctx, - recv: s.buf, - } - return s -} - -// NewStream creates a stream and register it into the transport as "active" -// streams. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { - // Record the timeout value on the context. - var timeout time.Duration - if dl, ok := ctx.Deadline(); ok { - timeout = dl.Sub(time.Now()) - if timeout <= 0 { - return nil, ContextErr(context.DeadlineExceeded) - } - } - pr := &peer.Peer{ - Addr: t.conn.RemoteAddr(), - } - // Attach Auth info if there is any. - if t.authInfo != nil { - pr.AuthInfo = t.authInfo - } - ctx = peer.NewContext(ctx, pr) - authData := make(map[string]string) - for _, c := range t.authCreds { - // Construct URI required to get auth request metadata. - var port string - if pos := strings.LastIndex(t.target, ":"); pos != -1 { - // Omit port if it is the default one. - if t.target[pos+1:] != "443" { - port = ":" + t.target[pos+1:] - } - } - pos := strings.LastIndex(callHdr.Method, "/") - if pos == -1 { - return nil, StreamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method) - } - audience := "https://" + callHdr.Host + port + callHdr.Method[:pos] - data, err := c.GetRequestMetadata(ctx, audience) - if err != nil { - return nil, StreamErrorf(codes.InvalidArgument, "transport: %v", err) - } - for k, v := range data { - authData[k] = v - } - } - t.mu.Lock() - if t.state != reachable { - t.mu.Unlock() - return nil, ErrConnClosing - } - checkStreamsQuota := t.streamsQuota != nil - t.mu.Unlock() - if checkStreamsQuota { - sq, err := wait(ctx, t.shutdownChan, t.streamsQuota.acquire()) - if err != nil { - return nil, err - } - // Returns the quota balance back. - if sq > 1 { - t.streamsQuota.add(sq - 1) - } - } - if _, err := wait(ctx, t.shutdownChan, t.writableChan); err != nil { - // t.streamsQuota will be updated when t.CloseStream is invoked. 
-		return nil, err
-	}
-	t.mu.Lock()
-	if t.state != reachable {
-		t.mu.Unlock()
-		return nil, ErrConnClosing
-	}
-	s := t.newStream(ctx, callHdr)
-	t.activeStreams[s.id] = s
-
-	// This stream is not counted when applySettings(...) initializes t.streamsQuota.
-	// Reset t.streamsQuota to the right value.
-	var reset bool
-	if !checkStreamsQuota && t.streamsQuota != nil {
-		reset = true
-	}
-	t.mu.Unlock()
-	if reset {
-		t.streamsQuota.reset(-1)
-	}
-
-	// HPACK encodes various headers. Note that once WriteField(...) is
-	// called, the corresponding headers/continuation frame has to be sent
-	// because hpack.Encoder is stateful.
-	t.hBuf.Reset()
-	t.hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"})
-	t.hEnc.WriteField(hpack.HeaderField{Name: ":scheme", Value: t.scheme})
-	t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method})
-	t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
-	t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
-	t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
-	t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"})
-
-	if callHdr.SendCompress != "" {
-		t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
-	}
-	if timeout > 0 {
-		t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: timeoutEncode(timeout)})
-	}
-	for k, v := range authData {
-		t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
-	}
-	var (
-		hasMD      bool
-		endHeaders bool
-	)
-	if md, ok := metadata.FromContext(ctx); ok {
-		hasMD = true
-		for k, v := range md {
-			for _, entry := range v {
-				t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
-			}
-		}
-	}
-	first := true
-	// Sends the headers in a single batch even when they span multiple frames.
-	for !endHeaders {
-		size := t.hBuf.Len()
-		if size > http2MaxFrameLen {
-			size = http2MaxFrameLen
-		} else {
-			endHeaders = true
-		}
-		var flush bool
-		if endHeaders && (hasMD || callHdr.Flush) {
-			flush = true
-		}
-		if first {
-			// Sends a HeadersFrame to server to start a new stream.
-			p := http2.HeadersFrameParam{
-				StreamID:      s.id,
-				BlockFragment: t.hBuf.Next(size),
-				EndStream:     false,
-				EndHeaders:    endHeaders,
-			}
-			// Do a force flush for the buffered frames iff it is the last headers frame
-			// and there is header metadata to be sent. Otherwise, there is no flushing
-			// until the corresponding data frame is written.
-			err = t.framer.writeHeaders(flush, p)
-			first = false
-		} else {
-			// Sends Continuation frames for the leftover headers.
-			err = t.framer.writeContinuation(flush, s.id, endHeaders, t.hBuf.Next(size))
-		}
-		if err != nil {
-			t.notifyError(err)
-			return nil, ConnectionErrorf("transport: %v", err)
-		}
-	}
-	t.writableChan <- 0
-	return s, nil
-}
-
-// CloseStream clears the footprint of a stream when the stream is not needed any more.
-// This must not be executed in reader's goroutine.
-func (t *http2Client) CloseStream(s *Stream, err error) {
-	var updateStreams bool
-	t.mu.Lock()
-	if t.streamsQuota != nil {
-		updateStreams = true
-	}
-	delete(t.activeStreams, s.id)
-	t.mu.Unlock()
-	if updateStreams {
-		t.streamsQuota.add(1)
-	}
-	// In case stream sending and receiving are invoked in separate
-	// goroutines (e.g., bi-directional streaming), the caller needs
-	// to call cancel on the stream to interrupt the blocking on
-	// other goroutines.
- s.cancel() - s.mu.Lock() - if q := s.fc.restoreConn(); q > 0 { - t.controlBuf.put(&windowUpdate{0, q}) - } - if s.state == streamDone { - s.mu.Unlock() - return - } - if !s.headerDone { - close(s.headerChan) - s.headerDone = true - } - s.state = streamDone - s.mu.Unlock() - if _, ok := err.(StreamError); ok { - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeCancel}) - } -} - -// Close kicks off the shutdown process of the transport. This should be called -// only once on a transport. Once it is called, the transport should not be -// accessed any more. -func (t *http2Client) Close() (err error) { - t.mu.Lock() - if t.state == closing { - t.mu.Unlock() - return errors.New("transport: Close() was already called") - } - t.state = closing - t.mu.Unlock() - close(t.shutdownChan) - err = t.conn.Close() - t.mu.Lock() - streams := t.activeStreams - t.activeStreams = nil - t.mu.Unlock() - // Notify all active streams. - for _, s := range streams { - s.mu.Lock() - if !s.headerDone { - close(s.headerChan) - s.headerDone = true - } - s.mu.Unlock() - s.write(recvMsg{err: ErrConnClosing}) - } - return -} - -// Write formats the data into HTTP2 data frame(s) and sends it out. The caller -// should proceed only if Write returns nil. -// TODO(zhaoq): opts.Delay is ignored in this implementation. Support it later -// if it improves the performance. -func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { - r := bytes.NewBuffer(data) - for { - var p []byte - if r.Len() > 0 { - size := http2MaxFrameLen - s.sendQuotaPool.add(0) - // Wait until the stream has some quota to send the data. - sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire()) - if err != nil { - return err - } - t.sendQuotaPool.add(0) - // Wait until the transport has some quota to send the data. - tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire()) - if err != nil { - if _, ok := err.(StreamError); ok { - t.sendQuotaPool.cancel() - } - return err - } - if sq < size { - size = sq - } - if tq < size { - size = tq - } - p = r.Next(size) - ps := len(p) - if ps < sq { - // Overbooked stream quota. Return it back. - s.sendQuotaPool.add(sq - ps) - } - if ps < tq { - // Overbooked transport quota. Return it back. - t.sendQuotaPool.add(tq - ps) - } - } - var ( - endStream bool - forceFlush bool - ) - if opts.Last && r.Len() == 0 { - endStream = true - } - // Indicate there is a writer who is about to write a data frame. - t.framer.adjustNumWriters(1) - // Got some quota. Try to acquire writing privilege on the transport. - if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { - if t.framer.adjustNumWriters(-1) == 0 { - // This writer is the last one in this batch and has the - // responsibility to flush the buffered frames. It queues - // a flush request to controlBuf instead of flushing directly - // in order to avoid the race with other writing or flushing. - t.controlBuf.put(&flushIO{}) - } - return err - } - if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 { - // Do a force flush iff this is last frame for the entire gRPC message - // and the caller is the only writer at this moment. - forceFlush = true - } - // If WriteData fails, all the pending streams will be handled - // by http2Client.Close(). No explicit CloseStream() needs to be - // invoked. 
- if err := t.framer.writeData(forceFlush, s.id, endStream, p); err != nil { - t.notifyError(err) - return ConnectionErrorf("transport: %v", err) - } - if t.framer.adjustNumWriters(-1) == 0 { - t.framer.flushWrite() - } - t.writableChan <- 0 - if r.Len() == 0 { - break - } - } - if !opts.Last { - return nil - } - s.mu.Lock() - if s.state != streamDone { - if s.state == streamReadDone { - s.state = streamDone - } else { - s.state = streamWriteDone - } - } - s.mu.Unlock() - return nil -} - -func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { - t.mu.Lock() - defer t.mu.Unlock() - s, ok := t.activeStreams[f.Header().StreamID] - return s, ok -} - -// updateWindow adjusts the inbound quota for the stream and the transport. -// Window updates will deliver to the controller for sending when -// the cumulative quota exceeds the corresponding threshold. -func (t *http2Client) updateWindow(s *Stream, n uint32) { - swu, cwu := s.fc.onRead(n) - if swu > 0 { - t.controlBuf.put(&windowUpdate{s.id, swu}) - } - if cwu > 0 { - t.controlBuf.put(&windowUpdate{0, cwu}) - } -} - -func (t *http2Client) handleData(f *http2.DataFrame) { - // Select the right stream to dispatch. - s, ok := t.getStream(f) - if !ok { - return - } - size := len(f.Data()) - if size > 0 { - if err := s.fc.onData(uint32(size)); err != nil { - if _, ok := err.(ConnectionError); ok { - t.notifyError(err) - return - } - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - return - } - s.state = streamDone - s.statusCode = codes.Internal - s.statusDesc = err.Error() - s.mu.Unlock() - s.write(recvMsg{err: io.EOF}) - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) - return - } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - data := make([]byte, size) - copy(data, f.Data()) - s.write(recvMsg{data: data}) - } - // The server has closed the stream without sending trailers. Record that - // the read direction is closed, and set the status appropriately. - if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { - s.mu.Lock() - if s.state == streamWriteDone { - s.state = streamDone - } else { - s.state = streamReadDone - } - s.statusCode = codes.Internal - s.statusDesc = "server closed the stream without sending trailers" - s.mu.Unlock() - s.write(recvMsg{err: io.EOF}) - } -} - -func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { - s, ok := t.getStream(f) - if !ok { - return - } - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - return - } - s.state = streamDone - if !s.headerDone { - close(s.headerChan) - s.headerDone = true - } - s.statusCode, ok = http2ErrConvTab[http2.ErrCode(f.ErrCode)] - if !ok { - grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode) - } - s.mu.Unlock() - s.write(recvMsg{err: io.EOF}) -} - -func (t *http2Client) handleSettings(f *http2.SettingsFrame) { - if f.IsAck() { - return - } - var ss []http2.Setting - f.ForeachSetting(func(s http2.Setting) error { - ss = append(ss, s) - return nil - }) - // The settings will be applied once the ack is sent. 
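-	// (Editor's cross-reference: controller() handles this item by writing
-	// the SETTINGS ack first and then calling t.applySettings(ss).)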
- t.controlBuf.put(&settings{ack: true, ss: ss}) -} - -func (t *http2Client) handlePing(f *http2.PingFrame) { - pingAck := &ping{ack: true} - copy(pingAck.data[:], f.Data[:]) - t.controlBuf.put(pingAck) -} - -func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { - // TODO(zhaoq): GoAwayFrame handler to be implemented -} - -func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { - id := f.Header().StreamID - incr := f.Increment - if id == 0 { - t.sendQuotaPool.add(int(incr)) - return - } - if s, ok := t.getStream(f); ok { - s.sendQuotaPool.add(int(incr)) - } -} - -// operateHeaders takes action on the decoded headers. -func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { - s, ok := t.getStream(frame) - if !ok { - return - } - var state decodeState - for _, hf := range frame.Fields { - state.processHeaderField(hf) - } - if state.err != nil { - s.write(recvMsg{err: state.err}) - // Something wrong. Stops reading even when there is remaining. - return - } - - endStream := frame.StreamEnded() - - s.mu.Lock() - if !endStream { - s.recvCompress = state.encoding - } - if !s.headerDone { - if !endStream && len(state.mdata) > 0 { - s.header = state.mdata - } - close(s.headerChan) - s.headerDone = true - } - if !endStream || s.state == streamDone { - s.mu.Unlock() - return - } - - if len(state.mdata) > 0 { - s.trailer = state.mdata - } - s.state = streamDone - s.statusCode = state.statusCode - s.statusDesc = state.statusDesc - s.mu.Unlock() - - s.write(recvMsg{err: io.EOF}) -} - -func handleMalformedHTTP2(s *Stream, err http2.StreamError) { - s.mu.Lock() - if !s.headerDone { - close(s.headerChan) - s.headerDone = true - } - s.mu.Unlock() - s.write(recvMsg{err: StreamErrorf(http2ErrConvTab[err.Code], "%v", err)}) -} - -// reader runs as a separate goroutine in charge of reading data from network -// connection. -// -// TODO(zhaoq): currently one reader per transport. Investigate whether this is -// optimal. -// TODO(zhaoq): Check the validity of the incoming frame sequence. -func (t *http2Client) reader() { - // Check the validity of server preface. - frame, err := t.framer.readFrame() - if err != nil { - t.notifyError(err) - return - } - sf, ok := frame.(*http2.SettingsFrame) - if !ok { - t.notifyError(err) - return - } - t.handleSettings(sf) - - // loop to keep reading incoming messages on this transport. - for { - frame, err := t.framer.readFrame() - if err != nil { - // Abort an active stream if the http2.Framer returns a - // http2.StreamError. This can happen only if the server's response - // is malformed http2. - if se, ok := err.(http2.StreamError); ok { - t.mu.Lock() - s := t.activeStreams[se.StreamID] - t.mu.Unlock() - if s != nil { - handleMalformedHTTP2(s, se) - } - continue - } else { - // Transport error. 
- t.notifyError(err) - return - } - } - switch frame := frame.(type) { - case *http2.MetaHeadersFrame: - t.operateHeaders(frame) - case *http2.DataFrame: - t.handleData(frame) - case *http2.RSTStreamFrame: - t.handleRSTStream(frame) - case *http2.SettingsFrame: - t.handleSettings(frame) - case *http2.PingFrame: - t.handlePing(frame) - case *http2.GoAwayFrame: - t.handleGoAway(frame) - case *http2.WindowUpdateFrame: - t.handleWindowUpdate(frame) - default: - grpclog.Printf("transport: http2Client.reader got unhandled frame type %v.", frame) - } - } -} - -func (t *http2Client) applySettings(ss []http2.Setting) { - for _, s := range ss { - switch s.ID { - case http2.SettingMaxConcurrentStreams: - // TODO(zhaoq): This is a hack to avoid significant refactoring of the - // code to deal with the unrealistic int32 overflow. Probably will try - // to find a better way to handle this later. - if s.Val > math.MaxInt32 { - s.Val = math.MaxInt32 - } - t.mu.Lock() - reset := t.streamsQuota != nil - if !reset { - t.streamsQuota = newQuotaPool(int(s.Val) - len(t.activeStreams)) - } - ms := t.maxStreams - t.maxStreams = int(s.Val) - t.mu.Unlock() - if reset { - t.streamsQuota.reset(int(s.Val) - ms) - } - case http2.SettingInitialWindowSize: - t.mu.Lock() - for _, stream := range t.activeStreams { - // Adjust the sending quota for each stream. - stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota)) - } - t.streamSendQuota = s.Val - t.mu.Unlock() - } - } -} - -// controller running in a separate goroutine takes charge of sending control -// frames (e.g., window update, reset stream, setting, etc.) to the server. -func (t *http2Client) controller() { - for { - select { - case i := <-t.controlBuf.get(): - t.controlBuf.load() - select { - case <-t.writableChan: - switch i := i.(type) { - case *windowUpdate: - t.framer.writeWindowUpdate(true, i.streamID, i.increment) - case *settings: - if i.ack { - t.framer.writeSettingsAck(true) - t.applySettings(i.ss) - } else { - t.framer.writeSettings(true, i.ss...) - } - case *resetStream: - t.framer.writeRSTStream(true, i.streamID, i.code) - case *flushIO: - t.framer.flushWrite() - case *ping: - t.framer.writePing(true, i.ack, i.data) - default: - grpclog.Printf("transport: http2Client.controller got unexpected item type %v\n", i) - } - t.writableChan <- 0 - continue - case <-t.shutdownChan: - return - } - case <-t.shutdownChan: - return - } - } -} - -func (t *http2Client) Error() <-chan struct{} { - return t.errorChan -} - -func (t *http2Client) notifyError(err error) { - t.mu.Lock() - defer t.mu.Unlock() - // make sure t.errorChan is closed only once. - if t.state == reachable { - t.state = unreachable - close(t.errorChan) - grpclog.Printf("transport: http2Client.notifyError got notified that the client transport was broken %v.", err) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_server.go deleted file mode 100644 index cec441cf..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http2_server.go +++ /dev/null @@ -1,691 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package transport - -import ( - "bytes" - "errors" - "io" - "math" - "net" - "strconv" - "sync" - - "golang.org/x/net/context" - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" -) - -// ErrIllegalHeaderWrite indicates that setting header is illegal because of -// the stream's state. -var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") - -// http2Server implements the ServerTransport interface with HTTP2. -type http2Server struct { - conn net.Conn - maxStreamID uint32 // max stream ID ever seen - authInfo credentials.AuthInfo // auth info about the connection - // writableChan synchronizes write access to the transport. - // A writer acquires the write lock by receiving a value on writableChan - // and releases it by sending on writableChan. - writableChan chan int - // shutdownChan is closed when Close is called. - // Blocking operations should select on shutdownChan to avoid - // blocking forever after Close. - shutdownChan chan struct{} - framer *framer - hBuf *bytes.Buffer // the buffer for HPACK encoding - hEnc *hpack.Encoder // HPACK encoder - - // The max number of concurrent streams. - maxStreams uint32 - // controlBuf delivers all the control related tasks (e.g., window - // updates, reset streams, and various settings) to the controller. - controlBuf *recvBuffer - fc *inFlow - // sendQuotaPool provides flow control to outbound message. - sendQuotaPool *quotaPool - - mu sync.Mutex // guard the following - state transportState - activeStreams map[uint32]*Stream - // the per-stream outbound flow control window size set by the peer. - streamSendQuota uint32 -} - -// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is -// returned if something goes wrong. 
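-// (Editor's sketch, assuming a plain TCP listener; in practice the grpc
-// server calls this after Accept and any credential handshake:
-//	conn, _ := lis.Accept()
-//	st, err := newHTTP2Server(conn, 100, nil) // maxStreams=100, no auth info
-// The names lis and st are illustrative only.)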
-func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (_ ServerTransport, err error) { - framer := newFramer(conn) - // Send initial settings as connection preface to client. - var settings []http2.Setting - // TODO(zhaoq): Have a better way to signal "no limit" because 0 is - // permitted in the HTTP2 spec. - if maxStreams == 0 { - maxStreams = math.MaxUint32 - } else { - settings = append(settings, http2.Setting{http2.SettingMaxConcurrentStreams, maxStreams}) - } - if initialWindowSize != defaultWindowSize { - settings = append(settings, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)}) - } - if err := framer.writeSettings(true, settings...); err != nil { - return nil, ConnectionErrorf("transport: %v", err) - } - // Adjust the connection flow control window if needed. - if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { - if err := framer.writeWindowUpdate(true, 0, delta); err != nil { - return nil, ConnectionErrorf("transport: %v", err) - } - } - var buf bytes.Buffer - t := &http2Server{ - conn: conn, - authInfo: authInfo, - framer: framer, - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - maxStreams: maxStreams, - controlBuf: newRecvBuffer(), - fc: &inFlow{limit: initialConnWindowSize}, - sendQuotaPool: newQuotaPool(defaultWindowSize), - state: reachable, - writableChan: make(chan int, 1), - shutdownChan: make(chan struct{}), - activeStreams: make(map[uint32]*Stream), - streamSendQuota: defaultWindowSize, - } - go t.controller() - t.writableChan <- 0 - return t, nil -} - -// operateHeader takes action on the decoded headers. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) { - buf := newRecvBuffer() - fc := &inFlow{ - limit: initialWindowSize, - conn: t.fc, - } - s := &Stream{ - id: frame.Header().StreamID, - st: t, - buf: buf, - fc: fc, - } - - var state decodeState - for _, hf := range frame.Fields { - state.processHeaderField(hf) - } - if err := state.err; err != nil { - if se, ok := err.(StreamError); ok { - t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]}) - } - return - } - - if frame.StreamEnded() { - // s is just created by the caller. No lock needed. - s.state = streamReadDone - } - s.recvCompress = state.encoding - if state.timeoutSet { - s.ctx, s.cancel = context.WithTimeout(context.TODO(), state.timeout) - } else { - s.ctx, s.cancel = context.WithCancel(context.TODO()) - } - pr := &peer.Peer{ - Addr: t.conn.RemoteAddr(), - } - // Attach Auth info if there is any. - if t.authInfo != nil { - pr.AuthInfo = t.authInfo - } - s.ctx = peer.NewContext(s.ctx, pr) - // Cache the current stream to the context so that the server application - // can find out. Required when the server wants to send some metadata - // back to the client (unary call only). - s.ctx = newContextWithStream(s.ctx, s) - // Attach the received metadata to the context. 
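-	// (Editor's note: a server handler can then retrieve it with
-	//	md, ok := metadata.FromContext(stream.Context())
-	// using this package's metadata import; stream is illustrative.)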
- if len(state.mdata) > 0 { - s.ctx = metadata.NewContext(s.ctx, state.mdata) - } - - s.dec = &recvBufferReader{ - ctx: s.ctx, - recv: s.buf, - } - s.recvCompress = state.encoding - s.method = state.method - t.mu.Lock() - if t.state != reachable { - t.mu.Unlock() - return - } - if uint32(len(t.activeStreams)) >= t.maxStreams { - t.mu.Unlock() - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) - return - } - s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) - t.activeStreams[s.id] = s - t.mu.Unlock() - s.windowHandler = func(n int) { - t.updateWindow(s, uint32(n)) - } - handle(s) -} - -// HandleStreams receives incoming streams using the given handler. This is -// typically run in a separate goroutine. -func (t *http2Server) HandleStreams(handle func(*Stream)) { - // Check the validity of client preface. - preface := make([]byte, len(clientPreface)) - if _, err := io.ReadFull(t.conn, preface); err != nil { - grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) - t.Close() - return - } - if !bytes.Equal(preface, clientPreface) { - grpclog.Printf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) - t.Close() - return - } - - frame, err := t.framer.readFrame() - if err != nil { - grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) - t.Close() - return - } - sf, ok := frame.(*http2.SettingsFrame) - if !ok { - grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) - t.Close() - return - } - t.handleSettings(sf) - - for { - frame, err := t.framer.readFrame() - if err != nil { - t.Close() - return - } - switch frame := frame.(type) { - case *http2.MetaHeadersFrame: - id := frame.Header().StreamID - if id%2 != 1 || id <= t.maxStreamID { - // illegal gRPC stream id. - grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", id) - t.Close() - break - } - t.maxStreamID = id - t.operateHeaders(frame, handle) - case *http2.DataFrame: - t.handleData(frame) - case *http2.RSTStreamFrame: - t.handleRSTStream(frame) - case *http2.SettingsFrame: - t.handleSettings(frame) - case *http2.PingFrame: - t.handlePing(frame) - case *http2.WindowUpdateFrame: - t.handleWindowUpdate(frame) - case *http2.GoAwayFrame: - break - default: - grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) - } - } -} - -func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { - t.mu.Lock() - defer t.mu.Unlock() - if t.activeStreams == nil { - // The transport is closing. - return nil, false - } - s, ok := t.activeStreams[f.Header().StreamID] - if !ok { - // The stream is already done. - return nil, false - } - return s, true -} - -// updateWindow adjusts the inbound quota for the stream and the transport. -// Window updates will deliver to the controller for sending when -// the cumulative quota exceeds the corresponding threshold. -func (t *http2Server) updateWindow(s *Stream, n uint32) { - swu, cwu := s.fc.onRead(n) - if swu > 0 { - t.controlBuf.put(&windowUpdate{s.id, swu}) - } - if cwu > 0 { - t.controlBuf.put(&windowUpdate{0, cwu}) - } -} - -func (t *http2Server) handleData(f *http2.DataFrame) { - // Select the right stream to dispatch. 
- s, ok := t.getStream(f) - if !ok { - return - } - size := len(f.Data()) - if size > 0 { - if err := s.fc.onData(uint32(size)); err != nil { - if _, ok := err.(ConnectionError); ok { - grpclog.Printf("transport: http2Server %v", err) - t.Close() - return - } - t.closeStream(s) - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) - return - } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - data := make([]byte, size) - copy(data, f.Data()) - s.write(recvMsg{data: data}) - } - if f.Header().Flags.Has(http2.FlagDataEndStream) { - // Received the end of stream from the client. - s.mu.Lock() - if s.state != streamDone { - if s.state == streamWriteDone { - s.state = streamDone - } else { - s.state = streamReadDone - } - } - s.mu.Unlock() - s.write(recvMsg{err: io.EOF}) - } -} - -func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { - s, ok := t.getStream(f) - if !ok { - return - } - t.closeStream(s) -} - -func (t *http2Server) handleSettings(f *http2.SettingsFrame) { - if f.IsAck() { - return - } - var ss []http2.Setting - f.ForeachSetting(func(s http2.Setting) error { - ss = append(ss, s) - return nil - }) - // The settings will be applied once the ack is sent. - t.controlBuf.put(&settings{ack: true, ss: ss}) -} - -func (t *http2Server) handlePing(f *http2.PingFrame) { - pingAck := &ping{ack: true} - copy(pingAck.data[:], f.Data[:]) - t.controlBuf.put(pingAck) -} - -func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { - id := f.Header().StreamID - incr := f.Increment - if id == 0 { - t.sendQuotaPool.add(int(incr)) - return - } - if s, ok := t.getStream(f); ok { - s.sendQuotaPool.add(int(incr)) - } -} - -func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error { - first := true - endHeaders := false - var err error - // Sends the headers in a single batch. - for !endHeaders { - size := t.hBuf.Len() - if size > http2MaxFrameLen { - size = http2MaxFrameLen - } else { - endHeaders = true - } - if first { - p := http2.HeadersFrameParam{ - StreamID: s.id, - BlockFragment: b.Next(size), - EndStream: endStream, - EndHeaders: endHeaders, - } - err = t.framer.writeHeaders(endHeaders, p) - first = false - } else { - err = t.framer.writeContinuation(endHeaders, s.id, endHeaders, b.Next(size)) - } - if err != nil { - t.Close() - return ConnectionErrorf("transport: %v", err) - } - } - return nil -} - -// WriteHeader sends the header metedata md back to the client. -func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - s.mu.Lock() - if s.headerOk || s.state == streamDone { - s.mu.Unlock() - return ErrIllegalHeaderWrite - } - s.headerOk = true - s.mu.Unlock() - if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { - return err - } - t.hBuf.Reset() - t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) - if s.sendCompress != "" { - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) - } - for k, v := range md { - for _, entry := range v { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) - } - } - if err := t.writeHeaders(s, t.hBuf, false); err != nil { - return err - } - t.writableChan <- 0 - return nil -} - -// WriteStatus sends stream status to the client and terminates the stream. 
-// No further I/O operations are permitted on this stream.
-// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
-// OK is adopted.
-func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
-	var headersSent bool
-	s.mu.Lock()
-	if s.state == streamDone {
-		s.mu.Unlock()
-		return nil
-	}
-	if s.headerOk {
-		headersSent = true
-	}
-	s.mu.Unlock()
-	if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
-		return err
-	}
-	t.hBuf.Reset()
-	if !headersSent {
-		t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
-		t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
-	}
-	t.hEnc.WriteField(
-		hpack.HeaderField{
-			Name:  "grpc-status",
-			Value: strconv.Itoa(int(statusCode)),
-		})
-	t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: statusDesc})
-	// Attach the trailer metadata.
-	for k, v := range s.trailer {
-		for _, entry := range v {
-			t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
-		}
-	}
-	if err := t.writeHeaders(s, t.hBuf, true); err != nil {
-		t.Close()
-		return err
-	}
-	t.closeStream(s)
-	t.writableChan <- 0
-	return nil
-}
-
-// Write converts the data into an HTTP2 data frame and sends it out. A non-nil error
-// is returned if it fails (e.g., framing error, transport error).
-func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
-	// TODO(zhaoq): Support multi-writers for a single stream.
-	var writeHeaderFrame bool
-	s.mu.Lock()
-	if !s.headerOk {
-		writeHeaderFrame = true
-		s.headerOk = true
-	}
-	s.mu.Unlock()
-	if writeHeaderFrame {
-		if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
-			return err
-		}
-		t.hBuf.Reset()
-		t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
-		t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
-		if s.sendCompress != "" {
-			t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
-		}
-		p := http2.HeadersFrameParam{
-			StreamID:      s.id,
-			BlockFragment: t.hBuf.Bytes(),
-			EndHeaders:    true,
-		}
-		if err := t.framer.writeHeaders(false, p); err != nil {
-			t.Close()
-			return ConnectionErrorf("transport: %v", err)
-		}
-		t.writableChan <- 0
-	}
-	r := bytes.NewBuffer(data)
-	for {
-		if r.Len() == 0 {
-			return nil
-		}
-		size := http2MaxFrameLen
-		s.sendQuotaPool.add(0)
-		// Wait until the stream has some quota to send the data.
-		sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire())
-		if err != nil {
-			return err
-		}
-		t.sendQuotaPool.add(0)
-		// Wait until the transport has some quota to send the data.
-		tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire())
-		if err != nil {
-			if _, ok := err.(StreamError); ok {
-				t.sendQuotaPool.cancel()
-			}
-			return err
-		}
-		if sq < size {
-			size = sq
-		}
-		if tq < size {
-			size = tq
-		}
-		p := r.Next(size)
-		ps := len(p)
-		if ps < sq {
-			// Overbooked stream quota. Return it back.
-			s.sendQuotaPool.add(sq - ps)
-		}
-		if ps < tq {
-			// Overbooked transport quota. Return it back.
-			t.sendQuotaPool.add(tq - ps)
-		}
-		t.framer.adjustNumWriters(1)
-		// Got some quota. Try to acquire writing privilege on the
-		// transport.
-		if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
-			if t.framer.adjustNumWriters(-1) == 0 {
-				// This writer is the last one in this batch and has the
-				// responsibility to flush the buffered frames.
It queues - // a flush request to controlBuf instead of flushing directly - // in order to avoid the race with other writing or flushing. - t.controlBuf.put(&flushIO{}) - } - return err - } - var forceFlush bool - if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last { - forceFlush = true - } - if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil { - t.Close() - return ConnectionErrorf("transport: %v", err) - } - if t.framer.adjustNumWriters(-1) == 0 { - t.framer.flushWrite() - } - t.writableChan <- 0 - } - -} - -func (t *http2Server) applySettings(ss []http2.Setting) { - for _, s := range ss { - if s.ID == http2.SettingInitialWindowSize { - t.mu.Lock() - defer t.mu.Unlock() - for _, stream := range t.activeStreams { - stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota)) - } - t.streamSendQuota = s.Val - } - - } -} - -// controller running in a separate goroutine takes charge of sending control -// frames (e.g., window update, reset stream, setting, etc.) to the server. -func (t *http2Server) controller() { - for { - select { - case i := <-t.controlBuf.get(): - t.controlBuf.load() - select { - case <-t.writableChan: - switch i := i.(type) { - case *windowUpdate: - t.framer.writeWindowUpdate(true, i.streamID, i.increment) - case *settings: - if i.ack { - t.framer.writeSettingsAck(true) - t.applySettings(i.ss) - } else { - t.framer.writeSettings(true, i.ss...) - } - case *resetStream: - t.framer.writeRSTStream(true, i.streamID, i.code) - case *flushIO: - t.framer.flushWrite() - case *ping: - t.framer.writePing(true, i.ack, i.data) - default: - grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i) - } - t.writableChan <- 0 - continue - case <-t.shutdownChan: - return - } - case <-t.shutdownChan: - return - } - } -} - -// Close starts shutting down the http2Server transport. -// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This -// could cause some resource issue. Revisit this later. -func (t *http2Server) Close() (err error) { - t.mu.Lock() - if t.state == closing { - t.mu.Unlock() - return errors.New("transport: Close() was already called") - } - t.state = closing - streams := t.activeStreams - t.activeStreams = nil - t.mu.Unlock() - close(t.shutdownChan) - err = t.conn.Close() - // Cancel all active streams. - for _, s := range streams { - s.cancel() - } - return -} - -// closeStream clears the footprint of a stream when the stream is not needed -// any more. -func (t *http2Server) closeStream(s *Stream) { - t.mu.Lock() - delete(t.activeStreams, s.id) - t.mu.Unlock() - if q := s.fc.restoreConn(); q > 0 { - t.controlBuf.put(&windowUpdate{0, q}) - } - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - return - } - s.state = streamDone - s.mu.Unlock() - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() -} - -func (t *http2Server) RemoteAddr() net.Addr { - return t.conn.RemoteAddr() -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http_util.go deleted file mode 100644 index 6aabcd4a..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/http_util.go +++ /dev/null @@ -1,406 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package transport - -import ( - "bufio" - "fmt" - "io" - "net" - "strconv" - "strings" - "sync/atomic" - "time" - - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" -) - -const ( - // The primary user agent - primaryUA = "grpc-go/0.11" - // http2MaxFrameLen specifies the max length of a HTTP2 frame. - http2MaxFrameLen = 16384 // 16KB frame - // http://http2.github.io/http2-spec/#SettingValues - http2InitHeaderTableSize = 4096 - // http2IOBufSize specifies the buffer size for sending frames. - http2IOBufSize = 32 * 1024 -) - -var ( - clientPreface = []byte(http2.ClientPreface) - http2ErrConvTab = map[http2.ErrCode]codes.Code{ - http2.ErrCodeNo: codes.Internal, - http2.ErrCodeProtocol: codes.Internal, - http2.ErrCodeInternal: codes.Internal, - http2.ErrCodeFlowControl: codes.ResourceExhausted, - http2.ErrCodeSettingsTimeout: codes.Internal, - http2.ErrCodeFrameSize: codes.Internal, - http2.ErrCodeRefusedStream: codes.Unavailable, - http2.ErrCodeCancel: codes.Canceled, - http2.ErrCodeCompression: codes.Internal, - http2.ErrCodeConnect: codes.Internal, - http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, - http2.ErrCodeInadequateSecurity: codes.PermissionDenied, - http2.ErrCodeHTTP11Required: codes.FailedPrecondition, - } - statusCodeConvTab = map[codes.Code]http2.ErrCode{ - codes.Internal: http2.ErrCodeInternal, - codes.Canceled: http2.ErrCodeCancel, - codes.Unavailable: http2.ErrCodeRefusedStream, - codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, - codes.PermissionDenied: http2.ErrCodeInadequateSecurity, - } -) - -// Records the states during HPACK decoding. Must be reset once the -// decoding of the entire headers are finished. -type decodeState struct { - err error // first error encountered decoding - - encoding string - // statusCode caches the stream status received from the trailer - // the server sent. Client side only. 
- statusCode codes.Code - statusDesc string - // Server side only fields. - timeoutSet bool - timeout time.Duration - method string - // key-value metadata map from the peer. - mdata map[string][]string -} - -// isReservedHeader checks whether hdr belongs to HTTP2 headers -// reserved by gRPC protocol. Any other headers are classified as the -// user-specified metadata. -func isReservedHeader(hdr string) bool { - if hdr != "" && hdr[0] == ':' { - return true - } - switch hdr { - case "content-type", - "grpc-message-type", - "grpc-encoding", - "grpc-message", - "grpc-status", - "grpc-timeout", - "te": - return true - default: - return false - } -} - -func (d *decodeState) setErr(err error) { - if d.err == nil { - d.err = err - } -} - -func (d *decodeState) processHeaderField(f hpack.HeaderField) { - switch f.Name { - case "content-type": - if !strings.Contains(f.Value, "application/grpc") { - d.setErr(StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value)) - return - } - case "grpc-encoding": - d.encoding = f.Value - case "grpc-status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.setErr(StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)) - return - } - d.statusCode = codes.Code(code) - case "grpc-message": - d.statusDesc = f.Value - case "grpc-timeout": - d.timeoutSet = true - var err error - d.timeout, err = timeoutDecode(f.Value) - if err != nil { - d.setErr(StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err)) - return - } - case ":path": - d.method = f.Value - default: - if !isReservedHeader(f.Name) { - if f.Name == "user-agent" { - i := strings.LastIndex(f.Value, " ") - if i == -1 { - // There is no application user agent string being set. - return - } - // Extract the application user agent string. - f.Value = f.Value[:i] - } - if d.mdata == nil { - d.mdata = make(map[string][]string) - } - k, v, err := metadata.DecodeKeyValue(f.Name, f.Value) - if err != nil { - grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) - return - } - d.mdata[k] = append(d.mdata[k], v) - } - } -} - -type timeoutUnit uint8 - -const ( - hour timeoutUnit = 'H' - minute timeoutUnit = 'M' - second timeoutUnit = 'S' - millisecond timeoutUnit = 'm' - microsecond timeoutUnit = 'u' - nanosecond timeoutUnit = 'n' -) - -func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { - switch u { - case hour: - return time.Hour, true - case minute: - return time.Minute, true - case second: - return time.Second, true - case millisecond: - return time.Millisecond, true - case microsecond: - return time.Microsecond, true - case nanosecond: - return time.Nanosecond, true - default: - } - return -} - -const maxTimeoutValue int64 = 100000000 - 1 - -// div does integer division and round-up the result. Note that this is -// equivalent to (d+r-1)/r but has less chance to overflow. -func div(d, r time.Duration) int64 { - if m := d % r; m > 0 { - return int64(d/r + 1) - } - return int64(d / r) -} - -// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it. 
-func timeoutEncode(t time.Duration) string { - if d := div(t, time.Nanosecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "n" - } - if d := div(t, time.Microsecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "u" - } - if d := div(t, time.Millisecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "m" - } - if d := div(t, time.Second); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "S" - } - if d := div(t, time.Minute); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "M" - } - // Note that maxTimeoutValue * time.Hour > MaxInt64. - return strconv.FormatInt(div(t, time.Hour), 10) + "H" -} - -func timeoutDecode(s string) (time.Duration, error) { - size := len(s) - if size < 2 { - return 0, fmt.Errorf("transport: timeout string is too short: %q", s) - } - unit := timeoutUnit(s[size-1]) - d, ok := timeoutUnitToDuration(unit) - if !ok { - return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) - } - t, err := strconv.ParseInt(s[:size-1], 10, 64) - if err != nil { - return 0, err - } - return d * time.Duration(t), nil -} - -type framer struct { - numWriters int32 - reader io.Reader - writer *bufio.Writer - fr *http2.Framer -} - -func newFramer(conn net.Conn) *framer { - f := &framer{ - reader: bufio.NewReaderSize(conn, http2IOBufSize), - writer: bufio.NewWriterSize(conn, http2IOBufSize), - } - f.fr = http2.NewFramer(f.writer, f.reader) - f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) - return f -} - -func (f *framer) adjustNumWriters(i int32) int32 { - return atomic.AddInt32(&f.numWriters, i) -} - -// The following writeXXX functions can only be called when the caller gets -// unblocked from writableChan channel (i.e., owns the privilege to write). 
- -func (f *framer) writeContinuation(forceFlush bool, streamID uint32, endHeaders bool, headerBlockFragment []byte) error { - if err := f.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeData(forceFlush bool, streamID uint32, endStream bool, data []byte) error { - if err := f.fr.WriteData(streamID, endStream, data); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeGoAway(forceFlush bool, maxStreamID uint32, code http2.ErrCode, debugData []byte) error { - if err := f.fr.WriteGoAway(maxStreamID, code, debugData); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeHeaders(forceFlush bool, p http2.HeadersFrameParam) error { - if err := f.fr.WriteHeaders(p); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writePing(forceFlush, ack bool, data [8]byte) error { - if err := f.fr.WritePing(ack, data); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writePriority(forceFlush bool, streamID uint32, p http2.PriorityParam) error { - if err := f.fr.WritePriority(streamID, p); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writePushPromise(forceFlush bool, p http2.PushPromiseParam) error { - if err := f.fr.WritePushPromise(p); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeRSTStream(forceFlush bool, streamID uint32, code http2.ErrCode) error { - if err := f.fr.WriteRSTStream(streamID, code); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeSettings(forceFlush bool, settings ...http2.Setting) error { - if err := f.fr.WriteSettings(settings...); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeSettingsAck(forceFlush bool) error { - if err := f.fr.WriteSettingsAck(); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeWindowUpdate(forceFlush bool, streamID, incr uint32) error { - if err := f.fr.WriteWindowUpdate(streamID, incr); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) flushWrite() error { - return f.writer.Flush() -} - -func (f *framer) readFrame() (http2.Frame, error) { - return f.fr.ReadFrame() -} diff --git a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/transport.go b/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/transport.go deleted file mode 100644 index f027cae5..00000000 --- a/vendor/github.com/docker/distribution/vendor/google.golang.org/grpc/transport/transport.go +++ /dev/null @@ -1,508 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -/* -Package transport defines and implements message oriented communication channel -to complete various transactions (e.g., an RPC). -*/ -package transport - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/trace" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/metadata" -) - -// recvMsg represents the received msg from the transport. All transport -// protocol specific info has been removed. -type recvMsg struct { - data []byte - // nil: received some data - // io.EOF: stream is completed. data is nil. - // other non-nil error: transport failure. data is nil. - err error -} - -func (recvMsg) isItem() bool { - return true -} - -// All items in an out of a recvBuffer should be the same type. -type item interface { - isItem() bool -} - -// recvBuffer is an unbounded channel of item. -type recvBuffer struct { - c chan item - mu sync.Mutex - backlog []item -} - -func newRecvBuffer() *recvBuffer { - b := &recvBuffer{ - c: make(chan item, 1), - } - return b -} - -func (b *recvBuffer) put(r item) { - b.mu.Lock() - defer b.mu.Unlock() - b.backlog = append(b.backlog, r) - select { - case b.c <- b.backlog[0]: - b.backlog = b.backlog[1:] - default: - } -} - -func (b *recvBuffer) load() { - b.mu.Lock() - defer b.mu.Unlock() - if len(b.backlog) > 0 { - select { - case b.c <- b.backlog[0]: - b.backlog = b.backlog[1:] - default: - } - } -} - -// get returns the channel that receives an item in the buffer. -// -// Upon receipt of an item, the caller should call load to send another -// item onto the channel if there is any. -func (b *recvBuffer) get() <-chan item { - return b.c -} - -// recvBufferReader implements io.Reader interface to read the data from -// recvBuffer. -type recvBufferReader struct { - ctx context.Context - recv *recvBuffer - last *bytes.Reader // Stores the remaining data in the previous calls. - err error -} - -// Read reads the next len(p) bytes from last. If last is drained, it tries to -// read additional data from recv. It blocks if there no additional data available -// in recv. If Read returns any non-nil error, it will continue to return that error. 
-func (r *recvBufferReader) Read(p []byte) (n int, err error) {
-	if r.err != nil {
-		return 0, r.err
-	}
-	defer func() { r.err = err }()
-	if r.last != nil && r.last.Len() > 0 {
-		// Read remaining data left in last call.
-		return r.last.Read(p)
-	}
-	select {
-	case <-r.ctx.Done():
-		return 0, ContextErr(r.ctx.Err())
-	case i := <-r.recv.get():
-		r.recv.load()
-		m := i.(*recvMsg)
-		if m.err != nil {
-			return 0, m.err
-		}
-		r.last = bytes.NewReader(m.data)
-		return r.last.Read(p)
-	}
-}
-
-type streamState uint8
-
-const (
-	streamActive    streamState = iota
-	streamWriteDone // EndStream sent
-	streamReadDone  // EndStream received
-	streamDone      // sendDone and recvDone or RSTStreamFrame is sent or received.
-)
-
-// Stream represents an RPC in the transport layer.
-type Stream struct {
-	id uint32
-	// nil for client side Stream.
-	st ServerTransport
-	// ctx is the associated context of the stream.
-	ctx    context.Context
-	cancel context.CancelFunc
-	// method records the associated RPC method of the stream.
-	method       string
-	recvCompress string
-	sendCompress string
-	buf          *recvBuffer
-	dec          io.Reader
-	fc           *inFlow
-	recvQuota    uint32
-	// The accumulated inbound quota pending for window update.
-	updateQuota uint32
-	// The handler to control the window update procedure for both this
-	// particular stream and the associated transport.
-	windowHandler func(int)
-
-	sendQuotaPool *quotaPool
-	// Close headerChan to indicate the end of reception of header metadata.
-	headerChan chan struct{}
-	// header caches the received header metadata.
-	header metadata.MD
-	// The key-value map of trailer metadata.
-	trailer metadata.MD
-
-	mu sync.RWMutex // guard the following
-	// headerOk becomes true once the first header is about to be sent.
-	headerOk bool
-	state    streamState
-	// true iff headerChan is closed. Used to avoid closing headerChan
-	// multiple times.
-	headerDone bool
-	// the status received from the server.
-	statusCode codes.Code
-	statusDesc string
-}
-
-// RecvCompress returns the compression algorithm applied to the inbound
-// message. It is an empty string if no compression is applied.
-func (s *Stream) RecvCompress() string {
-	return s.recvCompress
-}
-
-// SetSendCompress sets the compression algorithm to the stream.
-func (s *Stream) SetSendCompress(str string) {
-	s.sendCompress = str
-}
-
-// Header acquires the key-value pairs of header metadata once it
-// is available. It blocks until i) the metadata is ready or ii) there is no
-// header metadata or iii) the stream is cancelled/expired.
-func (s *Stream) Header() (metadata.MD, error) {
-	select {
-	case <-s.ctx.Done():
-		return nil, ContextErr(s.ctx.Err())
-	case <-s.headerChan:
-		return s.header.Copy(), nil
-	}
-}
-
-// Trailer returns the cached trailer metadata. Note that if it is not called
-// after the entire stream is done, it could return an empty MD. Client
-// side only.
-func (s *Stream) Trailer() metadata.MD {
-	s.mu.RLock()
-	defer s.mu.RUnlock()
-	return s.trailer.Copy()
-}
-
-// ServerTransport returns the underlying ServerTransport for the stream.
-// The client side stream always returns nil.
-func (s *Stream) ServerTransport() ServerTransport {
-	return s.st
-}
-
-// Context returns the context of the stream.
-func (s *Stream) Context() context.Context {
-	return s.ctx
-}
-
-// TraceContext recreates the context of s with a trace.Trace.
-func (s *Stream) TraceContext(tr trace.Trace) {
-	s.ctx = trace.NewContext(s.ctx, tr)
-}
-
-// Method returns the method for the stream.
-func (s *Stream) Method() string { - return s.method -} - -// StatusCode returns statusCode received from the server. -func (s *Stream) StatusCode() codes.Code { - return s.statusCode -} - -// StatusDesc returns statusDesc received from the server. -func (s *Stream) StatusDesc() string { - return s.statusDesc -} - -// ErrIllegalTrailerSet indicates that the trailer has already been set or it -// is too late to do so. -var ErrIllegalTrailerSet = errors.New("transport: trailer has been set") - -// SetTrailer sets the trailer metadata which will be sent with the RPC status -// by the server. This can only be called at most once. Server side only. -func (s *Stream) SetTrailer(md metadata.MD) error { - s.mu.Lock() - defer s.mu.Unlock() - if s.trailer != nil { - return ErrIllegalTrailerSet - } - s.trailer = md.Copy() - return nil -} - -func (s *Stream) write(m recvMsg) { - s.buf.put(&m) -} - -// Read reads all the data available for this Stream from the transport and -// passes them into the decoder, which converts them into a gRPC message stream. -// The error is io.EOF when the stream is done or another non-nil error if -// the stream broke. -func (s *Stream) Read(p []byte) (n int, err error) { - n, err = s.dec.Read(p) - if err != nil { - return - } - s.windowHandler(n) - return -} - -// The key to save transport.Stream in the context. -type streamKey struct{} - -// newContextWithStream creates a new context from ctx and attaches stream -// to it. -func newContextWithStream(ctx context.Context, stream *Stream) context.Context { - return context.WithValue(ctx, streamKey{}, stream) -} - -// StreamFromContext returns the stream saved in ctx. -func StreamFromContext(ctx context.Context) (s *Stream, ok bool) { - s, ok = ctx.Value(streamKey{}).(*Stream) - return -} - -// state of transport -type transportState int - -const ( - reachable transportState = iota - unreachable - closing -) - -// NewServerTransport creates a ServerTransport with conn or non-nil error -// if it fails. -func NewServerTransport(protocol string, conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (ServerTransport, error) { - return newHTTP2Server(conn, maxStreams, authInfo) -} - -// ConnectOptions covers all relevant options for dialing a server. -type ConnectOptions struct { - // UserAgent is the application user agent. - UserAgent string - // Dialer specifies how to dial a network address. - Dialer func(string, time.Duration) (net.Conn, error) - // AuthOptions stores the credentials required to setup a client connection and/or issue RPCs. - AuthOptions []credentials.Credentials - // Timeout specifies the timeout for dialing a client connection. - Timeout time.Duration -} - -// NewClientTransport establishes the transport with the required ConnectOptions -// and returns it to the caller. -func NewClientTransport(target string, opts *ConnectOptions) (ClientTransport, error) { - return newHTTP2Client(target, opts) -} - -// Options provides additional hints and information for message -// transmission. -type Options struct { - // Last indicates whether this write is the last piece for - // this stream. - Last bool - - // Delay is a hint to the transport implementation for whether - // the data could be buffered for a batching write. The - // Transport implementation may ignore the hint. - Delay bool -} - -// CallHdr carries the information of a particular RPC. -type CallHdr struct { - // Host specifies the peer's host. - Host string - - // Method specifies the operation to perform. 
- Method string - - // RecvCompress specifies the compression algorithm applied on - // inbound messages. - RecvCompress string - - // SendCompress specifies the compression algorithm applied on - // outbound message. - SendCompress string - - // Flush indicates whether a new stream command should be sent - // to the peer without waiting for the first data. This is - // only a hint. The transport may modify the flush decision - // for performance purposes. - Flush bool -} - -// ClientTransport is the common interface for all gRPC client-side transport -// implementations. -type ClientTransport interface { - // Close tears down this transport. Once it returns, the transport - // should not be accessed any more. The caller must make sure this - // is called only once. - Close() error - - // Write sends the data for the given stream. A nil stream indicates - // the write is to be performed on the transport as a whole. - Write(s *Stream, data []byte, opts *Options) error - - // NewStream creates a Stream for an RPC. - NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) - - // CloseStream clears the footprint of a stream when the stream is - // not needed any more. The err indicates the error incurred when - // CloseStream is called. Must be called when a stream is finished - // unless the associated transport is closing. - CloseStream(stream *Stream, err error) - - // Error returns a channel that is closed when some I/O error - // happens. Typically the caller should have a goroutine to monitor - // this in order to take action (e.g., close the current transport - // and create a new one) in error case. It should not return nil - // once the transport is initiated. - Error() <-chan struct{} -} - -// ServerTransport is the common interface for all gRPC server-side transport -// implementations. -// -// Methods may be called concurrently from multiple goroutines, but -// Write methods for a given Stream will be called serially. -type ServerTransport interface { - // HandleStreams receives incoming streams using the given handler. - HandleStreams(func(*Stream)) - - // WriteHeader sends the header metadata for the given stream. - // WriteHeader may not be called on all streams. - WriteHeader(s *Stream, md metadata.MD) error - - // Write sends the data for the given stream. - // Write may not be called on all streams. - Write(s *Stream, data []byte, opts *Options) error - - // WriteStatus sends the status of a stream to the client. - // WriteStatus is the final call made on a stream and always - // occurs. - WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error - - // Close tears down the transport. Once it is called, the transport - // should not be accessed any more. All the pending streams and their - // handlers will be terminated asynchronously. - Close() error - - // RemoteAddr returns the remote network address. - RemoteAddr() net.Addr -} - -// StreamErrorf creates an StreamError with the specified error code and description. -func StreamErrorf(c codes.Code, format string, a ...interface{}) StreamError { - return StreamError{ - Code: c, - Desc: fmt.Sprintf(format, a...), - } -} - -// ConnectionErrorf creates an ConnectionError with the specified error description. -func ConnectionErrorf(format string, a ...interface{}) ConnectionError { - return ConnectionError{ - Desc: fmt.Sprintf(format, a...), - } -} - -// ConnectionError is an error that results in the termination of the -// entire connection and the retry of all the active streams. 
-type ConnectionError struct { - Desc string -} - -func (e ConnectionError) Error() string { - return fmt.Sprintf("connection error: desc = %q", e.Desc) -} - -// Define some common ConnectionErrors. -var ErrConnClosing = ConnectionError{Desc: "transport is closing"} - -// StreamError is an error that only affects one stream within a connection. -type StreamError struct { - Code codes.Code - Desc string -} - -func (e StreamError) Error() string { - return fmt.Sprintf("stream error: code = %d desc = %q", e.Code, e.Desc) -} - -// ContextErr converts the error from context package into a StreamError. -func ContextErr(err error) StreamError { - switch err { - case context.DeadlineExceeded: - return StreamErrorf(codes.DeadlineExceeded, "%v", err) - case context.Canceled: - return StreamErrorf(codes.Canceled, "%v", err) - } - panic(fmt.Sprintf("Unexpected error from context packet: %v", err)) -} - -// wait blocks until it can receive from ctx.Done, closing, or proceed. -// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err. -// If it receives from closing, it returns 0, ErrConnClosing. -// If it receives from proceed, it returns the received integer, nil. -func wait(ctx context.Context, closing <-chan struct{}, proceed <-chan int) (int, error) { - select { - case <-ctx.Done(): - return 0, ContextErr(ctx.Err()) - case <-closing: - return 0, ErrConnClosing - case i := <-proceed: - return i, nil - } -} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/.gitignore b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/.gitignore deleted file mode 100644 index 191a5360..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -_* -*.swp -*.[568] -[568].out diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/LICENSE b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/LICENSE deleted file mode 100644 index 545cf2d3..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Gocheck - A rich testing framework for Go - -Copyright (c) 2010-2013 Gustavo Niemeyer - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
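The transport.go file removed above closes with a small helper, wait, that funnels three unblocking sources (context cancellation, transport shutdown, granted quota) through one select. A minimal runnable sketch of the same shape, with a plain error value standing in for the package's ErrConnClosing and with ctx.Err() returned directly instead of being wrapped by ContextErr (illustrative only, not part of the removed file):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// errConnClosing is a stand-in for the removed transport.ErrConnClosing.
var errConnClosing = errors.New("transport is closing")

// wait blocks until ctx is done, closing is closed, or a quota value
// arrives on proceed, mirroring the select in the deleted helper.
func wait(ctx context.Context, closing <-chan struct{}, proceed <-chan int) (int, error) {
	select {
	case <-ctx.Done():
		return 0, ctx.Err()
	case <-closing:
		return 0, errConnClosing
	case i := <-proceed:
		return i, nil
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()

	quota := make(chan int, 1)
	quota <- 16 // e.g., 16 bytes of send quota already granted

	n, err := wait(ctx, make(chan struct{}), quota)
	fmt.Println(n, err) // 16 <nil>

	// With no quota pending, the context deadline wins instead.
	n, err = wait(ctx, make(chan struct{}), quota)
	fmt.Println(n, err) // 0 context deadline exceeded
}

The real transport converts ctx.Err() into a StreamError via ContextErr so callers can map it to a gRPC status code; the sketch skips that wrapping to stay self-contained.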
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/README.md b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/README.md deleted file mode 100644 index 0ca9e572..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/README.md +++ /dev/null @@ -1,20 +0,0 @@ -Instructions -============ - -Install the package with: - - go get gopkg.in/check.v1 - -Import it with: - - import "gopkg.in/check.v1" - -and use _check_ as the package name inside the code. - -For more details, visit the project page: - -* http://labix.org/gocheck - -and the API documentation: - -* https://gopkg.in/check.v1 diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/TODO b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/TODO deleted file mode 100644 index 33498270..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/TODO +++ /dev/null @@ -1,2 +0,0 @@ -- Assert(slice, Contains, item) -- Parallel test support diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/benchmark.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/benchmark.go deleted file mode 100644 index 48cb8c81..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/benchmark.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package check - -import ( - "fmt" - "runtime" - "time" -) - -var memStats runtime.MemStats - -// testingB is a type passed to Benchmark functions to manage benchmark -// timing and to specify the number of iterations to run. -type timer struct { - start time.Time // Time test or benchmark started - duration time.Duration - N int - bytes int64 - timerOn bool - benchTime time.Duration - // The initial states of memStats.Mallocs and memStats.TotalAlloc. - startAllocs uint64 - startBytes uint64 - // The net total of this test after being run. - netAllocs uint64 - netBytes uint64 -} - -// StartTimer starts timing a test. This function is called automatically -// before a benchmark starts, but it can also used to resume timing after -// a call to StopTimer. -func (c *C) StartTimer() { - if !c.timerOn { - c.start = time.Now() - c.timerOn = true - - runtime.ReadMemStats(&memStats) - c.startAllocs = memStats.Mallocs - c.startBytes = memStats.TotalAlloc - } -} - -// StopTimer stops timing a test. This can be used to pause the timer -// while performing complex initialization that you don't -// want to measure. -func (c *C) StopTimer() { - if c.timerOn { - c.duration += time.Now().Sub(c.start) - c.timerOn = false - runtime.ReadMemStats(&memStats) - c.netAllocs += memStats.Mallocs - c.startAllocs - c.netBytes += memStats.TotalAlloc - c.startBytes - } -} - -// ResetTimer sets the elapsed benchmark time to zero. -// It does not affect whether the timer is running. -func (c *C) ResetTimer() { - if c.timerOn { - c.start = time.Now() - runtime.ReadMemStats(&memStats) - c.startAllocs = memStats.Mallocs - c.startBytes = memStats.TotalAlloc - } - c.duration = 0 - c.netAllocs = 0 - c.netBytes = 0 -} - -// SetBytes informs the number of bytes that the benchmark processes -// on each iteration. If this is called in a benchmark it will also -// report MB/s. 
-func (c *C) SetBytes(n int64) { - c.bytes = n -} - -func (c *C) nsPerOp() int64 { - if c.N <= 0 { - return 0 - } - return c.duration.Nanoseconds() / int64(c.N) -} - -func (c *C) mbPerSec() float64 { - if c.bytes <= 0 || c.duration <= 0 || c.N <= 0 { - return 0 - } - return (float64(c.bytes) * float64(c.N) / 1e6) / c.duration.Seconds() -} - -func (c *C) timerString() string { - if c.N <= 0 { - return fmt.Sprintf("%3.3fs", float64(c.duration.Nanoseconds())/1e9) - } - mbs := c.mbPerSec() - mb := "" - if mbs != 0 { - mb = fmt.Sprintf("\t%7.2f MB/s", mbs) - } - nsop := c.nsPerOp() - ns := fmt.Sprintf("%10d ns/op", nsop) - if c.N > 0 && nsop < 100 { - // The format specifiers here make sure that - // the ones digits line up for all three possible formats. - if nsop < 10 { - ns = fmt.Sprintf("%13.2f ns/op", float64(c.duration.Nanoseconds())/float64(c.N)) - } else { - ns = fmt.Sprintf("%12.1f ns/op", float64(c.duration.Nanoseconds())/float64(c.N)) - } - } - memStats := "" - if c.benchMem { - allocedBytes := fmt.Sprintf("%8d B/op", int64(c.netBytes)/int64(c.N)) - allocs := fmt.Sprintf("%8d allocs/op", int64(c.netAllocs)/int64(c.N)) - memStats = fmt.Sprintf("\t%s\t%s", allocedBytes, allocs) - } - return fmt.Sprintf("%8d\t%s%s%s", c.N, ns, mb, memStats) -} - -func min(x, y int) int { - if x > y { - return y - } - return x -} - -func max(x, y int) int { - if x < y { - return y - } - return x -} - -// roundDown10 rounds a number down to the nearest power of 10. -func roundDown10(n int) int { - var tens = 0 - // tens = floor(log_10(n)) - for n > 10 { - n = n / 10 - tens++ - } - // result = 10^tens - result := 1 - for i := 0; i < tens; i++ { - result *= 10 - } - return result -} - -// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX]. -func roundUp(n int) int { - base := roundDown10(n) - if n < (2 * base) { - return 2 * base - } - if n < (5 * base) { - return 5 * base - } - return 10 * base -} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/check.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/check.go deleted file mode 100644 index ca8c0f92..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/check.go +++ /dev/null @@ -1,945 +0,0 @@ -// Package check is a rich testing extension for Go's testing package. -// -// For details about the project, see: -// -// http://labix.org/gocheck -// -package check - -import ( - "bytes" - "errors" - "fmt" - "io" - "math/rand" - "os" - "path" - "path/filepath" - "reflect" - "regexp" - "runtime" - "strconv" - "strings" - "sync" - "time" -) - -// ----------------------------------------------------------------------- -// Internal type which deals with suite method calling. - -const ( - fixtureKd = iota - testKd -) - -type funcKind int - -const ( - succeededSt = iota - failedSt - skippedSt - panickedSt - fixturePanickedSt - missedSt -) - -type funcStatus int - -// A method value can't reach its own Method structure. -type methodType struct { - reflect.Value - Info reflect.Method -} - -func newMethod(receiver reflect.Value, i int) *methodType { - return &methodType{receiver.Method(i), receiver.Type().Method(i)} -} - -func (method *methodType) PC() uintptr { - return method.Info.Func.Pointer() -} - -func (method *methodType) suiteName() string { - t := method.Info.Type.In(0) - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - return t.Name() -} - -func (method *methodType) String() string { - return method.suiteName() + "." 
+ method.Info.Name -} - -func (method *methodType) matches(re *regexp.Regexp) bool { - return (re.MatchString(method.Info.Name) || - re.MatchString(method.suiteName()) || - re.MatchString(method.String())) -} - -type C struct { - method *methodType - kind funcKind - testName string - status funcStatus - logb *logger - logw io.Writer - done chan *C - reason string - mustFail bool - tempDir *tempDir - benchMem bool - startTime time.Time - timer -} - -func (c *C) stopNow() { - runtime.Goexit() -} - -// logger is a concurrency safe byte.Buffer -type logger struct { - sync.Mutex - writer bytes.Buffer -} - -func (l *logger) Write(buf []byte) (int, error) { - l.Lock() - defer l.Unlock() - return l.writer.Write(buf) -} - -func (l *logger) WriteTo(w io.Writer) (int64, error) { - l.Lock() - defer l.Unlock() - return l.writer.WriteTo(w) -} - -func (l *logger) String() string { - l.Lock() - defer l.Unlock() - return l.writer.String() -} - -// ----------------------------------------------------------------------- -// Handling of temporary files and directories. - -type tempDir struct { - sync.Mutex - path string - counter int -} - -func (td *tempDir) newPath() string { - td.Lock() - defer td.Unlock() - if td.path == "" { - var err error - for i := 0; i != 100; i++ { - path := fmt.Sprintf("%s%ccheck-%d", os.TempDir(), os.PathSeparator, rand.Int()) - if err = os.Mkdir(path, 0700); err == nil { - td.path = path - break - } - } - if td.path == "" { - panic("Couldn't create temporary directory: " + err.Error()) - } - } - result := filepath.Join(td.path, strconv.Itoa(td.counter)) - td.counter += 1 - return result -} - -func (td *tempDir) removeAll() { - td.Lock() - defer td.Unlock() - if td.path != "" { - err := os.RemoveAll(td.path) - if err != nil { - fmt.Fprintf(os.Stderr, "WARNING: Error cleaning up temporaries: "+err.Error()) - } - } -} - -// Create a new temporary directory which is automatically removed after -// the suite finishes running. -func (c *C) MkDir() string { - path := c.tempDir.newPath() - if err := os.Mkdir(path, 0700); err != nil { - panic(fmt.Sprintf("Couldn't create temporary directory %s: %s", path, err.Error())) - } - return path -} - -// ----------------------------------------------------------------------- -// Low-level logging functions. - -func (c *C) log(args ...interface{}) { - c.writeLog([]byte(fmt.Sprint(args...) + "\n")) -} - -func (c *C) logf(format string, args ...interface{}) { - c.writeLog([]byte(fmt.Sprintf(format+"\n", args...))) -} - -func (c *C) logNewLine() { - c.writeLog([]byte{'\n'}) -} - -func (c *C) writeLog(buf []byte) { - c.logb.Write(buf) - if c.logw != nil { - c.logw.Write(buf) - } -} - -func hasStringOrError(x interface{}) (ok bool) { - _, ok = x.(fmt.Stringer) - if ok { - return - } - _, ok = x.(error) - return -} - -func (c *C) logValue(label string, value interface{}) { - if label == "" { - if hasStringOrError(value) { - c.logf("... %#v (%q)", value, value) - } else { - c.logf("... %#v", value) - } - } else if value == nil { - c.logf("... %s = nil", label) - } else { - if hasStringOrError(value) { - fv := fmt.Sprintf("%#v", value) - qv := fmt.Sprintf("%q", value) - if fv != qv { - c.logf("... %s %s = %s (%s)", label, reflect.TypeOf(value), fv, qv) - return - } - } - if s, ok := value.(string); ok && isMultiLine(s) { - c.logf(`... %s %s = "" +`, label, reflect.TypeOf(value)) - c.logMultiLine(s) - } else { - c.logf("... 
%s %s = %#v", label, reflect.TypeOf(value), value) - } - } -} - -func (c *C) logMultiLine(s string) { - b := make([]byte, 0, len(s)*2) - i := 0 - n := len(s) - for i < n { - j := i + 1 - for j < n && s[j-1] != '\n' { - j++ - } - b = append(b, "... "...) - b = strconv.AppendQuote(b, s[i:j]) - if j < n { - b = append(b, " +"...) - } - b = append(b, '\n') - i = j - } - c.writeLog(b) -} - -func isMultiLine(s string) bool { - for i := 0; i+1 < len(s); i++ { - if s[i] == '\n' { - return true - } - } - return false -} - -func (c *C) logString(issue string) { - c.log("... ", issue) -} - -func (c *C) logCaller(skip int) { - // This is a bit heavier than it ought to be. - skip += 1 // Our own frame. - pc, callerFile, callerLine, ok := runtime.Caller(skip) - if !ok { - return - } - var testFile string - var testLine int - testFunc := runtime.FuncForPC(c.method.PC()) - if runtime.FuncForPC(pc) != testFunc { - for { - skip += 1 - if pc, file, line, ok := runtime.Caller(skip); ok { - // Note that the test line may be different on - // distinct calls for the same test. Showing - // the "internal" line is helpful when debugging. - if runtime.FuncForPC(pc) == testFunc { - testFile, testLine = file, line - break - } - } else { - break - } - } - } - if testFile != "" && (testFile != callerFile || testLine != callerLine) { - c.logCode(testFile, testLine) - } - c.logCode(callerFile, callerLine) -} - -func (c *C) logCode(path string, line int) { - c.logf("%s:%d:", nicePath(path), line) - code, err := printLine(path, line) - if code == "" { - code = "..." // XXX Open the file and take the raw line. - if err != nil { - code += err.Error() - } - } - c.log(indent(code, " ")) -} - -var valueGo = filepath.Join("reflect", "value.go") -var asmGo = filepath.Join("runtime", "asm_") - -func (c *C) logPanic(skip int, value interface{}) { - skip++ // Our own frame. - initialSkip := skip - for ; ; skip++ { - if pc, file, line, ok := runtime.Caller(skip); ok { - if skip == initialSkip { - c.logf("... Panic: %s (PC=0x%X)\n", value, pc) - } - name := niceFuncName(pc) - path := nicePath(file) - if strings.Contains(path, "/gopkg.in/check.v") { - continue - } - if name == "Value.call" && strings.HasSuffix(path, valueGo) { - continue - } - if name == "call16" && strings.Contains(path, asmGo) { - continue - } - c.logf("%s:%d\n in %s", nicePath(file), line, name) - } else { - break - } - } -} - -func (c *C) logSoftPanic(issue string) { - c.log("... Panic: ", issue) -} - -func (c *C) logArgPanic(method *methodType, expectedType string) { - c.logf("... Panic: %s argument should be %s", - niceFuncName(method.PC()), expectedType) -} - -// ----------------------------------------------------------------------- -// Some simple formatting helpers. 
- -var initWD, initWDErr = os.Getwd() - -func init() { - if initWDErr == nil { - initWD = strings.Replace(initWD, "\\", "/", -1) + "/" - } -} - -func nicePath(path string) string { - if initWDErr == nil { - if strings.HasPrefix(path, initWD) { - return path[len(initWD):] - } - } - return path -} - -func niceFuncPath(pc uintptr) string { - function := runtime.FuncForPC(pc) - if function != nil { - filename, line := function.FileLine(pc) - return fmt.Sprintf("%s:%d", nicePath(filename), line) - } - return "" -} - -func niceFuncName(pc uintptr) string { - function := runtime.FuncForPC(pc) - if function != nil { - name := path.Base(function.Name()) - if i := strings.Index(name, "."); i > 0 { - name = name[i+1:] - } - if strings.HasPrefix(name, "(*") { - if i := strings.Index(name, ")"); i > 0 { - name = name[2:i] + name[i+1:] - } - } - if i := strings.LastIndex(name, ".*"); i != -1 { - name = name[:i] + "." + name[i+2:] - } - if i := strings.LastIndex(name, "·"); i != -1 { - name = name[:i] + "." + name[i+2:] - } - return name - } - return "" -} - -// ----------------------------------------------------------------------- -// Result tracker to aggregate call results. - -type Result struct { - Succeeded int - Failed int - Skipped int - Panicked int - FixturePanicked int - ExpectedFailures int - Missed int // Not even tried to run, related to a panic in the fixture. - RunError error // Houston, we've got a problem. - WorkDir string // If KeepWorkDir is true -} - -type resultTracker struct { - result Result - _lastWasProblem bool - _waiting int - _missed int - _expectChan chan *C - _doneChan chan *C - _stopChan chan bool -} - -func newResultTracker() *resultTracker { - return &resultTracker{_expectChan: make(chan *C), // Synchronous - _doneChan: make(chan *C, 32), // Asynchronous - _stopChan: make(chan bool)} // Synchronous -} - -func (tracker *resultTracker) start() { - go tracker._loopRoutine() -} - -func (tracker *resultTracker) waitAndStop() { - <-tracker._stopChan -} - -func (tracker *resultTracker) expectCall(c *C) { - tracker._expectChan <- c -} - -func (tracker *resultTracker) callDone(c *C) { - tracker._doneChan <- c -} - -func (tracker *resultTracker) _loopRoutine() { - for { - var c *C - if tracker._waiting > 0 { - // Calls still running. Can't stop. - select { - // XXX Reindent this (not now to make diff clear) - case c = <-tracker._expectChan: - tracker._waiting += 1 - case c = <-tracker._doneChan: - tracker._waiting -= 1 - switch c.status { - case succeededSt: - if c.kind == testKd { - if c.mustFail { - tracker.result.ExpectedFailures++ - } else { - tracker.result.Succeeded++ - } - } - case failedSt: - tracker.result.Failed++ - case panickedSt: - if c.kind == fixtureKd { - tracker.result.FixturePanicked++ - } else { - tracker.result.Panicked++ - } - case fixturePanickedSt: - // Track it as missed, since the panic - // was on the fixture, not on the test. - tracker.result.Missed++ - case missedSt: - tracker.result.Missed++ - case skippedSt: - if c.kind == testKd { - tracker.result.Skipped++ - } - } - } - } else { - // No calls. Can stop, but no done calls here. - select { - case tracker._stopChan <- true: - return - case c = <-tracker._expectChan: - tracker._waiting += 1 - case c = <-tracker._doneChan: - panic("Tracker got an unexpected done call.") - } - } - } -} - -// ----------------------------------------------------------------------- -// The underlying suite runner. 
- -type suiteRunner struct { - suite interface{} - setUpSuite, tearDownSuite *methodType - setUpTest, tearDownTest *methodType - tests []*methodType - tracker *resultTracker - tempDir *tempDir - keepDir bool - output *outputWriter - reportedProblemLast bool - benchTime time.Duration - benchMem bool -} - -type RunConf struct { - Output io.Writer - Stream bool - Verbose bool - Filter string - Benchmark bool - BenchmarkTime time.Duration // Defaults to 1 second - BenchmarkMem bool - KeepWorkDir bool -} - -// Create a new suiteRunner able to run all methods in the given suite. -func newSuiteRunner(suite interface{}, runConf *RunConf) *suiteRunner { - var conf RunConf - if runConf != nil { - conf = *runConf - } - if conf.Output == nil { - conf.Output = os.Stdout - } - if conf.Benchmark { - conf.Verbose = true - } - - suiteType := reflect.TypeOf(suite) - suiteNumMethods := suiteType.NumMethod() - suiteValue := reflect.ValueOf(suite) - - runner := &suiteRunner{ - suite: suite, - output: newOutputWriter(conf.Output, conf.Stream, conf.Verbose), - tracker: newResultTracker(), - benchTime: conf.BenchmarkTime, - benchMem: conf.BenchmarkMem, - tempDir: &tempDir{}, - keepDir: conf.KeepWorkDir, - tests: make([]*methodType, 0, suiteNumMethods), - } - if runner.benchTime == 0 { - runner.benchTime = 1 * time.Second - } - - var filterRegexp *regexp.Regexp - if conf.Filter != "" { - if regexp, err := regexp.Compile(conf.Filter); err != nil { - msg := "Bad filter expression: " + err.Error() - runner.tracker.result.RunError = errors.New(msg) - return runner - } else { - filterRegexp = regexp - } - } - - for i := 0; i != suiteNumMethods; i++ { - method := newMethod(suiteValue, i) - switch method.Info.Name { - case "SetUpSuite": - runner.setUpSuite = method - case "TearDownSuite": - runner.tearDownSuite = method - case "SetUpTest": - runner.setUpTest = method - case "TearDownTest": - runner.tearDownTest = method - default: - prefix := "Test" - if conf.Benchmark { - prefix = "Benchmark" - } - if !strings.HasPrefix(method.Info.Name, prefix) { - continue - } - if filterRegexp == nil || method.matches(filterRegexp) { - runner.tests = append(runner.tests, method) - } - } - } - return runner -} - -// Run all methods in the given suite. -func (runner *suiteRunner) run() *Result { - if runner.tracker.result.RunError == nil && len(runner.tests) > 0 { - runner.tracker.start() - if runner.checkFixtureArgs() { - c := runner.runFixture(runner.setUpSuite, "", nil) - if c == nil || c.status == succeededSt { - for i := 0; i != len(runner.tests); i++ { - c := runner.runTest(runner.tests[i]) - if c.status == fixturePanickedSt { - runner.skipTests(missedSt, runner.tests[i+1:]) - break - } - } - } else if c != nil && c.status == skippedSt { - runner.skipTests(skippedSt, runner.tests) - } else { - runner.skipTests(missedSt, runner.tests) - } - runner.runFixture(runner.tearDownSuite, "", nil) - } else { - runner.skipTests(missedSt, runner.tests) - } - runner.tracker.waitAndStop() - if runner.keepDir { - runner.tracker.result.WorkDir = runner.tempDir.path - } else { - runner.tempDir.removeAll() - } - } - return &runner.tracker.result -} - -// Create a call object with the given suite method, and fork a -// goroutine with the provided dispatcher for running it. 
-func (runner *suiteRunner) forkCall(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C { - var logw io.Writer - if runner.output.Stream { - logw = runner.output - } - if logb == nil { - logb = new(logger) - } - c := &C{ - method: method, - kind: kind, - testName: testName, - logb: logb, - logw: logw, - tempDir: runner.tempDir, - done: make(chan *C, 1), - timer: timer{benchTime: runner.benchTime}, - startTime: time.Now(), - benchMem: runner.benchMem, - } - runner.tracker.expectCall(c) - go (func() { - runner.reportCallStarted(c) - defer runner.callDone(c) - dispatcher(c) - })() - return c -} - -// Same as forkCall(), but wait for call to finish before returning. -func (runner *suiteRunner) runFunc(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C { - c := runner.forkCall(method, kind, testName, logb, dispatcher) - <-c.done - return c -} - -// Handle a finished call. If there were any panics, update the call status -// accordingly. Then, mark the call as done and report to the tracker. -func (runner *suiteRunner) callDone(c *C) { - value := recover() - if value != nil { - switch v := value.(type) { - case *fixturePanic: - if v.status == skippedSt { - c.status = skippedSt - } else { - c.logSoftPanic("Fixture has panicked (see related PANIC)") - c.status = fixturePanickedSt - } - default: - c.logPanic(1, value) - c.status = panickedSt - } - } - if c.mustFail { - switch c.status { - case failedSt: - c.status = succeededSt - case succeededSt: - c.status = failedSt - c.logString("Error: Test succeeded, but was expected to fail") - c.logString("Reason: " + c.reason) - } - } - - runner.reportCallDone(c) - c.done <- c -} - -// Runs a fixture call synchronously. The fixture will still be run in a -// goroutine like all suite methods, but this method will not return -// while the fixture goroutine is not done, because the fixture must be -// run in a desired order. -func (runner *suiteRunner) runFixture(method *methodType, testName string, logb *logger) *C { - if method != nil { - c := runner.runFunc(method, fixtureKd, testName, logb, func(c *C) { - c.ResetTimer() - c.StartTimer() - defer c.StopTimer() - c.method.Call([]reflect.Value{reflect.ValueOf(c)}) - }) - return c - } - return nil -} - -// Run the fixture method with runFixture(), but panic with a fixturePanic{} -// in case the fixture method panics. This makes it easier to track the -// fixture panic together with other call panics within forkTest(). -func (runner *suiteRunner) runFixtureWithPanic(method *methodType, testName string, logb *logger, skipped *bool) *C { - if skipped != nil && *skipped { - return nil - } - c := runner.runFixture(method, testName, logb) - if c != nil && c.status != succeededSt { - if skipped != nil { - *skipped = c.status == skippedSt - } - panic(&fixturePanic{c.status, method}) - } - return c -} - -type fixturePanic struct { - status funcStatus - method *methodType -} - -// Run the suite test method, together with the test-specific fixture, -// asynchronously. 
-func (runner *suiteRunner) forkTest(method *methodType) *C { - testName := method.String() - return runner.forkCall(method, testKd, testName, nil, func(c *C) { - var skipped bool - defer runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, &skipped) - defer c.StopTimer() - benchN := 1 - for { - runner.runFixtureWithPanic(runner.setUpTest, testName, c.logb, &skipped) - mt := c.method.Type() - if mt.NumIn() != 1 || mt.In(0) != reflect.TypeOf(c) { - // Rather than a plain panic, provide a more helpful message when - // the argument type is incorrect. - c.status = panickedSt - c.logArgPanic(c.method, "*check.C") - return - } - if strings.HasPrefix(c.method.Info.Name, "Test") { - c.ResetTimer() - c.StartTimer() - c.method.Call([]reflect.Value{reflect.ValueOf(c)}) - return - } - if !strings.HasPrefix(c.method.Info.Name, "Benchmark") { - panic("unexpected method prefix: " + c.method.Info.Name) - } - - runtime.GC() - c.N = benchN - c.ResetTimer() - c.StartTimer() - c.method.Call([]reflect.Value{reflect.ValueOf(c)}) - c.StopTimer() - if c.status != succeededSt || c.duration >= c.benchTime || benchN >= 1e9 { - return - } - perOpN := int(1e9) - if c.nsPerOp() != 0 { - perOpN = int(c.benchTime.Nanoseconds() / c.nsPerOp()) - } - - // Logic taken from the stock testing package: - // - Run more iterations than we think we'll need for a second (1.5x). - // - Don't grow too fast in case we had timing errors previously. - // - Be sure to run at least one more than last time. - benchN = max(min(perOpN+perOpN/2, 100*benchN), benchN+1) - benchN = roundUp(benchN) - - skipped = true // Don't run the deferred one if this panics. - runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, nil) - skipped = false - } - }) -} - -// Same as forkTest(), but wait for the test to finish before returning. -func (runner *suiteRunner) runTest(method *methodType) *C { - c := runner.forkTest(method) - <-c.done - return c -} - -// Helper to mark tests as skipped or missed. A bit heavy for what -// it does, but it enables homogeneous handling of tracking, including -// nice verbose output. -func (runner *suiteRunner) skipTests(status funcStatus, methods []*methodType) { - for _, method := range methods { - runner.runFunc(method, testKd, "", nil, func(c *C) { - c.status = status - }) - } -} - -// Verify if the fixture arguments are *check.C. In case of errors, -// log the error as a panic in the fixture method call, and return false. 
-func (runner *suiteRunner) checkFixtureArgs() bool { - succeeded := true - argType := reflect.TypeOf(&C{}) - for _, method := range []*methodType{runner.setUpSuite, runner.tearDownSuite, runner.setUpTest, runner.tearDownTest} { - if method != nil { - mt := method.Type() - if mt.NumIn() != 1 || mt.In(0) != argType { - succeeded = false - runner.runFunc(method, fixtureKd, "", nil, func(c *C) { - c.logArgPanic(method, "*check.C") - c.status = panickedSt - }) - } - } - } - return succeeded -} - -func (runner *suiteRunner) reportCallStarted(c *C) { - runner.output.WriteCallStarted("START", c) -} - -func (runner *suiteRunner) reportCallDone(c *C) { - runner.tracker.callDone(c) - switch c.status { - case succeededSt: - if c.mustFail { - runner.output.WriteCallSuccess("FAIL EXPECTED", c) - } else { - runner.output.WriteCallSuccess("PASS", c) - } - case skippedSt: - runner.output.WriteCallSuccess("SKIP", c) - case failedSt: - runner.output.WriteCallProblem("FAIL", c) - case panickedSt: - runner.output.WriteCallProblem("PANIC", c) - case fixturePanickedSt: - // That's a testKd call reporting that its fixture - // has panicked. The fixture call which caused the - // panic itself was tracked above. We'll report to - // aid debugging. - runner.output.WriteCallProblem("PANIC", c) - case missedSt: - runner.output.WriteCallSuccess("MISS", c) - } -} - -// ----------------------------------------------------------------------- -// Output writer manages atomic output writing according to settings. - -type outputWriter struct { - m sync.Mutex - writer io.Writer - wroteCallProblemLast bool - Stream bool - Verbose bool -} - -func newOutputWriter(writer io.Writer, stream, verbose bool) *outputWriter { - return &outputWriter{writer: writer, Stream: stream, Verbose: verbose} -} - -func (ow *outputWriter) Write(content []byte) (n int, err error) { - ow.m.Lock() - n, err = ow.writer.Write(content) - ow.m.Unlock() - return -} - -func (ow *outputWriter) WriteCallStarted(label string, c *C) { - if ow.Stream { - header := renderCallHeader(label, c, "", "\n") - ow.m.Lock() - ow.writer.Write([]byte(header)) - ow.m.Unlock() - } -} - -func (ow *outputWriter) WriteCallProblem(label string, c *C) { - var prefix string - if !ow.Stream { - prefix = "\n-----------------------------------" + - "-----------------------------------\n" - } - header := renderCallHeader(label, c, prefix, "\n\n") - ow.m.Lock() - ow.wroteCallProblemLast = true - ow.writer.Write([]byte(header)) - if !ow.Stream { - c.logb.WriteTo(ow.writer) - } - ow.m.Unlock() -} - -func (ow *outputWriter) WriteCallSuccess(label string, c *C) { - if ow.Stream || (ow.Verbose && c.kind == testKd) { - // TODO Use a buffer here. - var suffix string - if c.reason != "" { - suffix = " (" + c.reason + ")" - } - if c.status == succeededSt { - suffix += "\t" + c.timerString() - } - suffix += "\n" - if ow.Stream { - suffix += "\n" - } - header := renderCallHeader(label, c, "", suffix) - ow.m.Lock() - // Resist temptation of using line as prefix above due to race. 
- if !ow.Stream && ow.wroteCallProblemLast { - header = "\n-----------------------------------" + - "-----------------------------------\n" + - header - } - ow.wroteCallProblemLast = false - ow.writer.Write([]byte(header)) - ow.m.Unlock() - } -} - -func renderCallHeader(label string, c *C, prefix, suffix string) string { - pc := c.method.PC() - return fmt.Sprintf("%s%s: %s: %s%s", prefix, label, niceFuncPath(pc), - niceFuncName(pc), suffix) -} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/checkers.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/checkers.go deleted file mode 100644 index bac33872..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/checkers.go +++ /dev/null @@ -1,458 +0,0 @@ -package check - -import ( - "fmt" - "reflect" - "regexp" -) - -// ----------------------------------------------------------------------- -// CommentInterface and Commentf helper, to attach extra information to checks. - -type comment struct { - format string - args []interface{} -} - -// Commentf returns an infomational value to use with Assert or Check calls. -// If the checker test fails, the provided arguments will be passed to -// fmt.Sprintf, and will be presented next to the logged failure. -// -// For example: -// -// c.Assert(v, Equals, 42, Commentf("Iteration #%d failed.", i)) -// -// Note that if the comment is constant, a better option is to -// simply use a normal comment right above or next to the line, as -// it will also get printed with any errors: -// -// c.Assert(l, Equals, 8192) // Ensure buffer size is correct (bug #123) -// -func Commentf(format string, args ...interface{}) CommentInterface { - return &comment{format, args} -} - -// CommentInterface must be implemented by types that attach extra -// information to failed checks. See the Commentf function for details. -type CommentInterface interface { - CheckCommentString() string -} - -func (c *comment) CheckCommentString() string { - return fmt.Sprintf(c.format, c.args...) -} - -// ----------------------------------------------------------------------- -// The Checker interface. - -// The Checker interface must be provided by checkers used with -// the Assert and Check verification methods. -type Checker interface { - Info() *CheckerInfo - Check(params []interface{}, names []string) (result bool, error string) -} - -// See the Checker interface. -type CheckerInfo struct { - Name string - Params []string -} - -func (info *CheckerInfo) Info() *CheckerInfo { - return info -} - -// ----------------------------------------------------------------------- -// Not checker logic inverter. - -// The Not checker inverts the logic of the provided checker. The -// resulting checker will succeed where the original one failed, and -// vice-versa. -// -// For example: -// -// c.Assert(a, Not(Equals), b) -// -func Not(checker Checker) Checker { - return ¬Checker{checker} -} - -type notChecker struct { - sub Checker -} - -func (checker *notChecker) Info() *CheckerInfo { - info := *checker.sub.Info() - info.Name = "Not(" + info.Name + ")" - return &info -} - -func (checker *notChecker) Check(params []interface{}, names []string) (result bool, error string) { - result, error = checker.sub.Check(params, names) - result = !result - return -} - -// ----------------------------------------------------------------------- -// IsNil checker. - -type isNilChecker struct { - *CheckerInfo -} - -// The IsNil checker tests whether the obtained value is nil. 
-// -// For example: -// -// c.Assert(err, IsNil) -// -var IsNil Checker = &isNilChecker{ - &CheckerInfo{Name: "IsNil", Params: []string{"value"}}, -} - -func (checker *isNilChecker) Check(params []interface{}, names []string) (result bool, error string) { - return isNil(params[0]), "" -} - -func isNil(obtained interface{}) (result bool) { - if obtained == nil { - result = true - } else { - switch v := reflect.ValueOf(obtained); v.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - } - return -} - -// ----------------------------------------------------------------------- -// NotNil checker. Alias for Not(IsNil), since it's so common. - -type notNilChecker struct { - *CheckerInfo -} - -// The NotNil checker verifies that the obtained value is not nil. -// -// For example: -// -// c.Assert(iface, NotNil) -// -// This is an alias for Not(IsNil), made available since it's a -// fairly common check. -// -var NotNil Checker = ¬NilChecker{ - &CheckerInfo{Name: "NotNil", Params: []string{"value"}}, -} - -func (checker *notNilChecker) Check(params []interface{}, names []string) (result bool, error string) { - return !isNil(params[0]), "" -} - -// ----------------------------------------------------------------------- -// Equals checker. - -type equalsChecker struct { - *CheckerInfo -} - -// The Equals checker verifies that the obtained value is equal to -// the expected value, according to usual Go semantics for ==. -// -// For example: -// -// c.Assert(value, Equals, 42) -// -var Equals Checker = &equalsChecker{ - &CheckerInfo{Name: "Equals", Params: []string{"obtained", "expected"}}, -} - -func (checker *equalsChecker) Check(params []interface{}, names []string) (result bool, error string) { - defer func() { - if v := recover(); v != nil { - result = false - error = fmt.Sprint(v) - } - }() - return params[0] == params[1], "" -} - -// ----------------------------------------------------------------------- -// DeepEquals checker. - -type deepEqualsChecker struct { - *CheckerInfo -} - -// The DeepEquals checker verifies that the obtained value is deep-equal to -// the expected value. The check will work correctly even when facing -// slices, interfaces, and values of different types (which always fail -// the test). -// -// For example: -// -// c.Assert(value, DeepEquals, 42) -// c.Assert(array, DeepEquals, []string{"hi", "there"}) -// -var DeepEquals Checker = &deepEqualsChecker{ - &CheckerInfo{Name: "DeepEquals", Params: []string{"obtained", "expected"}}, -} - -func (checker *deepEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) { - return reflect.DeepEqual(params[0], params[1]), "" -} - -// ----------------------------------------------------------------------- -// HasLen checker. - -type hasLenChecker struct { - *CheckerInfo -} - -// The HasLen checker verifies that the obtained value has the -// provided length. In many cases this is superior to using Equals -// in conjuction with the len function because in case the check -// fails the value itself will be printed, instead of its length, -// providing more details for figuring the problem. 
-// -// For example: -// -// c.Assert(list, HasLen, 5) -// -var HasLen Checker = &hasLenChecker{ - &CheckerInfo{Name: "HasLen", Params: []string{"obtained", "n"}}, -} - -func (checker *hasLenChecker) Check(params []interface{}, names []string) (result bool, error string) { - n, ok := params[1].(int) - if !ok { - return false, "n must be an int" - } - value := reflect.ValueOf(params[0]) - switch value.Kind() { - case reflect.Map, reflect.Array, reflect.Slice, reflect.Chan, reflect.String: - default: - return false, "obtained value type has no length" - } - return value.Len() == n, "" -} - -// ----------------------------------------------------------------------- -// ErrorMatches checker. - -type errorMatchesChecker struct { - *CheckerInfo -} - -// The ErrorMatches checker verifies that the error value -// is non nil and matches the regular expression provided. -// -// For example: -// -// c.Assert(err, ErrorMatches, "perm.*denied") -// -var ErrorMatches Checker = errorMatchesChecker{ - &CheckerInfo{Name: "ErrorMatches", Params: []string{"value", "regex"}}, -} - -func (checker errorMatchesChecker) Check(params []interface{}, names []string) (result bool, errStr string) { - if params[0] == nil { - return false, "Error value is nil" - } - err, ok := params[0].(error) - if !ok { - return false, "Value is not an error" - } - params[0] = err.Error() - names[0] = "error" - return matches(params[0], params[1]) -} - -// ----------------------------------------------------------------------- -// Matches checker. - -type matchesChecker struct { - *CheckerInfo -} - -// The Matches checker verifies that the string provided as the obtained -// value (or the string resulting from obtained.String()) matches the -// regular expression provided. -// -// For example: -// -// c.Assert(err, Matches, "perm.*denied") -// -var Matches Checker = &matchesChecker{ - &CheckerInfo{Name: "Matches", Params: []string{"value", "regex"}}, -} - -func (checker *matchesChecker) Check(params []interface{}, names []string) (result bool, error string) { - return matches(params[0], params[1]) -} - -func matches(value, regex interface{}) (result bool, error string) { - reStr, ok := regex.(string) - if !ok { - return false, "Regex must be a string" - } - valueStr, valueIsStr := value.(string) - if !valueIsStr { - if valueWithStr, valueHasStr := value.(fmt.Stringer); valueHasStr { - valueStr, valueIsStr = valueWithStr.String(), true - } - } - if valueIsStr { - matches, err := regexp.MatchString("^"+reStr+"$", valueStr) - if err != nil { - return false, "Can't compile regex: " + err.Error() - } - return matches, "" - } - return false, "Obtained value is not a string and has no .String()" -} - -// ----------------------------------------------------------------------- -// Panics checker. - -type panicsChecker struct { - *CheckerInfo -} - -// The Panics checker verifies that calling the provided zero-argument -// function will cause a panic which is deep-equal to the provided value. -// -// For example: -// -// c.Assert(func() { f(1, 2) }, Panics, &SomeErrorType{"BOOM"}). -// -// -var Panics Checker = &panicsChecker{ - &CheckerInfo{Name: "Panics", Params: []string{"function", "expected"}}, -} - -func (checker *panicsChecker) Check(params []interface{}, names []string) (result bool, error string) { - f := reflect.ValueOf(params[0]) - if f.Kind() != reflect.Func || f.Type().NumIn() != 0 { - return false, "Function must take zero arguments" - } - defer func() { - // If the function has not panicked, then don't do the check. 
- if error != "" { - return - } - params[0] = recover() - names[0] = "panic" - result = reflect.DeepEqual(params[0], params[1]) - }() - f.Call(nil) - return false, "Function has not panicked" -} - -type panicMatchesChecker struct { - *CheckerInfo -} - -// The PanicMatches checker verifies that calling the provided zero-argument -// function will cause a panic with an error value matching -// the regular expression provided. -// -// For example: -// -// c.Assert(func() { f(1, 2) }, PanicMatches, `open.*: no such file or directory`). -// -// -var PanicMatches Checker = &panicMatchesChecker{ - &CheckerInfo{Name: "PanicMatches", Params: []string{"function", "expected"}}, -} - -func (checker *panicMatchesChecker) Check(params []interface{}, names []string) (result bool, errmsg string) { - f := reflect.ValueOf(params[0]) - if f.Kind() != reflect.Func || f.Type().NumIn() != 0 { - return false, "Function must take zero arguments" - } - defer func() { - // If the function has not panicked, then don't do the check. - if errmsg != "" { - return - } - obtained := recover() - names[0] = "panic" - if e, ok := obtained.(error); ok { - params[0] = e.Error() - } else if _, ok := obtained.(string); ok { - params[0] = obtained - } else { - errmsg = "Panic value is not a string or an error" - return - } - result, errmsg = matches(params[0], params[1]) - }() - f.Call(nil) - return false, "Function has not panicked" -} - -// ----------------------------------------------------------------------- -// FitsTypeOf checker. - -type fitsTypeChecker struct { - *CheckerInfo -} - -// The FitsTypeOf checker verifies that the obtained value is -// assignable to a variable with the same type as the provided -// sample value. -// -// For example: -// -// c.Assert(value, FitsTypeOf, int64(0)) -// c.Assert(value, FitsTypeOf, os.Error(nil)) -// -var FitsTypeOf Checker = &fitsTypeChecker{ - &CheckerInfo{Name: "FitsTypeOf", Params: []string{"obtained", "sample"}}, -} - -func (checker *fitsTypeChecker) Check(params []interface{}, names []string) (result bool, error string) { - obtained := reflect.ValueOf(params[0]) - sample := reflect.ValueOf(params[1]) - if !obtained.IsValid() { - return false, "" - } - if !sample.IsValid() { - return false, "Invalid sample value" - } - return obtained.Type().AssignableTo(sample.Type()), "" -} - -// ----------------------------------------------------------------------- -// Implements checker. - -type implementsChecker struct { - *CheckerInfo -} - -// The Implements checker verifies that the obtained value -// implements the interface specified via a pointer to an interface -// variable. 
-// -// For example: -// -// var e os.Error -// c.Assert(err, Implements, &e) -// -var Implements Checker = &implementsChecker{ - &CheckerInfo{Name: "Implements", Params: []string{"obtained", "ifaceptr"}}, -} - -func (checker *implementsChecker) Check(params []interface{}, names []string) (result bool, error string) { - obtained := reflect.ValueOf(params[0]) - ifaceptr := reflect.ValueOf(params[1]) - if !obtained.IsValid() { - return false, "" - } - if !ifaceptr.IsValid() || ifaceptr.Kind() != reflect.Ptr || ifaceptr.Elem().Kind() != reflect.Interface { - return false, "ifaceptr should be a pointer to an interface variable" - } - return obtained.Type().Implements(ifaceptr.Elem().Type()), "" -} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/helpers.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/helpers.go deleted file mode 100644 index 4b6c26da..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/helpers.go +++ /dev/null @@ -1,231 +0,0 @@ -package check - -import ( - "fmt" - "strings" - "time" -) - -// TestName returns the current test name in the form "SuiteName.TestName" -func (c *C) TestName() string { - return c.testName -} - -// ----------------------------------------------------------------------- -// Basic succeeding/failing logic. - -// Failed returns whether the currently running test has already failed. -func (c *C) Failed() bool { - return c.status == failedSt -} - -// Fail marks the currently running test as failed. -// -// Something ought to have been previously logged so the developer can tell -// what went wrong. The higher level helper functions will fail the test -// and do the logging properly. -func (c *C) Fail() { - c.status = failedSt -} - -// FailNow marks the currently running test as failed and stops running it. -// Something ought to have been previously logged so the developer can tell -// what went wrong. The higher level helper functions will fail the test -// and do the logging properly. -func (c *C) FailNow() { - c.Fail() - c.stopNow() -} - -// Succeed marks the currently running test as succeeded, undoing any -// previous failures. -func (c *C) Succeed() { - c.status = succeededSt -} - -// SucceedNow marks the currently running test as succeeded, undoing any -// previous failures, and stops running the test. -func (c *C) SucceedNow() { - c.Succeed() - c.stopNow() -} - -// ExpectFailure informs that the running test is knowingly broken for -// the provided reason. If the test does not fail, an error will be reported -// to raise attention to this fact. This method is useful to temporarily -// disable tests which cover well known problems until a better time to -// fix the problem is found, without forgetting about the fact that a -// failure still exists. -func (c *C) ExpectFailure(reason string) { - if reason == "" { - panic("Missing reason why the test is expected to fail") - } - c.mustFail = true - c.reason = reason -} - -// Skip skips the running test for the provided reason. If run from within -// SetUpTest, the individual test being set up will be skipped, and if run -// from within SetUpSuite, the whole suite is skipped. -func (c *C) Skip(reason string) { - if reason == "" { - panic("Missing reason why the test is being skipped") - } - c.reason = reason - c.status = skippedSt - c.stopNow() -} - -// ----------------------------------------------------------------------- -// Basic logging. - -// GetTestLog returns the current test error output. 
-func (c *C) GetTestLog() string { - return c.logb.String() -} - -// Log logs some information into the test error output. -// The provided arguments are assembled together into a string with fmt.Sprint. -func (c *C) Log(args ...interface{}) { - c.log(args...) -} - -// Log logs some information into the test error output. -// The provided arguments are assembled together into a string with fmt.Sprintf. -func (c *C) Logf(format string, args ...interface{}) { - c.logf(format, args...) -} - -// Output enables *C to be used as a logger in functions that require only -// the minimum interface of *log.Logger. -func (c *C) Output(calldepth int, s string) error { - d := time.Now().Sub(c.startTime) - msec := d / time.Millisecond - sec := d / time.Second - min := d / time.Minute - - c.Logf("[LOG] %d:%02d.%03d %s", min, sec%60, msec%1000, s) - return nil -} - -// Error logs an error into the test error output and marks the test as failed. -// The provided arguments are assembled together into a string with fmt.Sprint. -func (c *C) Error(args ...interface{}) { - c.logCaller(1) - c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...))) - c.logNewLine() - c.Fail() -} - -// Errorf logs an error into the test error output and marks the test as failed. -// The provided arguments are assembled together into a string with fmt.Sprintf. -func (c *C) Errorf(format string, args ...interface{}) { - c.logCaller(1) - c.logString(fmt.Sprintf("Error: "+format, args...)) - c.logNewLine() - c.Fail() -} - -// Fatal logs an error into the test error output, marks the test as failed, and -// stops the test execution. The provided arguments are assembled together into -// a string with fmt.Sprint. -func (c *C) Fatal(args ...interface{}) { - c.logCaller(1) - c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...))) - c.logNewLine() - c.FailNow() -} - -// Fatlaf logs an error into the test error output, marks the test as failed, and -// stops the test execution. The provided arguments are assembled together into -// a string with fmt.Sprintf. -func (c *C) Fatalf(format string, args ...interface{}) { - c.logCaller(1) - c.logString(fmt.Sprint("Error: ", fmt.Sprintf(format, args...))) - c.logNewLine() - c.FailNow() -} - -// ----------------------------------------------------------------------- -// Generic checks and assertions based on checkers. - -// Check verifies if the first value matches the expected value according -// to the provided checker. If they do not match, an error is logged, the -// test is marked as failed, and the test execution continues. -// -// Some checkers may not need the expected argument (e.g. IsNil). -// -// Extra arguments provided to the function are logged next to the reported -// problem when the matching fails. -func (c *C) Check(obtained interface{}, checker Checker, args ...interface{}) bool { - return c.internalCheck("Check", obtained, checker, args...) -} - -// Assert ensures that the first value matches the expected value according -// to the provided checker. If they do not match, an error is logged, the -// test is marked as failed, and the test execution stops. -// -// Some checkers may not need the expected argument (e.g. IsNil). -// -// Extra arguments provided to the function are logged next to the reported -// problem when the matching fails. -func (c *C) Assert(obtained interface{}, checker Checker, args ...interface{}) { - if !c.internalCheck("Assert", obtained, checker, args...) 
{ - c.stopNow() - } -} - -func (c *C) internalCheck(funcName string, obtained interface{}, checker Checker, args ...interface{}) bool { - if checker == nil { - c.logCaller(2) - c.logString(fmt.Sprintf("%s(obtained, nil!?, ...):", funcName)) - c.logString("Oops.. you've provided a nil checker!") - c.logNewLine() - c.Fail() - return false - } - - // If the last argument is a bug info, extract it out. - var comment CommentInterface - if len(args) > 0 { - if c, ok := args[len(args)-1].(CommentInterface); ok { - comment = c - args = args[:len(args)-1] - } - } - - params := append([]interface{}{obtained}, args...) - info := checker.Info() - - if len(params) != len(info.Params) { - names := append([]string{info.Params[0], info.Name}, info.Params[1:]...) - c.logCaller(2) - c.logString(fmt.Sprintf("%s(%s):", funcName, strings.Join(names, ", "))) - c.logString(fmt.Sprintf("Wrong number of parameters for %s: want %d, got %d", info.Name, len(names), len(params)+1)) - c.logNewLine() - c.Fail() - return false - } - - // Copy since it may be mutated by Check. - names := append([]string{}, info.Params...) - - // Do the actual check. - result, error := checker.Check(params, names) - if !result || error != "" { - c.logCaller(2) - for i := 0; i != len(params); i++ { - c.logValue(names[i], params[i]) - } - if comment != nil { - c.logString(comment.CheckCommentString()) - } - if error != "" { - c.logString(error) - } - c.logNewLine() - c.Fail() - return false - } - return true -} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/printer.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/printer.go deleted file mode 100644 index e0f7557b..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/printer.go +++ /dev/null @@ -1,168 +0,0 @@ -package check - -import ( - "bytes" - "go/ast" - "go/parser" - "go/printer" - "go/token" - "os" -) - -func indent(s, with string) (r string) { - eol := true - for i := 0; i != len(s); i++ { - c := s[i] - switch { - case eol && c == '\n' || c == '\r': - case c == '\n' || c == '\r': - eol = true - case eol: - eol = false - s = s[:i] + with + s[i:] - i += len(with) - } - } - return s -} - -func printLine(filename string, line int) (string, error) { - fset := token.NewFileSet() - file, err := os.Open(filename) - if err != nil { - return "", err - } - fnode, err := parser.ParseFile(fset, filename, file, parser.ParseComments) - if err != nil { - return "", err - } - config := &printer.Config{Mode: printer.UseSpaces, Tabwidth: 4} - lp := &linePrinter{fset: fset, fnode: fnode, line: line, config: config} - ast.Walk(lp, fnode) - result := lp.output.Bytes() - // Comments leave \n at the end. 
- n := len(result) - for n > 0 && result[n-1] == '\n' { - n-- - } - return string(result[:n]), nil -} - -type linePrinter struct { - config *printer.Config - fset *token.FileSet - fnode *ast.File - line int - output bytes.Buffer - stmt ast.Stmt -} - -func (lp *linePrinter) emit() bool { - if lp.stmt != nil { - lp.trim(lp.stmt) - lp.printWithComments(lp.stmt) - lp.stmt = nil - return true - } - return false -} - -func (lp *linePrinter) printWithComments(n ast.Node) { - nfirst := lp.fset.Position(n.Pos()).Line - nlast := lp.fset.Position(n.End()).Line - for _, g := range lp.fnode.Comments { - cfirst := lp.fset.Position(g.Pos()).Line - clast := lp.fset.Position(g.End()).Line - if clast == nfirst-1 && lp.fset.Position(n.Pos()).Column == lp.fset.Position(g.Pos()).Column { - for _, c := range g.List { - lp.output.WriteString(c.Text) - lp.output.WriteByte('\n') - } - } - if cfirst >= nfirst && cfirst <= nlast && n.End() <= g.List[0].Slash { - // The printer will not include the comment if it starts past - // the node itself. Trick it into printing by overlapping the - // slash with the end of the statement. - g.List[0].Slash = n.End() - 1 - } - } - node := &printer.CommentedNode{n, lp.fnode.Comments} - lp.config.Fprint(&lp.output, lp.fset, node) -} - -func (lp *linePrinter) Visit(n ast.Node) (w ast.Visitor) { - if n == nil { - if lp.output.Len() == 0 { - lp.emit() - } - return nil - } - first := lp.fset.Position(n.Pos()).Line - last := lp.fset.Position(n.End()).Line - if first <= lp.line && last >= lp.line { - // Print the innermost statement containing the line. - if stmt, ok := n.(ast.Stmt); ok { - if _, ok := n.(*ast.BlockStmt); !ok { - lp.stmt = stmt - } - } - if first == lp.line && lp.emit() { - return nil - } - return lp - } - return nil -} - -func (lp *linePrinter) trim(n ast.Node) bool { - stmt, ok := n.(ast.Stmt) - if !ok { - return true - } - line := lp.fset.Position(n.Pos()).Line - if line != lp.line { - return false - } - switch stmt := stmt.(type) { - case *ast.IfStmt: - stmt.Body = lp.trimBlock(stmt.Body) - case *ast.SwitchStmt: - stmt.Body = lp.trimBlock(stmt.Body) - case *ast.TypeSwitchStmt: - stmt.Body = lp.trimBlock(stmt.Body) - case *ast.CaseClause: - stmt.Body = lp.trimList(stmt.Body) - case *ast.CommClause: - stmt.Body = lp.trimList(stmt.Body) - case *ast.BlockStmt: - stmt.List = lp.trimList(stmt.List) - } - return true -} - -func (lp *linePrinter) trimBlock(stmt *ast.BlockStmt) *ast.BlockStmt { - if !lp.trim(stmt) { - return lp.emptyBlock(stmt) - } - stmt.Rbrace = stmt.Lbrace - return stmt -} - -func (lp *linePrinter) trimList(stmts []ast.Stmt) []ast.Stmt { - for i := 0; i != len(stmts); i++ { - if !lp.trim(stmts[i]) { - stmts[i] = lp.emptyStmt(stmts[i]) - break - } - } - return stmts -} - -func (lp *linePrinter) emptyStmt(n ast.Node) *ast.ExprStmt { - return &ast.ExprStmt{&ast.Ellipsis{n.Pos(), nil}} -} - -func (lp *linePrinter) emptyBlock(n ast.Node) *ast.BlockStmt { - p := n.Pos() - return &ast.BlockStmt{p, []ast.Stmt{lp.emptyStmt(n)}, p} -} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/run.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/run.go deleted file mode 100644 index da8fd798..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/check.v1/run.go +++ /dev/null @@ -1,175 +0,0 @@ -package check - -import ( - "bufio" - "flag" - "fmt" - "os" - "testing" - "time" -) - -// ----------------------------------------------------------------------- -// Test suite registry. 
- -var allSuites []interface{} - -// Suite registers the given value as a test suite to be run. Any methods -// starting with the Test prefix in the given value will be considered as -// a test method. -func Suite(suite interface{}) interface{} { - allSuites = append(allSuites, suite) - return suite -} - -// ----------------------------------------------------------------------- -// Public running interface. - -var ( - oldFilterFlag = flag.String("gocheck.f", "", "Regular expression selecting which tests and/or suites to run") - oldVerboseFlag = flag.Bool("gocheck.v", false, "Verbose mode") - oldStreamFlag = flag.Bool("gocheck.vv", false, "Super verbose mode (disables output caching)") - oldBenchFlag = flag.Bool("gocheck.b", false, "Run benchmarks") - oldBenchTime = flag.Duration("gocheck.btime", 1*time.Second, "approximate run time for each benchmark") - oldListFlag = flag.Bool("gocheck.list", false, "List the names of all tests that will be run") - oldWorkFlag = flag.Bool("gocheck.work", false, "Display and do not remove the test working directory") - - newFilterFlag = flag.String("check.f", "", "Regular expression selecting which tests and/or suites to run") - newVerboseFlag = flag.Bool("check.v", false, "Verbose mode") - newStreamFlag = flag.Bool("check.vv", false, "Super verbose mode (disables output caching)") - newBenchFlag = flag.Bool("check.b", false, "Run benchmarks") - newBenchTime = flag.Duration("check.btime", 1*time.Second, "approximate run time for each benchmark") - newBenchMem = flag.Bool("check.bmem", false, "Report memory benchmarks") - newListFlag = flag.Bool("check.list", false, "List the names of all tests that will be run") - newWorkFlag = flag.Bool("check.work", false, "Display and do not remove the test working directory") -) - -// TestingT runs all test suites registered with the Suite function, -// printing results to stdout, and reporting any failures back to -// the "testing" package. -func TestingT(testingT *testing.T) { - benchTime := *newBenchTime - if benchTime == 1*time.Second { - benchTime = *oldBenchTime - } - conf := &RunConf{ - Filter: *oldFilterFlag + *newFilterFlag, - Verbose: *oldVerboseFlag || *newVerboseFlag, - Stream: *oldStreamFlag || *newStreamFlag, - Benchmark: *oldBenchFlag || *newBenchFlag, - BenchmarkTime: benchTime, - BenchmarkMem: *newBenchMem, - KeepWorkDir: *oldWorkFlag || *newWorkFlag, - } - if *oldListFlag || *newListFlag { - w := bufio.NewWriter(os.Stdout) - for _, name := range ListAll(conf) { - fmt.Fprintln(w, name) - } - w.Flush() - return - } - result := RunAll(conf) - println(result.String()) - if !result.Passed() { - testingT.Fail() - } -} - -// RunAll runs all test suites registered with the Suite function, using the -// provided run configuration. -func RunAll(runConf *RunConf) *Result { - result := Result{} - for _, suite := range allSuites { - result.Add(Run(suite, runConf)) - } - return &result -} - -// Run runs the provided test suite using the provided run configuration. -func Run(suite interface{}, runConf *RunConf) *Result { - runner := newSuiteRunner(suite, runConf) - return runner.run() -} - -// ListAll returns the names of all the test functions registered with the -// Suite function that will be run with the provided run configuration. -func ListAll(runConf *RunConf) []string { - var names []string - for _, suite := range allSuites { - names = append(names, List(suite, runConf)...) 
- } - return names -} - -// List returns the names of the test functions in the given -// suite that will be run with the provided run configuration. -func List(suite interface{}, runConf *RunConf) []string { - var names []string - runner := newSuiteRunner(suite, runConf) - for _, t := range runner.tests { - names = append(names, t.String()) - } - return names -} - -// ----------------------------------------------------------------------- -// Result methods. - -func (r *Result) Add(other *Result) { - r.Succeeded += other.Succeeded - r.Skipped += other.Skipped - r.Failed += other.Failed - r.Panicked += other.Panicked - r.FixturePanicked += other.FixturePanicked - r.ExpectedFailures += other.ExpectedFailures - r.Missed += other.Missed - if r.WorkDir != "" && other.WorkDir != "" { - r.WorkDir += ":" + other.WorkDir - } else if other.WorkDir != "" { - r.WorkDir = other.WorkDir - } -} - -func (r *Result) Passed() bool { - return (r.Failed == 0 && r.Panicked == 0 && - r.FixturePanicked == 0 && r.Missed == 0 && - r.RunError == nil) -} - -func (r *Result) String() string { - if r.RunError != nil { - return "ERROR: " + r.RunError.Error() - } - - var value string - if r.Failed == 0 && r.Panicked == 0 && r.FixturePanicked == 0 && - r.Missed == 0 { - value = "OK: " - } else { - value = "OOPS: " - } - value += fmt.Sprintf("%d passed", r.Succeeded) - if r.Skipped != 0 { - value += fmt.Sprintf(", %d skipped", r.Skipped) - } - if r.ExpectedFailures != 0 { - value += fmt.Sprintf(", %d expected failures", r.ExpectedFailures) - } - if r.Failed != 0 { - value += fmt.Sprintf(", %d FAILED", r.Failed) - } - if r.Panicked != 0 { - value += fmt.Sprintf(", %d PANICKED", r.Panicked) - } - if r.FixturePanicked != 0 { - value += fmt.Sprintf(", %d FIXTURE-PANICKED", r.FixturePanicked) - } - if r.Missed != 0 { - value += fmt.Sprintf(", %d MISSED", r.Missed) - } - if r.WorkDir != "" { - value += "\nWORK=" + r.WorkDir - } - return value -} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/LICENSE deleted file mode 100644 index a68e67f0..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/LICENSE +++ /dev/null @@ -1,188 +0,0 @@ - -Copyright (c) 2011-2014 - Canonical Inc. - -This software is licensed under the LGPLv3, included below. - -As a special exception to the GNU Lesser General Public License version 3 -("LGPL3"), the copyright holders of this Library give you permission to -convey to a third party a Combined Work that links statically or dynamically -to this Library without providing any Minimal Corresponding Source or -Minimal Application Code as set out in 4d or providing the installation -information set out in section 4e, provided that you comply with the other -provisions of LGPL3 and provided that you meet, for the Application the -terms and conditions of the license(s) which apply to the Application. - -Except as stated in this special exception, the provisions of LGPL3 will -continue to comply in full to this Library. If you modify this Library, you -may apply this exception to your version of this Library, but you are not -obliged to do so. If you do not wish to do so, delete this exception -statement from your version. This exception does not (and cannot) modify any -license terms which apply to the Application, with which you must still -comply. - - - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. 
- Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. 
- - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. 
If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/LICENSE.libyaml deleted file mode 100644 index 8da58fbf..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/LICENSE.libyaml +++ /dev/null @@ -1,31 +0,0 @@ -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original copyright and license: - - apic.go - emitterc.go - parserc.go - readerc.go - scannerc.go - writerc.go - yamlh.go - yamlprivateh.go - -Copyright (c) 2006 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/README.md b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/README.md deleted file mode 100644 index d6c919e6..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/README.md +++ /dev/null @@ -1,128 +0,0 @@ -# YAML support for the Go language - -Introduction ------------- - -The yaml package enables Go programs to comfortably encode and decode YAML -values. It was developed within [Canonical](https://www.canonical.com) as -part of the [juju](https://juju.ubuntu.com) project, and is based on a -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) -C library to parse and generate YAML data quickly and reliably. - -Compatibility -------------- - -The yaml package supports most of YAML 1.1 and 1.2, including support for -anchors, tags, map merging, etc. 
Multi-document unmarshalling is not yet -implemented, and base-60 floats from YAML 1.1 are purposefully not -supported since they're a poor design and are gone in YAML 1.2. - -Installation and usage ----------------------- - -The import path for the package is *gopkg.in/yaml.v2*. - -To install it, run: - - go get gopkg.in/yaml.v2 - -API documentation ------------------ - -If opened in a browser, the import path itself leads to the API documentation: - - * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) - -API stability -------------- - -The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). - - -License -------- - -The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details. - - -Example -------- - -```Go -package main - -import ( - "fmt" - "log" - - "gopkg.in/yaml.v2" -) - -var data = ` -a: Easy! -b: - c: 2 - d: [3, 4] -` - -type T struct { - A string - B struct{C int; D []int ",flow"} -} - -func main() { - t := T{} - - err := yaml.Unmarshal([]byte(data), &t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t:\n%v\n\n", t) - - d, err := yaml.Marshal(&t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t dump:\n%s\n\n", string(d)) - - m := make(map[interface{}]interface{}) - - err = yaml.Unmarshal([]byte(data), &m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m:\n%v\n\n", m) - - d, err = yaml.Marshal(&m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m dump:\n%s\n\n", string(d)) -} -``` - -This example will generate the following output: - -``` ---- t: -{Easy! {2 [3 4]}} - ---- t dump: -a: Easy! -b: - c: 2 - d: [3, 4] - - ---- m: -map[a:Easy! b:map[c:2 d:[3 4]]] - ---- m dump: -a: Easy! -b: - c: 2 - d: - - 3 - - 4 -``` - diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/apic.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/apic.go deleted file mode 100644 index 95ec014e..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/apic.go +++ /dev/null @@ -1,742 +0,0 @@ -package yaml - -import ( - "io" - "os" -) - -func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) - - // Check if we can move the queue at the beginning of the buffer. - if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// Create a new parser object. -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, input_raw_buffer_size), - buffer: make([]byte, 0, input_buffer_size), - } - return true -} - -// Destroy a parser object. -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -// String read handler. 
-func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - n = copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -// File read handler. -func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_file.Read(buffer) -} - -// Set a string input. -func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_string_read_handler - parser.input = input - parser.input_pos = 0 -} - -// Set a file input. -func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_file_read_handler - parser.input_file = file -} - -// Set the source encoding. -func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { - if parser.encoding != yaml_ANY_ENCODING { - panic("must set the encoding only once") - } - parser.encoding = encoding -} - -// Create a new emitter object. -func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { - *emitter = yaml_emitter_t{ - buffer: make([]byte, output_buffer_size), - raw_buffer: make([]byte, 0, output_raw_buffer_size), - states: make([]yaml_emitter_state_t, 0, initial_stack_size), - events: make([]yaml_event_t, 0, initial_queue_size), - } - return true -} - -// Destroy an emitter object. -func yaml_emitter_delete(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{} -} - -// String write handler. -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - *emitter.output_buffer = append(*emitter.output_buffer, buffer...) - return nil -} - -// File write handler. -func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_file.Write(buffer) - return err -} - -// Set a string output. -func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_string_write_handler - emitter.output_buffer = output_buffer -} - -// Set a file output. -func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_file_write_handler - emitter.output_file = file -} - -// Set the output encoding. -func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { - if emitter.encoding != yaml_ANY_ENCODING { - panic("must set the output encoding only once") - } - emitter.encoding = encoding -} - -// Set the canonical output style. -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { - emitter.canonical = canonical -} - -//// Set the indentation increment. -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.best_indent = indent -} - -// Set the preferred line width. -func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { - if width < 0 { - width = -1 - } - emitter.best_width = width -} - -// Set if unescaped non-ASCII characters are allowed. 
-func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { - emitter.unicode = unicode -} - -// Set the preferred line break character. -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { - emitter.line_break = line_break -} - -///* -// * Destroy a token object. -// */ -// -//YAML_DECLARE(void) -//yaml_token_delete(yaml_token_t *token) -//{ -// assert(token); // Non-NULL token object expected. -// -// switch (token.type) -// { -// case YAML_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case YAML_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case YAML_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case YAML_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case YAML_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -//} -// -///* -// * Check if a string is a valid UTF-8 sequence. -// * -// * Check 'reader.c' for more details on UTF-8 encoding. -// */ -// -//static int -//yaml_check_utf8(yaml_char_t *start, size_t length) -//{ -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -//} -// - -// Create STREAM-START. -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - encoding: encoding, - } - return true -} - -// Create STREAM-END. -func yaml_stream_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - } - return true -} - -// Create DOCUMENT-START. -func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, implicit bool) bool { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } - return true -} - -// Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } - return true -} - -///* -// * Create ALIAS. -// */ -// -//YAML_DECLARE(int) -//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) -//{ -// mark yaml_mark_t = { 0, 0, 0 } -// anchor_copy *yaml_char_t = NULL -// -// assert(event) // Non-NULL event object is expected. -// assert(anchor) // Non-NULL anchor is expected. 
-// -// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 -// -// anchor_copy = yaml_strdup(anchor) -// if (!anchor_copy) -// return 0 -// -// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) -// -// return 1 -//} - -// Create SCALAR. -func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-START. -func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-END. -func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - } - return true -} - -// Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create MAPPING-END. -func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - } - return true -} - -// Destroy an event object. -func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -///* -// * Create a document object. -// */ -// -//YAML_DECLARE(int) -//yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives_start *yaml_tag_directive_t, -// tag_directives_end *yaml_tag_directive_t, -// start_implicit int, end_implicit int) -//{ -// struct { -// error yaml_error_type_t -// } context -// struct { -// start *yaml_node_t -// end *yaml_node_t -// top *yaml_node_t -// } nodes = { NULL, NULL, NULL } -// version_directive_copy *yaml_version_directive_t = NULL -// struct { -// start *yaml_tag_directive_t -// end *yaml_tag_directive_t -// top *yaml_tag_directive_t -// } tag_directives_copy = { NULL, NULL, NULL } -// value yaml_tag_directive_t = { NULL, NULL } -// mark yaml_mark_t = { 0, 0, 0 } -// -// assert(document) // Non-NULL document object is expected. -// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)) -// // Valid tag directives are expected. 
-// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) -// if (!version_directive_copy) goto error -// version_directive_copy.major = version_directive.major -// version_directive_copy.minor = version_directive.minor -// } -// -// if (tag_directives_start != tag_directives_end) { -// tag_directive *yaml_tag_directive_t -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error -// for (tag_directive = tag_directives_start -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle) -// assert(tag_directive.prefix) -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error -// value.handle = yaml_strdup(tag_directive.handle) -// value.prefix = yaml_strdup(tag_directive.prefix) -// if (!value.handle || !value.prefix) goto error -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error -// value.handle = NULL -// value.prefix = NULL -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark) -// -// return 1 -// -//error: -// STACK_DEL(&context, nodes) -// yaml_free(version_directive_copy) -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// value yaml_tag_directive_t = POP(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// } -// STACK_DEL(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// -// return 0 -//} -// -///* -// * Destroy a document object. -// */ -// -//YAML_DECLARE(void) -//yaml_document_delete(document *yaml_document_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// tag_directive *yaml_tag_directive_t -// -// context.error = YAML_NO_ERROR // Eliminate a compliler warning. -// -// assert(document) // Non-NULL document object is expected. -// -// while (!STACK_EMPTY(&context, document.nodes)) { -// node yaml_node_t = POP(&context, document.nodes) -// yaml_free(node.tag) -// switch (node.type) { -// case YAML_SCALAR_NODE: -// yaml_free(node.data.scalar.value) -// break -// case YAML_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items) -// break -// case YAML_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs) -// break -// default: -// assert(0) // Should not happen. -// } -// } -// STACK_DEL(&context, document.nodes) -// -// yaml_free(document.version_directive) -// for (tag_directive = document.tag_directives.start -// tag_directive != document.tag_directives.end -// tag_directive++) { -// yaml_free(tag_directive.handle) -// yaml_free(tag_directive.prefix) -// } -// yaml_free(document.tag_directives.start) -// -// memset(document, 0, sizeof(yaml_document_t)) -//} -// -///** -// * Get a document node. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_node(document *yaml_document_t, index int) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1 -// } -// return NULL -//} -// -///** -// * Get the root object. 
-// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_root_node(document *yaml_document_t) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start -// } -// return NULL -//} -// -///* -// * Add a scalar node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_scalar(document *yaml_document_t, -// tag *yaml_char_t, value *yaml_char_t, length int, -// style yaml_scalar_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// value_copy *yaml_char_t = NULL -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// assert(value) // Non-NULL value is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (length < 0) { -// length = strlen((char *)value) -// } -// -// if (!yaml_check_utf8(value, length)) goto error -// value_copy = yaml_malloc(length+1) -// if (!value_copy) goto error -// memcpy(value_copy, value, length) -// value_copy[length] = '\0' -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// yaml_free(tag_copy) -// yaml_free(value_copy) -// -// return 0 -//} -// -///* -// * Add a sequence node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_sequence(document *yaml_document_t, -// tag *yaml_char_t, style yaml_sequence_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_item_t -// end *yaml_node_item_t -// top *yaml_node_item_t -// } items = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, items) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Add a mapping node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_mapping(document *yaml_document_t, -// tag *yaml_char_t, style yaml_mapping_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_pair_t -// end *yaml_node_pair_t -// top *yaml_node_pair_t -// } pairs = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. 
-// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, pairs) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Append an item to a sequence node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_sequence_item(document *yaml_document_t, -// sequence int, item int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// assert(document) // Non-NULL document is required. -// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top) -// // Valid sequence id is required. -// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) -// // A sequence node is required. -// assert(item > 0 && document.nodes.start + item <= document.nodes.top) -// // Valid item id is required. -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0 -// -// return 1 -//} -// -///* -// * Append a pair of a key and a value to a mapping node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_mapping_pair(document *yaml_document_t, -// mapping int, key int, value int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// pair yaml_node_pair_t -// -// assert(document) // Non-NULL document is required. -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top) -// // Valid mapping id is required. -// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) -// // A mapping node is required. -// assert(key > 0 && document.nodes.start + key <= document.nodes.top) -// // Valid key id is required. -// assert(value > 0 && document.nodes.start + value <= document.nodes.top) -// // Valid value id is required. -// -// pair.key = key -// pair.value = value -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0 -// -// return 1 -//} -// -// diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/decode.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/decode.go deleted file mode 100644 index ec9d2710..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/decode.go +++ /dev/null @@ -1,667 +0,0 @@ -package yaml - -import ( - "encoding" - "encoding/base64" - "fmt" - "math" - "reflect" - "strconv" - "time" -) - -const ( - documentNode = 1 << iota - mappingNode - sequenceNode - scalarNode - aliasNode -) - -type node struct { - kind int - line, column int - tag string - value string - implicit bool - children []*node - anchors map[string]*node -} - -// ---------------------------------------------------------------------------- -// Parser, produces a node tree out of a libyaml event stream. 
- -type parser struct { - parser yaml_parser_t - event yaml_event_t - doc *node -} - -func newParser(b []byte) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - - if len(b) == 0 { - b = []byte{'\n'} - } - - yaml_parser_set_input_string(&p.parser, b) - - p.skip() - if p.event.typ != yaml_STREAM_START_EVENT { - panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) - } - p.skip() - return &p -} - -func (p *parser) destroy() { - if p.event.typ != yaml_NO_EVENT { - yaml_event_delete(&p.event) - } - yaml_parser_delete(&p.parser) -} - -func (p *parser) skip() { - if p.event.typ != yaml_NO_EVENT { - if p.event.typ == yaml_STREAM_END_EVENT { - failf("attempted to go past the end of stream; corrupted value?") - } - yaml_event_delete(&p.event) - } - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } -} - -func (p *parser) fail() { - var where string - var line int - if p.parser.problem_mark.line != 0 { - line = p.parser.problem_mark.line - } else if p.parser.context_mark.line != 0 { - line = p.parser.context_mark.line - } - if line != 0 { - where = "line " + strconv.Itoa(line) + ": " - } - var msg string - if len(p.parser.problem) > 0 { - msg = p.parser.problem - } else { - msg = "unknown problem parsing YAML content" - } - failf("%s%s", where, msg) -} - -func (p *parser) anchor(n *node, anchor []byte) { - if anchor != nil { - p.doc.anchors[string(anchor)] = n - } -} - -func (p *parser) parse() *node { - switch p.event.typ { - case yaml_SCALAR_EVENT: - return p.scalar() - case yaml_ALIAS_EVENT: - return p.alias() - case yaml_MAPPING_START_EVENT: - return p.mapping() - case yaml_SEQUENCE_START_EVENT: - return p.sequence() - case yaml_DOCUMENT_START_EVENT: - return p.document() - case yaml_STREAM_END_EVENT: - // Happens when attempting to decode an empty buffer. - return nil - default: - panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) - } - panic("unreachable") -} - -func (p *parser) node(kind int) *node { - return &node{ - kind: kind, - line: p.event.start_mark.line, - column: p.event.start_mark.column, - } -} - -func (p *parser) document() *node { - n := p.node(documentNode) - n.anchors = make(map[string]*node) - p.doc = n - p.skip() - n.children = append(n.children, p.parse()) - if p.event.typ != yaml_DOCUMENT_END_EVENT { - panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) - } - p.skip() - return n -} - -func (p *parser) alias() *node { - n := p.node(aliasNode) - n.value = string(p.event.anchor) - p.skip() - return n -} - -func (p *parser) scalar() *node { - n := p.node(scalarNode) - n.value = string(p.event.value) - n.tag = string(p.event.tag) - n.implicit = p.event.implicit - p.anchor(n, p.event.anchor) - p.skip() - return n -} - -func (p *parser) sequence() *node { - n := p.node(sequenceNode) - p.anchor(n, p.event.anchor) - p.skip() - for p.event.typ != yaml_SEQUENCE_END_EVENT { - n.children = append(n.children, p.parse()) - } - p.skip() - return n -} - -func (p *parser) mapping() *node { - n := p.node(mappingNode) - p.anchor(n, p.event.anchor) - p.skip() - for p.event.typ != yaml_MAPPING_END_EVENT { - n.children = append(n.children, p.parse(), p.parse()) - } - p.skip() - return n -} - -// ---------------------------------------------------------------------------- -// Decoder, unmarshals a node into a provided value. 
- -type decoder struct { - doc *node - aliases map[string]bool - mapType reflect.Type - terrors []string -} - -var ( - mapItemType = reflect.TypeOf(MapItem{}) - durationType = reflect.TypeOf(time.Duration(0)) - defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) - ifaceType = defaultMapType.Elem() -) - -func newDecoder() *decoder { - d := &decoder{mapType: defaultMapType} - d.aliases = make(map[string]bool) - return d -} - -func (d *decoder) terror(n *node, tag string, out reflect.Value) { - if n.tag != "" { - tag = n.tag - } - value := n.value - if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { - if len(value) > 10 { - value = " `" + value[:7] + "...`" - } else { - value = " `" + value + "`" - } - } - d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) -} - -func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { - terrlen := len(d.terrors) - err := u.UnmarshalYAML(func(v interface{}) (err error) { - defer handleErr(&err) - d.unmarshal(n, reflect.ValueOf(v)) - if len(d.terrors) > terrlen { - issues := d.terrors[terrlen:] - d.terrors = d.terrors[:terrlen] - return &TypeError{issues} - } - return nil - }) - if e, ok := err.(*TypeError); ok { - d.terrors = append(d.terrors, e.Errors...) - return false - } - if err != nil { - fail(err) - } - return true -} - -// d.prepare initializes and dereferences pointers and calls UnmarshalYAML -// if a value is found to implement it. -// It returns the initialized and dereferenced out value, whether -// unmarshalling was already done by UnmarshalYAML, and if so whether -// its types unmarshalled appropriately. -// -// If n holds a null value, prepare returns before doing anything. -func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") { - return out, false, false - } - again := true - for again { - again = false - if out.Kind() == reflect.Ptr { - if out.IsNil() { - out.Set(reflect.New(out.Type().Elem())) - } - out = out.Elem() - again = true - } - if out.CanAddr() { - if u, ok := out.Addr().Interface().(Unmarshaler); ok { - good = d.callUnmarshaler(n, u) - return out, true, good - } - } - } - return out, false, false -} - -func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { - switch n.kind { - case documentNode: - return d.document(n, out) - case aliasNode: - return d.alias(n, out) - } - out, unmarshaled, good := d.prepare(n, out) - if unmarshaled { - return good - } - switch n.kind { - case scalarNode: - good = d.scalar(n, out) - case mappingNode: - good = d.mapping(n, out) - case sequenceNode: - good = d.sequence(n, out) - default: - panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) - } - return good -} - -func (d *decoder) document(n *node, out reflect.Value) (good bool) { - if len(n.children) == 1 { - d.doc = n - d.unmarshal(n.children[0], out) - return true - } - return false -} - -func (d *decoder) alias(n *node, out reflect.Value) (good bool) { - an, ok := d.doc.anchors[n.value] - if !ok { - failf("unknown anchor '%s' referenced", n.value) - } - if d.aliases[n.value] { - failf("anchor '%s' value contains itself", n.value) - } - d.aliases[n.value] = true - good = d.unmarshal(an, out) - delete(d.aliases, n.value) - return good -} - -var zeroValue reflect.Value - -func resetMap(out reflect.Value) { - for _, k := range out.MapKeys() { - out.SetMapIndex(k, 
zeroValue) - } -} - -func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { - var tag string - var resolved interface{} - if n.tag == "" && !n.implicit { - tag = yaml_STR_TAG - resolved = n.value - } else { - tag, resolved = resolve(n.tag, n.value) - if tag == yaml_BINARY_TAG { - data, err := base64.StdEncoding.DecodeString(resolved.(string)) - if err != nil { - failf("!!binary value contains invalid base64 data") - } - resolved = string(data) - } - } - if resolved == nil { - if out.Kind() == reflect.Map && !out.CanAddr() { - resetMap(out) - } else { - out.Set(reflect.Zero(out.Type())) - } - return true - } - if s, ok := resolved.(string); ok && out.CanAddr() { - if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok { - err := u.UnmarshalText([]byte(s)) - if err != nil { - fail(err) - } - return true - } - } - switch out.Kind() { - case reflect.String: - if tag == yaml_BINARY_TAG { - out.SetString(resolved.(string)) - good = true - } else if resolved != nil { - out.SetString(n.value) - good = true - } - case reflect.Interface: - if resolved == nil { - out.Set(reflect.Zero(out.Type())) - } else { - out.Set(reflect.ValueOf(resolved)) - } - good = true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - switch resolved := resolved.(type) { - case int: - if !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - good = true - } - case int64: - if !out.OverflowInt(resolved) { - out.SetInt(resolved) - good = true - } - case uint64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - good = true - } - case float64: - if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - good = true - } - case string: - if out.Type() == durationType { - d, err := time.ParseDuration(resolved) - if err == nil { - out.SetInt(int64(d)) - good = true - } - } - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - switch resolved := resolved.(type) { - case int: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - good = true - } - case int64: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - good = true - } - case uint64: - if !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - good = true - } - case float64: - if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - good = true - } - } - case reflect.Bool: - switch resolved := resolved.(type) { - case bool: - out.SetBool(resolved) - good = true - } - case reflect.Float32, reflect.Float64: - switch resolved := resolved.(type) { - case int: - out.SetFloat(float64(resolved)) - good = true - case int64: - out.SetFloat(float64(resolved)) - good = true - case uint64: - out.SetFloat(float64(resolved)) - good = true - case float64: - out.SetFloat(resolved) - good = true - } - case reflect.Ptr: - if out.Type().Elem() == reflect.TypeOf(resolved) { - // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? 
- elem := reflect.New(out.Type().Elem()) - elem.Elem().Set(reflect.ValueOf(resolved)) - out.Set(elem) - good = true - } - } - if !good { - d.terror(n, tag, out) - } - return good -} - -func settableValueOf(i interface{}) reflect.Value { - v := reflect.ValueOf(i) - sv := reflect.New(v.Type()).Elem() - sv.Set(v) - return sv -} - -func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { - l := len(n.children) - - var iface reflect.Value - switch out.Kind() { - case reflect.Slice: - out.Set(reflect.MakeSlice(out.Type(), l, l)) - case reflect.Interface: - // No type hints. Will have to use a generic sequence. - iface = out - out = settableValueOf(make([]interface{}, l)) - default: - d.terror(n, yaml_SEQ_TAG, out) - return false - } - et := out.Type().Elem() - - j := 0 - for i := 0; i < l; i++ { - e := reflect.New(et).Elem() - if ok := d.unmarshal(n.children[i], e); ok { - out.Index(j).Set(e) - j++ - } - } - out.Set(out.Slice(0, j)) - if iface.IsValid() { - iface.Set(out) - } - return true -} - -func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { - switch out.Kind() { - case reflect.Struct: - return d.mappingStruct(n, out) - case reflect.Slice: - return d.mappingSlice(n, out) - case reflect.Map: - // okay - case reflect.Interface: - if d.mapType.Kind() == reflect.Map { - iface := out - out = reflect.MakeMap(d.mapType) - iface.Set(out) - } else { - slicev := reflect.New(d.mapType).Elem() - if !d.mappingSlice(n, slicev) { - return false - } - out.Set(slicev) - return true - } - default: - d.terror(n, yaml_MAP_TAG, out) - return false - } - outt := out.Type() - kt := outt.Key() - et := outt.Elem() - - mapType := d.mapType - if outt.Key() == ifaceType && outt.Elem() == ifaceType { - d.mapType = outt - } - - if out.IsNil() { - out.Set(reflect.MakeMap(outt)) - } - l := len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - k := reflect.New(kt).Elem() - if d.unmarshal(n.children[i], k) { - kkind := k.Kind() - if kkind == reflect.Interface { - kkind = k.Elem().Kind() - } - if kkind == reflect.Map || kkind == reflect.Slice { - failf("invalid map key: %#v", k.Interface()) - } - e := reflect.New(et).Elem() - if d.unmarshal(n.children[i+1], e) { - out.SetMapIndex(k, e) - } - } - } - d.mapType = mapType - return true -} - -func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { - outt := out.Type() - if outt.Elem() != mapItemType { - d.terror(n, yaml_MAP_TAG, out) - return false - } - - mapType := d.mapType - d.mapType = outt - - var slice []MapItem - var l = len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - item := MapItem{} - k := reflect.ValueOf(&item.Key).Elem() - if d.unmarshal(n.children[i], k) { - v := reflect.ValueOf(&item.Value).Elem() - if d.unmarshal(n.children[i+1], v) { - slice = append(slice, item) - } - } - } - out.Set(reflect.ValueOf(slice)) - d.mapType = mapType - return true -} - -func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { - sinfo, err := getStructInfo(out.Type()) - if err != nil { - panic(err) - } - name := settableValueOf("") - l := len(n.children) - for i := 0; i < l; i += 2 { - ni := n.children[i] - if isMerge(ni) { - d.merge(n.children[i+1], out) - continue - } - if !d.unmarshal(ni, name) { - continue - } - if info, ok := sinfo.FieldsMap[name.String()]; ok { - var field reflect.Value - if info.Inline == nil { - field = out.Field(info.Num) - } else { - field = 
out.FieldByIndex(info.Inline) - } - d.unmarshal(n.children[i+1], field) - } - } - return true -} - -func failWantMap() { - failf("map merge requires map or sequence of maps as the value") -} - -func (d *decoder) merge(n *node, out reflect.Value) { - switch n.kind { - case mappingNode: - d.unmarshal(n, out) - case aliasNode: - an, ok := d.doc.anchors[n.value] - if ok && an.kind != mappingNode { - failWantMap() - } - d.unmarshal(n, out) - case sequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.children) - 1; i >= 0; i-- { - ni := n.children[i] - if ni.kind == aliasNode { - an, ok := d.doc.anchors[ni.value] - if ok && an.kind != mappingNode { - failWantMap() - } - } else if ni.kind != mappingNode { - failWantMap() - } - d.unmarshal(ni, out) - } - default: - failWantMap() - } -} - -func isMerge(n *node) bool { - return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) -} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/emitterc.go deleted file mode 100644 index 9b3dc4a4..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/emitterc.go +++ /dev/null @@ -1,1685 +0,0 @@ -package yaml - -import ( - "bytes" -) - -// Flush the buffer if needed. -func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -// Put a character to the output buffer. -func put(emitter *yaml_emitter_t, value byte) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -// Put a line break to the output buffer. -func put_break(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos += 1 - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 1 - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos+0] = '\r' - emitter.buffer[emitter.buffer_pos+1] = '\n' - emitter.buffer_pos += 2 - default: - panic("unknown line break setting") - } - emitter.column = 0 - emitter.line++ - return true -} - -// Copy a character from a string into buffer. -func write(emitter *yaml_emitter_t, s []byte, i *int) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - p := emitter.buffer_pos - w := width(s[*i]) - switch w { - case 4: - emitter.buffer[p+3] = s[*i+3] - fallthrough - case 3: - emitter.buffer[p+2] = s[*i+2] - fallthrough - case 2: - emitter.buffer[p+1] = s[*i+1] - fallthrough - case 1: - emitter.buffer[p+0] = s[*i+0] - default: - panic("unknown character width") - } - emitter.column++ - emitter.buffer_pos += w - *i += w - return true -} - -// Write a whole string into buffer. -func write_all(emitter *yaml_emitter_t, s []byte) bool { - for i := 0; i < len(s); { - if !write(emitter, s, &i) { - return false - } - } - return true -} - -// Copy a line break character from a string into buffer. 
-func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { - if s[*i] == '\n' { - if !put_break(emitter) { - return false - } - *i++ - } else { - if !write(emitter, s, i) { - return false - } - emitter.column = 0 - emitter.line++ - } - return true -} - -// Set an emitter error and return false. -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -// Emit an event. -func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -// Check if we need to accumulate more events before emitting. -// -// We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - var accumulate int - switch emitter.events[emitter.events_head].typ { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - break - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - break - case yaml_MAPPING_START_EVENT: - accumulate = 3 - break - default: - return false - } - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - var level int - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].typ { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - if level == 0 { - return false - } - } - return true -} - -// Append a directive to the directives stack. -func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { - for i := 0; i < len(emitter.tag_directives); i++ { - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") - } - } - - // [Go] Do we actually need to copy this given garbage collection - // and the lack of deallocating destructors? - tag_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(tag_copy.handle, value.handle) - copy(tag_copy.prefix, value.prefix) - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - return true -} - -// Increase the indentation level. -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { - emitter.indents = append(emitter.indents, emitter.indent) - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - emitter.indent += emitter.best_indent - } - return true -} - -// State dispatcher. 
-func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { - switch emitter.state { - default: - case yaml_EMIT_STREAM_START_STATE: - return yaml_emitter_emit_stream_start(emitter, event) - - case yaml_EMIT_FIRST_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, true) - - case yaml_EMIT_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, false) - - case yaml_EMIT_DOCUMENT_CONTENT_STATE: - return yaml_emitter_emit_document_content(emitter, event) - - case yaml_EMIT_DOCUMENT_END_STATE: - return yaml_emitter_emit_document_end(emitter, event) - - case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true) - - case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, false) - - case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, true) - - case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, false) - - case yaml_EMIT_END_STATE: - return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") - } - panic("invalid emitter state") -} - -// Expect STREAM-START. -func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_STREAM_START_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") - } - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = event.encoding - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = yaml_UTF8_ENCODING - } - } - if emitter.best_indent < 2 || emitter.best_indent > 9 { - emitter.best_indent = 2 - } - if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { - emitter.best_width = 80 - } - if emitter.best_width < 0 { - emitter.best_width = 1<<31 - 1 - } - if emitter.line_break == yaml_ANY_BREAK { - emitter.line_break = yaml_LN_BREAK - } - - emitter.indent = -1 - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - - if emitter.encoding != yaml_UTF8_ENCODING { - if !yaml_emitter_write_bom(emitter) { - return false - } - } - emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE - return true -} - -// Expect DOCUMENT-START or STREAM-END. 
-func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - - if event.typ == yaml_DOCUMENT_START_EVENT { - - if event.version_directive != nil { - if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { - return false - } - } - - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { - return false - } - if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { - return false - } - } - - for i := 0; i < len(default_tag_directives); i++ { - tag_directive := &default_tag_directives[i] - if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if event.version_directive != nil { - implicit = false - if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if len(event.tag_directives) > 0 { - implicit = false - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { - return false - } - if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if yaml_emitter_check_empty_document(emitter) { - implicit = false - } - if !implicit { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { - return false - } - if emitter.canonical { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE - return true - } - - if event.typ == yaml_STREAM_END_EVENT { - if emitter.open_ended { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_END_STATE - return true - } - - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") -} - -// Expect the root node. -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - return yaml_emitter_emit_node(emitter, event, true, false, false, false) -} - -// Expect DOCUMENT-END. -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_DOCUMENT_END_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !event.implicit { - // [Go] Allocate the slice elsewhere. 
- if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_DOCUMENT_START_STATE - emitter.tag_directives = emitter.tag_directives[:0] - return true -} - -// Expect a flow item node. -func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a flow key node. -func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_MAPPING_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a flow value node. 
-func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block item node. -func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { - return false - } - } - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a block key node. -func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, false) { - return false - } - } - if event.typ == yaml_MAPPING_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block value node. -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a node. 
-func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, - root bool, sequence bool, mapping bool, simple_key bool) bool { - - emitter.root_context = root - emitter.sequence_context = sequence - emitter.mapping_context = mapping - emitter.simple_key_context = simple_key - - switch event.typ { - case yaml_ALIAS_EVENT: - return yaml_emitter_emit_alias(emitter, event) - case yaml_SCALAR_EVENT: - return yaml_emitter_emit_scalar(emitter, event) - case yaml_SEQUENCE_START_EVENT: - return yaml_emitter_emit_sequence_start(emitter, event) - case yaml_MAPPING_START_EVENT: - return yaml_emitter_emit_mapping_start(emitter, event) - default: - return yaml_emitter_set_emitter_error(emitter, - "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") - } - return false -} - -// Expect ALIAS. -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SCALAR. -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_select_scalar_style(emitter, event) { - return false - } - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - if !yaml_emitter_process_scalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SEQUENCE-START. -func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - return true -} - -// Expect MAPPING-START. -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - return true -} - -// Check if the document content is an empty scalar. -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false // [Go] Huh? -} - -// Check if the next events represent an empty sequence. -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT -} - -// Check if the next events represent an empty mapping. 
-func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT -} - -// Check if the next node can be expressed as a simple key. -func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { - length := 0 - switch emitter.events[emitter.events_head].typ { - case yaml_ALIAS_EVENT: - length += len(emitter.anchor_data.anchor) - case yaml_SCALAR_EVENT: - if emitter.scalar_data.multiline { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) + - len(emitter.scalar_data.value) - case yaml_SEQUENCE_START_EVENT: - if !yaml_emitter_check_empty_sequence(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - case yaml_MAPPING_START_EVENT: - if !yaml_emitter_check_empty_mapping(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - default: - return false - } - return length <= 128 -} - -// Determine an acceptable scalar style. -func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 - if no_tag && !event.implicit && !event.quoted_implicit { - return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") - } - - style := event.scalar_style() - if style == yaml_ANY_SCALAR_STYLE { - style = yaml_PLAIN_SCALAR_STYLE - } - if emitter.canonical { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - if emitter.simple_key_context && emitter.scalar_data.multiline { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if style == yaml_PLAIN_SCALAR_STYLE { - if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || - emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if no_tag && !event.implicit { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { - if !emitter.scalar_data.single_quoted_allowed { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { - if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { - emitter.tag_data.handle = []byte{'!'} - } - emitter.scalar_data.style = style - return true -} - -// Write an achor. -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { - if emitter.anchor_data.anchor == nil { - return true - } - c := []byte{'&'} - if emitter.anchor_data.alias { - c[0] = '*' - } - if !yaml_emitter_write_indicator(emitter, c, true, false, false) { - return false - } - return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) -} - -// Write a tag. 
-func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { - if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { - return true - } - if len(emitter.tag_data.handle) > 0 { - if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { - return false - } - if len(emitter.tag_data.suffix) > 0 { - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - } - } else { - // [Go] Allocate these slices elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { - return false - } - } - return true -} - -// Write a scalar. -func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { - switch emitter.scalar_data.style { - case yaml_PLAIN_SCALAR_STYLE: - return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_SINGLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_DOUBLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_LITERAL_SCALAR_STYLE: - return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) - - case yaml_FOLDED_SCALAR_STYLE: - return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) - } - panic("unknown scalar style") -} - -// Check if a %YAML directive is valid. -func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { - if version_directive.major != 1 || version_directive.minor != 1 { - return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") - } - return true -} - -// Check if a %TAG directive is valid. -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { - handle := tag_directive.handle - prefix := tag_directive.prefix - if len(handle) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") - } - if handle[0] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") - } - if handle[len(handle)-1] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") - } - for i := 1; i < len(handle)-1; i += width(handle[i]) { - if !is_alpha(handle, i) { - return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") - } - } - if len(prefix) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") - } - return true -} - -// Check if an anchor is valid. 
-func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { - if len(anchor) == 0 { - problem := "anchor value must not be empty" - if alias { - problem = "alias value must not be empty" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !is_alpha(anchor, i) { - problem := "anchor value must contain alphanumerical characters only" - if alias { - problem = "alias value must contain alphanumerical characters only" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - } - emitter.anchor_data.anchor = anchor - emitter.anchor_data.alias = alias - return true -} - -// Check if a tag is valid. -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { - if len(tag) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") - } - for i := 0; i < len(emitter.tag_directives); i++ { - tag_directive := &emitter.tag_directives[i] - if bytes.HasPrefix(tag, tag_directive.prefix) { - emitter.tag_data.handle = tag_directive.handle - emitter.tag_data.suffix = tag[len(tag_directive.prefix):] - return true - } - } - emitter.tag_data.suffix = tag - return true -} - -// Check if a scalar is valid. -func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { - var ( - block_indicators = false - flow_indicators = false - line_breaks = false - special_characters = false - - leading_space = false - leading_break = false - trailing_space = false - trailing_break = false - break_space = false - space_break = false - - preceeded_by_whitespace = false - followed_by_whitespace = false - previous_space = false - previous_break = false - ) - - emitter.scalar_data.value = value - - if len(value) == 0 { - emitter.scalar_data.multiline = false - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = false - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { - block_indicators = true - flow_indicators = true - } - - preceeded_by_whitespace = true - for i, w := 0, 0; i < len(value); i += w { - w = width(value[i]) - followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flow_indicators = true - block_indicators = true - case '?', ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '-': - if followed_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flow_indicators = true - case ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '#': - if preceeded_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } - - if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { - special_characters = true - } - if is_space(value, i) { - if i == 0 { - leading_space = true - } - if i+width(value[i]) == len(value) { - trailing_space = true - } - if previous_break { - break_space = true - } - previous_space = true - previous_break = false - } else if is_break(value, i) { - line_breaks = true - if i == 0 { - leading_break = true - } - if i+width(value[i]) == len(value) { - trailing_break = true - } - if previous_space { - space_break = true - } - previous_space = false - previous_break = true - } else { - previous_space = false - previous_break = false - } - - // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. - preceeded_by_whitespace = is_blankz(value, i) - } - - emitter.scalar_data.multiline = line_breaks - emitter.scalar_data.flow_plain_allowed = true - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = true - - if leading_space || leading_break || trailing_space || trailing_break { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if trailing_space { - emitter.scalar_data.block_allowed = false - } - if break_space { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || special_characters { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - emitter.scalar_data.block_allowed = false - } - if line_breaks { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if flow_indicators { - emitter.scalar_data.flow_plain_allowed = false - } - if block_indicators { - emitter.scalar_data.block_plain_allowed = false - } - return true -} - -// Check if the event data is valid.
-func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - emitter.anchor_data.anchor = nil - emitter.tag_data.handle = nil - emitter.tag_data.suffix = nil - emitter.scalar_data.value = nil - - switch event.typ { - case yaml_ALIAS_EVENT: - if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { - return false - } - - case yaml_SCALAR_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - if !yaml_emitter_analyze_scalar(emitter, event.value) { - return false - } - - case yaml_SEQUENCE_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - - case yaml_MAPPING_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - } - return true -} - -// Write the BOM character. -func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - pos := emitter.buffer_pos - emitter.buffer[pos+0] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.buffer_pos += 3 - return true -} - -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { - if !put_break(emitter) { - return false - } - } - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - emitter.whitespace = true - emitter.indention = true - return true -} - -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, indicator) { - return false - } - emitter.whitespace = is_whitespace - emitter.indention = (emitter.indention && is_indention) - emitter.open_ended = false - return true -} - -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - for i := 0; i < len(value); { - var must_write bool - switch value[i] { - case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': - must_write = true - default: - must_write = is_alpha(value, i) - } - if must_write { - if !write(emitter, value, &i) { - return false - } - } 
else { - w := width(value[i]) - for k := 0; k < w; k++ { - octet := value[i] - i++ - if !put(emitter, '%') { - return false - } - - c := octet >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = octet & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - emitter.whitespace = false - emitter.indention = false - if emitter.root_context { - emitter.open_ended = true - } - - return true -} - -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { - return false - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - spaces := false - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || - is_bom(value, i) || is_break(value, i) || - value[i] == '"' || value[i] == '\\' { - - octet := value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, 
rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - var ok bool - switch v { - case 0x00: - ok = put(emitter, '0') - case 0x07: - ok = put(emitter, 'a') - case 0x08: - ok = put(emitter, 'b') - case 0x09: - ok = put(emitter, 't') - case 0x0A: - ok = put(emitter, 'n') - case 0x0b: - ok = put(emitter, 'v') - case 0x0c: - ok = put(emitter, 'f') - case 0x0d: - ok = put(emitter, 'r') - case 0x1b: - ok = put(emitter, 'e') - case 0x22: - ok = put(emitter, '"') - case 0x5c: - ok = put(emitter, '\\') - case 0x85: - ok = put(emitter, 'N') - case 0xA0: - ok = put(emitter, '_') - case 0x2028: - ok = put(emitter, 'L') - case 0x2029: - ok = put(emitter, 'P') - default: - if v <= 0xFF { - ok = put(emitter, 'x') - w = 2 - } else if v <= 0xFFFF { - ok = put(emitter, 'u') - w = 4 - } else { - ok = put(emitter, 'U') - w = 8 - } - for k := (w - 1) * 4; ok && k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - if digit < 10 { - ok = put(emitter, digit+'0') - } else { - ok = put(emitter, digit+'A'-10) - } - } - } - if !ok { - return false - } - spaces = false - } else if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if is_space(value, i+1) { - if !put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else if !write(emitter, value, &i) { - return false - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { - if is_space(value, 0) || is_break(value, 0) { - indent_hint := []byte{'0' + byte(emitter.best_indent)} - if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { - return false - } - } - - emitter.open_ended = false - - var chomp_hint [1]byte - if len(value) == 0 { - chomp_hint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - if !is_break(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - i-- - for value[i]&0xC0 == 0x80 { - i-- - } - if is_break(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - breaks := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - - breaks := true - leading_spaces := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := 0 - for is_break(value, k) { - k += width(value[k]) - } - if !is_blankz(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value, i) - } - if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - return true -} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/encode.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/encode.go deleted file mode 100644 index b7edc799..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/encode.go +++ /dev/null @@ -1,289 +0,0 @@ -package yaml - -import ( - "encoding" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" -) - -type encoder struct { - emitter yaml_emitter_t - event yaml_event_t - out []byte - flow bool -} - -func newEncoder() (e *encoder) { - e = &encoder{} - e.must(yaml_emitter_initialize(&e.emitter)) - yaml_emitter_set_output_string(&e.emitter, &e.out) - yaml_emitter_set_unicode(&e.emitter, true) - e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) - e.emit() - e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) - e.emit() - return e -} - -func (e *encoder) finish() { - e.must(yaml_document_end_event_initialize(&e.event, true)) - e.emit() - e.emitter.open_ended = false - e.must(yaml_stream_end_event_initialize(&e.event)) - e.emit() -} - -func (e *encoder) destroy() { - yaml_emitter_delete(&e.emitter) -} - -func (e *encoder) emit() { - // This will internally delete the e.event value. 
- if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { - e.must(false) - } -} - -func (e *encoder) must(ok bool) { - if !ok { - msg := e.emitter.problem - if msg == "" { - msg = "unknown problem generating YAML content" - } - failf("%s", msg) - } -} - -func (e *encoder) marshal(tag string, in reflect.Value) { - if !in.IsValid() { - e.nilv() - return - } - iface := in.Interface() - if m, ok := iface.(Marshaler); ok { - v, err := m.MarshalYAML() - if err != nil { - fail(err) - } - if v == nil { - e.nilv() - return - } - in = reflect.ValueOf(v) - } else if m, ok := iface.(encoding.TextMarshaler); ok { - text, err := m.MarshalText() - if err != nil { - fail(err) - } - in = reflect.ValueOf(string(text)) - } - switch in.Kind() { - case reflect.Interface: - if in.IsNil() { - e.nilv() - } else { - e.marshal(tag, in.Elem()) - } - case reflect.Map: - e.mapv(tag, in) - case reflect.Ptr: - if in.IsNil() { - e.nilv() - } else { - e.marshal(tag, in.Elem()) - } - case reflect.Struct: - e.structv(tag, in) - case reflect.Slice: - if in.Type().Elem() == mapItemType { - e.itemsv(tag, in) - } else { - e.slicev(tag, in) - } - case reflect.String: - e.stringv(tag, in) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if in.Type() == durationType { - e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) - } else { - e.intv(tag, in) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - e.uintv(tag, in) - case reflect.Float32, reflect.Float64: - e.floatv(tag, in) - case reflect.Bool: - e.boolv(tag, in) - default: - panic("cannot marshal type: " + in.Type().String()) - } -} - -func (e *encoder) mapv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - keys := keyList(in.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - e.marshal("", k) - e.marshal("", in.MapIndex(k)) - } - }) -} - -func (e *encoder) itemsv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) - for _, item := range slice { - e.marshal("", reflect.ValueOf(item.Key)) - e.marshal("", reflect.ValueOf(item.Value)) - } - }) -} - -func (e *encoder) structv(tag string, in reflect.Value) { - sinfo, err := getStructInfo(in.Type()) - if err != nil { - panic(err) - } - e.mappingv(tag, func() { - for _, info := range sinfo.FieldsList { - var value reflect.Value - if info.Inline == nil { - value = in.Field(info.Num) - } else { - value = in.FieldByIndex(info.Inline) - } - if info.OmitEmpty && isZero(value) { - continue - } - e.marshal("", reflect.ValueOf(info.Key)) - e.flow = info.Flow - e.marshal("", value) - } - }) -} - -func (e *encoder) mappingv(tag string, f func()) { - implicit := tag == "" - style := yaml_BLOCK_MAPPING_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_MAPPING_STYLE - } - e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - f() - e.must(yaml_mapping_end_event_initialize(&e.event)) - e.emit() -} - -func (e *encoder) slicev(tag string, in reflect.Value) { - implicit := tag == "" - style := yaml_BLOCK_SEQUENCE_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_SEQUENCE_STYLE - } - e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - n := in.Len() - for i := 0; i < n; i++ { - e.marshal("", in.Index(i)) - } - e.must(yaml_sequence_end_event_initialize(&e.event)) - e.emit() -} 
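Aside for reviewers (not part of the vendored diff): the encoder deleted above is normally reached through yaml.v2's public Marshal entry point, which walks values via the marshal/structv/slicev paths shown in this hunk. The sketch below is a minimal, hedged illustration of that flow; the Target type, its fields, and the literal values are invented for the example, while yaml.Marshal and the omitempty/flow tag options are part of the gopkg.in/yaml.v2 API.

    package main

    import (
        "fmt"

        yaml "gopkg.in/yaml.v2"
    )

    // Target is a hypothetical type used only to exercise the encoder:
    // structv handles the struct, slicev the []string, and the "flow"
    // tag option selects the flow sequence style for Labels.
    type Target struct {
        Name     string   `yaml:"name"`
        Replicas int      `yaml:"replicas,omitempty"` // zero value is omitted
        Labels   []string `yaml:"labels,flow"`
    }

    func main() {
        out, err := yaml.Marshal(Target{Name: "kube-monkey", Labels: []string{"chaos", "testing"}})
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
        // Expected output:
        //   name: kube-monkey
        //   labels: [chaos, testing]
    }

Quoting decisions, such as the base-60 handling just below, are applied inside stringv before a scalar ever reaches the emitter.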
- -// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. -// -// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported -// in YAML 1.2 and by this package, but these should be marshalled quoted for -// the time being for compatibility with other parsers. -func isBase60Float(s string) (result bool) { - // Fast path. - if s == "" { - return false - } - c := s[0] - if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { - return false - } - // Do the full match. - return base60float.MatchString(s) -} - -// From http://yaml.org/type/float.html, except the regular expression there -// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. -var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) - -func (e *encoder) stringv(tag string, in reflect.Value) { - var style yaml_scalar_style_t - s := in.String() - rtag, rs := resolve("", s) - if rtag == yaml_BINARY_TAG { - if tag == "" || tag == yaml_STR_TAG { - tag = rtag - s = rs.(string) - } else if tag == yaml_BINARY_TAG { - failf("explicitly tagged !!binary data must be base64-encoded") - } else { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) - } - } - if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } else if strings.Contains(s, "\n") { - style = yaml_LITERAL_SCALAR_STYLE - } else { - style = yaml_PLAIN_SCALAR_STYLE - } - e.emitScalar(s, "", tag, style) -} - -func (e *encoder) boolv(tag string, in reflect.Value) { - var s string - if in.Bool() { - s = "true" - } else { - s = "false" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) intv(tag string, in reflect.Value) { - s := strconv.FormatInt(in.Int(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) uintv(tag string, in reflect.Value) { - s := strconv.FormatUint(in.Uint(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) floatv(tag string, in reflect.Value) { - // FIXME: Handle 64 bits here. - s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) nilv() { - e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { - implicit := tag == "" - e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) - e.emit() -} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/parserc.go deleted file mode 100644 index 0a7037ad..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/parserc.go +++ /dev/null @@ -1,1096 +0,0 @@ -package yaml - -import ( - "bytes" -) - -// The parser implements the following grammar: -// -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// implicit_document ::= block_node DOCUMENT-END* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// block_node_or_indentless_sequence ::= -// ALIAS -// | properties (block_content | indentless_block_sequence)? -// | block_content -// | indentless_block_sequence -// block_node ::= ALIAS -// | properties block_content? 
-// | block_content -// flow_node ::= ALIAS -// | properties flow_content? -// | flow_content -// properties ::= TAG ANCHOR? | ANCHOR TAG? -// block_content ::= block_collection | flow_collection | SCALAR -// flow_content ::= flow_collection | SCALAR -// block_collection ::= block_sequence | block_mapping -// flow_collection ::= flow_sequence | flow_mapping -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// block_mapping ::= BLOCK-MAPPING_START -// ((KEY block_node_or_indentless_sequence?)? -// (VALUE block_node_or_indentless_sequence?)?)* -// BLOCK-END -// flow_sequence ::= FLOW-SEQUENCE-START -// (flow_sequence_entry FLOW-ENTRY)* -// flow_sequence_entry? -// FLOW-SEQUENCE-END -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// flow_mapping ::= FLOW-MAPPING-START -// (flow_mapping_entry FLOW-ENTRY)* -// flow_mapping_entry? -// FLOW-MAPPING-END -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - -// Peek the next token in the token queue. -func peek_token(parser *yaml_parser_t) *yaml_token_t { - if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - return &parser.tokens[parser.tokens_head] - } - return nil -} - -// Remove the next token from the queue (must be called after peek_token). -func skip_token(parser *yaml_parser_t) { - parser.token_available = false - parser.tokens_parsed++ - parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN - parser.tokens_head++ -} - -// Get the next event. -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { - // Erase the event object. - *event = yaml_event_t{} - - // No events after the end of the stream or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { - return true - } - - // Generate the next event. - return yaml_parser_state_machine(parser, event) -} - -// Set parser error. -func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -// State dispatcher. 
-func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - //trace("yaml_parser_state_machine", "state:", parser.state.String()) - - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, true) - - default: - panic("invalid parser state") - } - return false -} - -// Parse the production: -// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END -// ************ -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_STREAM_START_TOKEN { - return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark) - } - parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - encoding: token.encoding, - } - skip_token(parser) - return true -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// * -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - // Parse extra document end indicators. - if !implicit { - for token.typ == yaml_DOCUMENT_END_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && - token.typ != yaml_TAG_DIRECTIVE_TOKEN && - token.typ != yaml_DOCUMENT_START_TOKEN && - token.typ != yaml_STREAM_END_TOKEN { - // Parse an implicit document. - if !yaml_parser_process_directives(parser, nil, nil) { - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_BLOCK_NODE_STATE - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - } else if token.typ != yaml_STREAM_END_TOKEN { - // Parse an explicit document. - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - start_mark := token.start_mark - if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { - return false - } - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_DOCUMENT_START_TOKEN { - yaml_parser_set_parser_error(parser, - "did not find expected <document start>", token.start_mark) - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE - end_mark := token.end_mark - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: false, - } - skip_token(parser) - - } else { - // Parse the stream end. - parser.state = yaml_PARSE_END_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - } - - return true -} - -// Parse the productions: -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* -// *********** -// -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || - token.typ == yaml_TAG_DIRECTIVE_TOKEN || - token.typ == yaml_DOCUMENT_START_TOKEN || - token.typ == yaml_DOCUMENT_END_TOKEN || - token.typ == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - return yaml_parser_parse_node(parser, event, true, false) -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - start_mark := token.start_mark - end_mark := token.start_mark - - implicit := true - if token.typ == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - return true -} - -// Parse the productions: -// block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * -// block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * -// flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * -// properties ::= TAG ANCHOR? | ANCHOR TAG? 
-// ************************* -// block_content ::= block_collection | flow_collection | SCALAR -// ****** -// flow_content ::= flow_collection | SCALAR -// ****** -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { - //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - skip_token(parser) - return true - } - - start_mark := token.start_mark - end_mark := token.start_mark - - var tag_token bool - var tag_handle, tag_suffix, anchor []byte - var tag_mark yaml_mark_t - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - start_mark = token.start_mark - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - var tag []byte - if tag_token { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_suffix = nil - } else { - for i := range parser.tag_directives { - if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { - tag = append([]byte(nil), parser.tag_directives[i].prefix...) - tag = append(tag, tag_suffix...) 
- break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_SCALAR_TOKEN { - var plain_implicit, quoted_implicit bool - end_mark = token.end_mark - if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - skip_token(parser) - return true - } - if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { - // [Go] Some of the events below can be merged as they differ only on style. - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - return true - } - if len(anchor) > 0 || len(tag) > 0 { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } - - context := "while parsing a flow node" - if block { - context = "while parsing a block node" - } - yaml_parser_set_parser_error_context(parser, context, start_mark, - "did not find expected node content", token.start_mark) - return false -} - -// Parse the productions: -// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* -// -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } - if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", context_mark, - "did not find expected '-' indicator", token.start_mark) -} - -// Parse the productions: -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && - token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? - } - return true -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? 
-// *** * -// (VALUE block_node_or_indentless_sequence?)?)* -// -// BLOCK-END -// ********* -// -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", context_mark, - "did not find expected key", token.start_mark) -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// -// -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", context_mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - skip_token(parser) - return true - } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true -} - -// -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// *** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? - } - return true -} - -// Parse the productions: -// flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * -// -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", context_mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true -} - -// Parse the productions: -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * ***** * -// -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Generate an empty scalar event. 
-func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, // Empty - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true -} - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -// Parse directives. -func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - token := peek_token(parser) - if token == nil { - return false - } - - for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - return true -} - -// Append a tag directive to the directives stack. -func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { - for i := range parser.tag_directives { - if bytes.Equal(value.handle, parser.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - // [Go] I suspect the copy is unnecessary. This was likely done - // because there was no way to track ownership of the data. - value_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(value_copy.handle, value.handle) - copy(value_copy.prefix, value.prefix) - parser.tag_directives = append(parser.tag_directives, value_copy) - return true -} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/readerc.go deleted file mode 100644 index d5fb0972..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/readerc.go +++ /dev/null @@ -1,391 +0,0 @@ -package yaml - -import ( - "io" -) - -// Set the reader error and return 0. 
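The default_tag_directives table above is what lets `!!`-style shorthand tags resolve even when a document declares no %TAG directive of its own. A minimal sketch of that behavior through the public gopkg.in/yaml.v2 API (the import path is an assumption about how this vendored copy is normally consumed; the expansion itself happens inside the deleted parser code):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// "!!str" goes through the default "!!" -> "tag:yaml.org,2002:"
	// directive, so the scalar 123 decodes as a string, not an int.
	var v interface{}
	if err := yaml.Unmarshal([]byte("!!str 123"), &v); err != nil {
		panic(err)
	}
	fmt.Printf("%T %q\n", v, v) // string "123"
}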
-func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - return false -} - -// Byte order marks. -const ( - bom_UTF8 = "\xef\xbb\xbf" - bom_UTF16LE = "\xff\xfe" - bom_UTF16BE = "\xfe\xff" -) - -// Determine the input stream encoding by checking the BOM symbol. If no BOM is -// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { - // Ensure that we had enough bytes in the raw buffer. - for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { - if !yaml_parser_update_raw_buffer(parser) { - return false - } - } - - // Determine the encoding. - buf := parser.raw_buffer - pos := parser.raw_buffer_pos - avail := len(buf) - pos - if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { - parser.encoding = yaml_UTF16LE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { - parser.encoding = yaml_UTF16BE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { - parser.encoding = yaml_UTF8_ENCODING - parser.raw_buffer_pos += 3 - parser.offset += 3 - } else { - parser.encoding = yaml_UTF8_ENCODING - } - return true -} - -// Update the raw buffer. -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { - size_read := 0 - - // Return if the raw buffer is full. - if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { - return true - } - - // Return on EOF. - if parser.eof { - return true - } - - // Move the remaining bytes in the raw buffer to the beginning. - if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { - copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) - } - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] - parser.raw_buffer_pos = 0 - - // Call the read handler to fill the buffer. - size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) - } - return true -} - -// Ensure that the buffer contains at least `length` characters. -// Return true on success, false on failure. -// -// The length is supposed to be significantly less that the buffer size. -func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { - if parser.read_handler == nil { - panic("read handler must be set") - } - - // If the EOF flag is set and the raw buffer is empty, do nothing. - if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - return true - } - - // Return if the buffer contains enough characters. - if parser.unread >= length { - return true - } - - // Determine the input encoding if it is not known yet. - if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - // Move the unread characters to the beginning of the buffer. 
- buffer_len := len(parser.buffer) - if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_len -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_len { - buffer_len = 0 - parser.buffer_pos = 0 - } - - // Open the whole buffer for writing, and cut it before returning. - parser.buffer = parser.buffer[:cap(parser.buffer)] - - // Fill the buffer until it has enough characters. - first := true - for parser.unread < length { - - // Fill the raw buffer if necessary. - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_len] - return false - } - } - first = false - - // Decode the raw buffer. - inner: - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var width int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - - // Decode the next character. - switch parser.encoding { - case yaml_UTF8_ENCODING: - // Decode a UTF-8 character. Check RFC 3629 - // (http://www.ietf.org/rfc/rfc3629.txt) for more details. - // - // The following table (taken from the RFC) is used for - // decoding. - // - // Char. number range | UTF-8 octet sequence - // (hexadecimal) | (binary) - // --------------------+------------------------------------ - // 0000 0000-0000 007F | 0xxxxxxx - // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - // - // Additionally, the characters in the range 0xD800-0xDFFF - // are prohibited as they are reserved for use with UTF-16 - // surrogate pairs. - - // Determine the length of the UTF-8 sequence. - octet := parser.raw_buffer[parser.raw_buffer_pos] - switch { - case octet&0x80 == 0x00: - width = 1 - case octet&0xE0 == 0xC0: - width = 2 - case octet&0xF0 == 0xE0: - width = 3 - case octet&0xF8 == 0xF0: - width = 4 - default: - // The leading octet is invalid. - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - // Check if the raw buffer contains an incomplete character. - if width > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - break inner - } - - // Decode the leading octet. - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - // Check and decode the trailing octets. - for k := 1; k < width; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - // Check if the octet is valid. - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - // Decode the octet. - value = (value << 6) + rune(octet&0x3F) - } - - // Check the length of the sequence against the value. - switch { - case width == 1: - case width == 2 && value >= 0x80: - case width == 3 && value >= 0x800: - case width == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - // Check the range of the value. 
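The leading-octet classification above follows the RFC 3629 table verbatim. A standalone sketch that cross-checks the same width rule against the standard library (assuming only that unicode/utf8 agrees with the table in the deleted reader, which it does by construction):

package main

import (
	"fmt"
	"unicode/utf8"
)

// widthFromLeadingOctet mirrors the switch above: 0xxxxxxx is 1 byte,
// 110xxxxx is 2, 1110xxxx is 3, and 11110xxx is 4.
func widthFromLeadingOctet(b byte) int {
	switch {
	case b&0x80 == 0x00:
		return 1
	case b&0xE0 == 0xC0:
		return 2
	case b&0xF0 == 0xE0:
		return 3
	case b&0xF8 == 0xF0:
		return 4
	}
	return 0 // invalid leading octet
}

func main() {
	for _, s := range []string{"a", "é", "€", "𐍈"} {
		_, size := utf8.DecodeRuneInString(s)
		fmt.Println(size, widthFromLeadingOctet(s[0])) // the two always match
	}
}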
- if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - - case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - high, low = 1, 0 - } - - // The UTF-16 encoding is not as simple as one might - // naively think. Check RFC 2781 - // (http://www.ietf.org/rfc/rfc2781.txt). - // - // Normally, two subsequent bytes describe a Unicode - // character. However a special technique (called a - // surrogate pair) is used for specifying character - // values larger than 0xFFFF. - // - // A surrogate pair consists of two pseudo-characters: - // high surrogate area (0xD800-0xDBFF) - // low surrogate area (0xDC00-0xDFFF) - // - // The following formulas are used for decoding - // and encoding characters using surrogate pairs: - // - // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - // W1 = 110110yyyyyyyyyy - // W2 = 110111xxxxxxxxxx - // - // where U is the character value, W1 is the high surrogate - // area, W2 is the low surrogate area. - - // Check for incomplete UTF-16 character. - if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - break inner - } - - // Get the character. - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - // Check for unexpected low surrogate area. - if value&0xFC00 == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - // Check for a high surrogate area. - if value&0xFC00 == 0xD800 { - width = 4 - - // Check for incomplete surrogate pair. - if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - break inner - } - - // Get the next character. - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - // Check for a low surrogate area. - if value2&0xFC00 != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - // Generate the value of the surrogate pair. - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - width = 2 - } - - default: - panic("impossible") - } - - // Check if the character is in the allowed range: - // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - // | [#x10000-#x10FFFF] (32 bit) - switch { - case value == 0x09: - case value == 0x0A: - case value == 0x0D: - case value >= 0x20 && value <= 0x7E: - case value == 0x85: - case value >= 0xA0 && value <= 0xD7FF: - case value >= 0xE000 && value <= 0xFFFD: - case value >= 0x10000 && value <= 0x10FFFF: - default: - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - // Move the raw pointers. - parser.raw_buffer_pos += width - parser.offset += width - - // Finally put the character into the buffer. - if value <= 0x7F { - // 0000 0000-0000 007F . 0xxxxxxx - parser.buffer[buffer_len+0] = byte(value) - } else if value <= 0x7FF { - // 0000 0080-0000 07FF . 
110xxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) - parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) - } else if value <= 0xFFFF { - // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) - } else { - // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) - } - buffer_len += width - - parser.unread++ - } - - // On EOF, put NUL into the buffer and return. - if parser.eof { - parser.buffer[buffer_len] = 0 - buffer_len++ - parser.unread++ - break - } - } - parser.buffer = parser.buffer[:buffer_len] - return true -} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/resolve.go deleted file mode 100644 index 93a86327..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/resolve.go +++ /dev/null @@ -1,203 +0,0 @@ -package yaml - -import ( - "encoding/base64" - "math" - "strconv" - "strings" - "unicode/utf8" -) - -type resolveMapItem struct { - value interface{} - tag string -} - -var resolveTable = make([]byte, 256) -var resolveMap = make(map[string]resolveMapItem) - -func init() { - t := resolveTable - t[int('+')] = 'S' // Sign - t[int('-')] = 'S' - for _, c := range "0123456789" { - t[int(c)] = 'D' // Digit - } - for _, c := range "yYnNtTfFoO~" { - t[int(c)] = 'M' // In map - } - t[int('.')] = '.' // Float (potentially in map) - - var resolveMapList = []struct { - v interface{} - tag string - l []string - }{ - {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, - {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, - {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, - {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, - {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, - {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, - {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, - {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, - {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, - {"<<", yaml_MERGE_TAG, []string{"<<"}}, - } - - m := resolveMap - for _, item := range resolveMapList { - for _, s := range item.l { - m[s] = resolveMapItem{item.v, item.tag} - } - } -} - -const longTagPrefix = "tag:yaml.org,2002:" - -func shortTag(tag string) string { - // TODO This can easily be made faster and produce less garbage. - if strings.HasPrefix(tag, longTagPrefix) { - return "!!" 
+ tag[len(longTagPrefix):] - } - return tag -} - -func longTag(tag string) string { - if strings.HasPrefix(tag, "!!") { - return longTagPrefix + tag[2:] - } - return tag -} - -func resolvableTag(tag string) bool { - switch tag { - case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: - return true - } - return false -} - -func resolve(tag string, in string) (rtag string, out interface{}) { - if !resolvableTag(tag) { - return tag, in - } - - defer func() { - switch tag { - case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: - return - } - failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) - }() - - // Any data is accepted as a !!str or !!binary. - // Otherwise, the prefix is enough of a hint about what it might be. - hint := byte('N') - if in != "" { - hint = resolveTable[in[0]] - } - if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { - // Handle things we can lookup in a map. - if item, ok := resolveMap[in]; ok { - return item.tag, item.value - } - - // Base 60 floats are a bad idea, were dropped in YAML 1.2, and - // are purposefully unsupported here. They're still quoted on - // the way out for compatibility with other parser, though. - - switch hint { - case 'M': - // We've already checked the map above. - - case '.': - // Not in the map, so maybe a normal float. - floatv, err := strconv.ParseFloat(in, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - - case 'D', 'S': - // Int, float, or timestamp. - plain := strings.Replace(in, "_", "", -1) - intv, err := strconv.ParseInt(plain, 0, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - uintv, err := strconv.ParseUint(plain, 0, 64) - if err == nil { - return yaml_INT_TAG, uintv - } - floatv, err := strconv.ParseFloat(plain, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - if strings.HasPrefix(plain, "0b") { - intv, err := strconv.ParseInt(plain[2:], 2, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - uintv, err := strconv.ParseUint(plain[2:], 2, 64) - if err == nil { - return yaml_INT_TAG, uintv - } - } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt(plain[3:], 2, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, -int(intv) - } else { - return yaml_INT_TAG, -intv - } - } - } - // XXX Handle timestamps here. - - default: - panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") - } - } - if tag == yaml_BINARY_TAG { - return yaml_BINARY_TAG, in - } - if utf8.ValidString(in) { - return yaml_STR_TAG, in - } - return yaml_BINARY_TAG, encodeBase64(in) -} - -// encodeBase64 encodes s as base64 that is broken up into multiple lines -// as appropriate for the resulting length. 
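The resolve table above implements YAML 1.1 implicit typing: unquoted y/yes/on and n/no/off become booleans, ~ becomes null, and 0b-prefixed scalars parse as binary integers. A minimal sketch of the observable effect via the public gopkg.in/yaml.v2 entry point (assumed import path):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var m map[string]interface{}
	doc := "a: yes\nb: off\nc: ~\nd: 0b1010\n"
	if err := yaml.Unmarshal([]byte(doc), &m); err != nil {
		panic(err)
	}
	// yes/off are YAML 1.1 booleans, ~ is null, 0b1010 is binary 10.
	fmt.Println(m["a"], m["b"], m["c"], m["d"]) // true false <nil> 10
}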
-func encodeBase64(s string) string {
-	const lineLen = 70
-	encLen := base64.StdEncoding.EncodedLen(len(s))
-	lines := encLen/lineLen + 1
-	buf := make([]byte, encLen*2+lines)
-	in := buf[0:encLen]
-	out := buf[encLen:]
-	base64.StdEncoding.Encode(in, []byte(s))
-	k := 0
-	for i := 0; i < len(in); i += lineLen {
-		j := i + lineLen
-		if j > len(in) {
-			j = len(in)
-		}
-		k += copy(out[k:], in[i:j])
-		if lines > 1 {
-			out[k] = '\n'
-			k++
-		}
-	}
-	return string(out[:k])
-}
diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/scannerc.go
deleted file mode 100644
index fe93b190..00000000
--- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/scannerc.go
+++ /dev/null
@@ -1,2710 +0,0 @@
-package yaml
-
-import (
-	"bytes"
-	"fmt"
-)
-
-// Introduction
-// ************
-//
-// The following notes assume that you are familiar with the YAML specification
-// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
-// some cases we are less restrictive than it requires.
-//
-// The process of transforming a YAML stream into a sequence of events is
-// divided into two steps: Scanning and Parsing.
-//
-// The Scanner transforms the input stream into a sequence of tokens, while the
-// Parser transforms the sequence of tokens produced by the Scanner into a
-// sequence of parsing events.
-//
-// The Scanner is rather clever and complicated. The Parser, on the contrary,
-// is a straightforward implementation of a recursive descent parser (or,
-// LL(1) parser, as it is usually called).
-//
-// Actually, there are two issues of Scanning that might be called "clever";
-// the rest is quite straightforward. The issues are "block collection start"
-// and "simple keys". Both issues are explained below in detail.
-//
-// Here the Scanning step is explained and implemented. We start with the list
-// of all the tokens produced by the Scanner together with short descriptions.
-//
-// Now, tokens:
-//
-//      STREAM-START(encoding)          # The stream start.
-//      STREAM-END                      # The stream end.
-//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
-//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
-//      DOCUMENT-START                  # '---'
-//      DOCUMENT-END                    # '...'
-//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
-//      BLOCK-MAPPING-START             # sequence or a block mapping.
-//      BLOCK-END                       # Indentation decrease.
-//      FLOW-SEQUENCE-START             # '['
-//      FLOW-SEQUENCE-END               # ']'
-//      FLOW-MAPPING-START              # '{'
-//      FLOW-MAPPING-END                # '}'
-//      BLOCK-ENTRY                     # '-'
-//      FLOW-ENTRY                      # ','
-//      KEY                             # '?' or nothing (simple keys).
-//      VALUE                           # ':'
-//      ALIAS(anchor)                   # '*anchor'
-//      ANCHOR(anchor)                  # '&anchor'
-//      TAG(handle,suffix)              # '!handle!suffix'
-//      SCALAR(value,style)             # A scalar.
-//
-// The following two tokens are "virtual" tokens denoting the beginning and the
-// end of the stream:
-//
-//      STREAM-START(encoding)
-//      STREAM-END
-//
-// We pass the information about the input stream encoding with the
-// STREAM-START token.
-//
-// The next two tokens are responsible for tags:
-//
-//      VERSION-DIRECTIVE(major,minor)
-//      TAG-DIRECTIVE(handle,prefix)
-//
-// Example:
-//
-//      %YAML   1.1
-//      %TAG    !       !foo
-//      %TAG    !yaml!  tag:yaml.org,2002:
-//      ---
-//
-// The corresponding sequence of tokens:
-//
-//      STREAM-START(utf-8)
-//      VERSION-DIRECTIVE(1,1)
-//      TAG-DIRECTIVE("!","!foo")
-//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
-//      DOCUMENT-START
-//      STREAM-END
-//
-// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
-// line.
-//
-// The document start and end indicators are represented by:
-//
-//      DOCUMENT-START
-//      DOCUMENT-END
-//
-// Note that if a YAML stream contains an implicit document (without '---'
-// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
-// produced.
-//
-// In the following examples, we present whole documents together with the
-// produced tokens.
-//
-// 1. An implicit document:
-//
-//      'a scalar'
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      SCALAR("a scalar",single-quoted)
-//      STREAM-END
-//
-// 2. An explicit document:
-//
-//      ---
-//      'a scalar'
-//      ...
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      DOCUMENT-START
-//      SCALAR("a scalar",single-quoted)
-//      DOCUMENT-END
-//      STREAM-END
-//
-// 3. Several documents in a stream:
-//
-//      'a scalar'
-//      ---
-//      'another scalar'
-//      ---
-//      'yet another scalar'
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      SCALAR("a scalar",single-quoted)
-//      DOCUMENT-START
-//      SCALAR("another scalar",single-quoted)
-//      DOCUMENT-START
-//      SCALAR("yet another scalar",single-quoted)
-//      STREAM-END
-//
-// We have already introduced the SCALAR token above. The following tokens are
-// used to describe aliases, anchors, tags, and scalars:
-//
-//      ALIAS(anchor)
-//      ANCHOR(anchor)
-//      TAG(handle,suffix)
-//      SCALAR(value,style)
-//
-// The following series of examples illustrates the usage of these tokens:
-//
-// 1. A recursive sequence:
-//
-//      &A [ *A ]
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      ANCHOR("A")
-//      FLOW-SEQUENCE-START
-//      ALIAS("A")
-//      FLOW-SEQUENCE-END
-//      STREAM-END
-//
-// 2. A tagged scalar:
-//
-//      !!float "3.14"  # A good approximation.
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      TAG("!!","float")
-//      SCALAR("3.14",double-quoted)
-//      STREAM-END
-//
-// 3. Various scalar styles:
-//
-//      --- # Implicit empty plain scalars do not produce tokens.
-//      --- a plain scalar
-//      --- 'a single-quoted scalar'
-//      --- "a double-quoted scalar"
-//      --- |-
-//        a literal scalar
-//      --- >-
-//        a folded
-//        scalar
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      DOCUMENT-START
-//      DOCUMENT-START
-//      SCALAR("a plain scalar",plain)
-//      DOCUMENT-START
-//      SCALAR("a single-quoted scalar",single-quoted)
-//      DOCUMENT-START
-//      SCALAR("a double-quoted scalar",double-quoted)
-//      DOCUMENT-START
-//      SCALAR("a literal scalar",literal)
-//      DOCUMENT-START
-//      SCALAR("a folded scalar",folded)
-//      STREAM-END
-//
-// Now it's time to review collection-related tokens. We will start with
-// flow collections:
-//
-//      FLOW-SEQUENCE-START
-//      FLOW-SEQUENCE-END
-//      FLOW-MAPPING-START
-//      FLOW-MAPPING-END
-//      FLOW-ENTRY
-//      KEY
-//      VALUE
-//
-// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
-// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
-// correspondingly. FLOW-ENTRY represents the ',' indicator. Finally, the
-// indicators '?' and ':', which are used for denoting mapping keys and values,
-// are represented by the KEY and VALUE tokens.
-//
-// The following examples show flow collections:
-//
-// 1. A flow sequence:
-//
-//      [item 1, item 2, item 3]
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      FLOW-SEQUENCE-START
-//      SCALAR("item 1",plain)
-//      FLOW-ENTRY
-//      SCALAR("item 2",plain)
-//      FLOW-ENTRY
-//      SCALAR("item 3",plain)
-//      FLOW-SEQUENCE-END
-//      STREAM-END
-//
-// 2. A flow mapping:
-//
-//      {
-//          a simple key: a value,  # Note that the KEY token is produced.
-//          ? a complex key: another value,
-//      }
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      FLOW-MAPPING-START
-//      KEY
-//      SCALAR("a simple key",plain)
-//      VALUE
-//      SCALAR("a value",plain)
-//      FLOW-ENTRY
-//      KEY
-//      SCALAR("a complex key",plain)
-//      VALUE
-//      SCALAR("another value",plain)
-//      FLOW-ENTRY
-//      FLOW-MAPPING-END
-//      STREAM-END
-//
-// A simple key is a key which is not denoted by the '?' indicator. Note that
-// the Scanner still produces the KEY token whenever it encounters a simple key.
-//
-// For scanning block collections, the following tokens are used (note that we
-// repeat KEY and VALUE here):
-//
-//      BLOCK-SEQUENCE-START
-//      BLOCK-MAPPING-START
-//      BLOCK-END
-//      BLOCK-ENTRY
-//      KEY
-//      VALUE
-//
-// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
-// increase that precedes a block collection (cf. the INDENT token in Python).
-// The token BLOCK-END denotes indentation decrease that ends a block collection
-// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities
-// that make detection of these tokens more complex.
-//
-// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
-// '-', '?', and ':' correspondingly.
-//
-// The following examples show how the tokens BLOCK-SEQUENCE-START,
-// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
-//
-// 1. Block sequences:
-//
-//      - item 1
-//      - item 2
-//      -
-//        - item 3.1
-//        - item 3.2
-//      -
-//        key 1: value 1
-//        key 2: value 2
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      BLOCK-SEQUENCE-START
-//      BLOCK-ENTRY
-//      SCALAR("item 1",plain)
-//      BLOCK-ENTRY
-//      SCALAR("item 2",plain)
-//      BLOCK-ENTRY
-//      BLOCK-SEQUENCE-START
-//      BLOCK-ENTRY
-//      SCALAR("item 3.1",plain)
-//      BLOCK-ENTRY
-//      SCALAR("item 3.2",plain)
-//      BLOCK-END
-//      BLOCK-ENTRY
-//      BLOCK-MAPPING-START
-//      KEY
-//      SCALAR("key 1",plain)
-//      VALUE
-//      SCALAR("value 1",plain)
-//      KEY
-//      SCALAR("key 2",plain)
-//      VALUE
-//      SCALAR("value 2",plain)
-//      BLOCK-END
-//      BLOCK-END
-//      STREAM-END
-//
-// 2. Block mappings:
-//
-//      a simple key: a value   # The KEY token is produced here.
-//      ? a complex key
-//      : another value
-//      a mapping:
-//        key 1: value 1
-//        key 2: value 2
-//      a sequence:
-//        - item 1
-//        - item 2
-//
-// Tokens:
-//
-//      STREAM-START(utf-8)
-//      BLOCK-MAPPING-START
-//      KEY
-//      SCALAR("a simple key",plain)
-//      VALUE
-//      SCALAR("a value",plain)
-//      KEY
-//      SCALAR("a complex key",plain)
-//      VALUE
-//      SCALAR("another value",plain)
-//      KEY
-//      SCALAR("a mapping",plain)
-//      VALUE
-//      BLOCK-MAPPING-START
-//      KEY
-//      SCALAR("key 1",plain)
-//      VALUE
-//      SCALAR("value 1",plain)
-//      KEY
-//      SCALAR("key 2",plain)
-//      VALUE
-//      SCALAR("value 2",plain)
-//      BLOCK-END
-//      KEY
-//      SCALAR("a sequence",plain)
-//      VALUE
-//      BLOCK-SEQUENCE-START
-//      BLOCK-ENTRY
-//      SCALAR("item 1",plain)
-//      BLOCK-ENTRY
-//      SCALAR("item 2",plain)
-//      BLOCK-END
-//      BLOCK-END
-//      STREAM-END
-//
-// YAML does not always require a new block collection to start on a new
-// line. If the current line contains only '-', '?', and ':' indicators, a new
-// block collection may start at the current line. The following examples
-// illustrate this case:
-//
-// 1.
Collections in a sequence: -// -// - - item 1 -// - item 2 -// - key 1: value 1 -// key 2: value 2 -// - ? complex key -// : complex value -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("complex key") -// VALUE -// SCALAR("complex value") -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Collections in a mapping: -// -// ? a sequence -// : - item 1 -// - item 2 -// ? a mapping -// : key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// KEY -// SCALAR("a mapping",plain) -// VALUE -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML also permits non-indented sequences if they are included into a block -// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: -// -// key: -// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key",plain) -// VALUE -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// - -// Ensure that the buffer contains the required number of characters. -// Return true on success, false on failure (reader error or memory error). -func cache(parser *yaml_parser_t, length int) bool { - // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) - return parser.unread >= length || yaml_parser_update_buffer(parser, length) -} - -// Advance the buffer pointer. -func skip(parser *yaml_parser_t) { - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) -} - -func skip_line(parser *yaml_parser_t) { - if is_crlf(parser.buffer, parser.buffer_pos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.buffer_pos += 2 - } else if is_break(parser.buffer, parser.buffer_pos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) - } -} - -// Copy a character to a string buffer and advance pointers. -func read(parser *yaml_parser_t, s []byte) []byte { - w := width(parser.buffer[parser.buffer_pos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.buffer_pos] - parser.buffer_pos++ - } else { - s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) - parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -// Copy a line break character to a string buffer and advance pointers. 
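As the two preceding examples show, flow and block collections tokenize very differently (FLOW-* vs BLOCK-* tokens) yet describe the same data, and a non-indented sequence under a mapping key skips BLOCK-SEQUENCE-START entirely. A minimal sketch confirming both forms decode identically, assuming the public gopkg.in/yaml.v2 API:

package main

import (
	"fmt"
	"reflect"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	flow := []byte("{key: [item 1, item 2]}")
	block := []byte("key:\n- item 1\n- item 2\n") // non-indented sequence

	var a, b map[string]interface{}
	if err := yaml.Unmarshal(flow, &a); err != nil {
		panic(err)
	}
	if err := yaml.Unmarshal(block, &b); err != nil {
		panic(err)
	}
	fmt.Println(reflect.DeepEqual(a, b)) // true
}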
-func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - switch { - case buf[pos] == '\r' && buf[pos+1] == '\n': - // CR LF . LF - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - case buf[pos] == '\r' || buf[pos] == '\n': - // CR|LF . LF - s = append(s, '\n') - parser.buffer_pos += 1 - case buf[pos] == '\xC2' && buf[pos+1] == '\x85': - // NEL . LF - s = append(s, '\n') - parser.buffer_pos += 2 - case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) - parser.buffer_pos += 3 - default: - return s - } - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - return s -} - -// Get the next token. -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - // Erase the token object. - *token = yaml_token_t{} // [Go] Is this necessary? - - // No tokens after STREAM-END or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - // Ensure that the tokens queue contains enough tokens. - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - // Fetch the next token from the queue. - *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.tokens_parsed++ - parser.token_available = false - - if token.typ == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - return true -} - -// Set the scanner error and return false. -func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet") -} - -func trace(args ...interface{}) func() { - pargs := append([]interface{}{"+++"}, args...) - fmt.Println(pargs...) - pargs = append([]interface{}{"---"}, args...) - return func() { fmt.Println(pargs...) } -} - -// Ensure that the tokens queue contains at least one token which can be -// returned to the Parser. -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - // While we need more tokens to fetch, do it. - for { - // Check if we really need to fetch more tokens. - need_more_tokens := false - - if parser.tokens_head == len(parser.tokens) { - // Queue is empty. - need_more_tokens = true - } else { - // Check if any potential simple key may occupy the head position. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - if simple_key.possible && simple_key.token_number == parser.tokens_parsed { - need_more_tokens = true - break - } - } - } - - // We are finished. - if !need_more_tokens { - break - } - // Fetch the next token. - if !yaml_parser_fetch_next_token(parser) { - return false - } - } - - parser.token_available = true - return true -} - -// The dispatcher for token fetchers. 
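The trace helper above is a small debugging aid: it prints an entry line immediately and returns a closure that prints the matching exit line. A standalone sketch of the same pattern (the fetchNextToken function is a hypothetical stand-in for any scanner routine being traced):

package main

import "fmt"

// trace copies the deleted helper: print "+++ args" on entry and return
// a closure printing "--- args", intended to be armed with defer.
func trace(args ...interface{}) func() {
	pargs := append([]interface{}{"+++"}, args...)
	fmt.Println(pargs...)
	pargs = append([]interface{}{"---"}, args...)
	return func() { fmt.Println(pargs...) }
}

func fetchNextToken() {
	defer trace("fetchNextToken")() // note the trailing () to arm the defer
	fmt.Println("    scanning...")
}

func main() {
	fetchNextToken()
}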
-func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { - // Ensure that the buffer is initialized. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we just started scanning. Fetch STREAM-START then. - if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - // Eat whitespaces and comments until we reach the next token. - if !yaml_parser_scan_to_next_token(parser) { - return false - } - - // Remove obsolete potential simple keys. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - // Check the indentation level against the current column. - if !yaml_parser_unroll_indent(parser, parser.mark.column) { - return false - } - - // Ensure that the buffer contains at least 4 characters. 4 is the length - // of the longest indicators ('--- ' and '... '). - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - // Is it the end of the stream? - if is_z(parser.buffer, parser.buffer_pos) { - return yaml_parser_fetch_stream_end(parser) - } - - // Is it a directive? - if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - buf := parser.buffer - pos := parser.buffer_pos - - // Is it the document start indicator? - if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) - } - - // Is it the document end indicator? - if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) - } - - // Is it the flow sequence start indicator? - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) - } - - // Is it the flow mapping start indicator? - if parser.buffer[parser.buffer_pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) - } - - // Is it the flow sequence end indicator? - if parser.buffer[parser.buffer_pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - // Is it the flow mapping end indicator? - if parser.buffer[parser.buffer_pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - // Is it the flow entry indicator? - if parser.buffer[parser.buffer_pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - // Is it the block entry indicator? - if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - // Is it the key indicator? - if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_key(parser) - } - - // Is it the value indicator? - if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_value(parser) - } - - // Is it an alias? - if parser.buffer[parser.buffer_pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - // Is it an anchor? - if parser.buffer[parser.buffer_pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - // Is it a tag? - if parser.buffer[parser.buffer_pos] == '!' 
{ - return yaml_parser_fetch_tag(parser) - } - - // Is it a literal scalar? - if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - // Is it a folded scalar? - if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - // Is it a single-quoted scalar? - if parser.buffer[parser.buffer_pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - // Is it a double-quoted scalar? - if parser.buffer[parser.buffer_pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - // Is it a plain scalar? - // - // A plain scalar may start with any non-blank characters except - // - // '-', '?', ':', ',', '[', ']', '{', '}', - // '#', '&', '*', '!', '|', '>', '\'', '\"', - // '%', '@', '`'. - // - // In the block context (and, for the '-' indicator, in the flow context - // too), it may also start with the characters - // - // '-', '?', ':' - // - // if it is followed by a non-space character. - // - // The last rule is more restrictive than the specification requires. - // [Go] Make this logic more reasonable. - //switch parser.buffer[parser.buffer_pos] { - //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': - //} - if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || - parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || - parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || - (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level == 0 && - (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && - !is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_plain_scalar(parser) - } - - // If we don't determine the token type so far, it is an error. - return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -// Check the list of potential simple keys and remove the positions that -// cannot contain simple keys anymore. -func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { - // Check for a potential simple key for each flow level. - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - - // The specification requires that a simple key - // - // - is limited to a single line, - // - is shorter than 1024 characters. - if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { - - // Check if the potential simple key to be removed is required. 
- if simple_key.required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - simple_key.possible = false - } - } - return true -} - -// Check if a simple key may start at the current position and add it if -// needed. -func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { - // A simple key is required at the current position if the scanner is in - // the block context and the current column coincides with the indentation - // level. - - required := parser.flow_level == 0 && parser.indent == parser.mark.column - - // A simple key is required only when it is the first token in the current - // line. Therefore it is always allowed. But we add a check anyway. - if required && !parser.simple_key_allowed { - panic("should not happen") - } - - // - // If the current position may start a simple key, save it. - // - if parser.simple_key_allowed { - simple_key := yaml_simple_key_t{ - possible: true, - required: required, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - } - simple_key.mark = parser.mark - - if !yaml_parser_remove_simple_key(parser) { - return false - } - parser.simple_keys[len(parser.simple_keys)-1] = simple_key - } - return true -} - -// Remove a potential simple key at the current flow level. -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { - i := len(parser.simple_keys) - 1 - if parser.simple_keys[i].possible { - // If the key is required, it is an error. - if parser.simple_keys[i].required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", parser.simple_keys[i].mark, - "could not find expected ':'") - } - } - // Remove the key from the stack. - parser.simple_keys[i].possible = false - return true -} - -// Increase the flow level and resize the simple key list if needed. -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { - // Reset the simple key on the next level. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - // Increase the flow level. - parser.flow_level++ - return true -} - -// Decrease the flow level. -func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { - if parser.flow_level > 0 { - parser.flow_level-- - parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] - } - return true -} - -// Push the current indentation level to the stack and set the new level -// the current column is greater than the indentation level. In this case, -// append or insert the specified token into the token queue. -func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - if parser.indent < column { - // Push the current indentation level to the stack and set the new - // indentation level. - parser.indents = append(parser.indents, parser.indent) - parser.indent = column - - // Create a token and insert it into the queue. - token := yaml_token_t{ - typ: typ, - start_mark: mark, - end_mark: mark, - } - if number > -1 { - number -= parser.tokens_parsed - } - yaml_insert_token(parser, number, &token) - } - return true -} - -// Pop indentation levels from the indents stack until the current level -// becomes less or equal to the column. For each intendation level, append -// the BLOCK-END token. -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { - // In the flow context, do nothing. 
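The roll/unroll pair above maintains the indentation stack that produces the Python-style INDENT/DEDENT behavior described earlier: one BLOCK-END per popped level. A simplified standalone sketch of the unroll side of that stack discipline (printing is a hypothetical stand-in for the real token-queue insert):

package main

import "fmt"

// unroll pops levels greater than the current column and emits one
// BLOCK-END per popped level, like yaml_parser_unroll_indent.
func unroll(indents []int, column int) []int {
	for len(indents) > 0 && indents[len(indents)-1] > column {
		fmt.Println("BLOCK-END")
		indents = indents[:len(indents)-1]
	}
	return indents
}

func main() {
	indents := []int{0, 2, 4}       // three nested block collections
	fmt.Println(unroll(indents, 0)) // prints BLOCK-END twice, then [0]
}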
- if parser.flow_level > 0 { - return true - } - - // Loop through the intendation levels in the stack. - for parser.indent > column { - // Create a token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - - // Pop the indentation level. - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - return true -} - -// Initialize the scanner and produce the STREAM-START token. -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { - - // Set the initial indentation. - parser.indent = -1 - - // Initialize the simple key stack. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - // A simple key is allowed at the beginning of the stream. - parser.simple_key_allowed = true - - // We have started. - parser.stream_start_produced = true - - // Create the STREAM-START token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_START_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - encoding: parser.encoding, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the STREAM-END token and shut down the scanner. -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { - - // Force new line. - if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the STREAM-END token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. - token := yaml_token_t{} - if !yaml_parser_scan_directive(parser, &token) { - return false - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the DOCUMENT-START or DOCUMENT-END token. -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Consume the token. - start_mark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - end_mark := parser.mark - - // Create the DOCUMENT-START or DOCUMENT-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // The indicators '[' and '{' may start a simple key. 
- if !yaml_parser_save_simple_key(parser) { - return false - } - - // Increase the flow level. - if !yaml_parser_increase_flow_level(parser) { - return false - } - - // A simple key may follow the indicators '[' and '{'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset any potential simple key on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Decrease the flow level. - if !yaml_parser_decrease_flow_level(parser) { - return false - } - - // No simple keys after the indicators ']' and '}'. - parser.simple_key_allowed = false - - // Consume the token. - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-ENTRY token. -func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after ','. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_FLOW_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the BLOCK-ENTRY token. -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { - // Check if the scanner is in the block context. - if parser.flow_level == 0 { - // Check if we are allowed to start a new entry. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - // Add the BLOCK-SEQUENCE-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { - return false - } - } else { - // It is an error for the '-' indicator to occur in the flow context, - // but we let the Parser detect and report about it because the Parser - // is able to point to the context. - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '-'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the BLOCK-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the KEY token. -func yaml_parser_fetch_key(parser *yaml_parser_t) bool { - - // In the block context, additional checks are required. 
- if parser.flow_level == 0 { - // Check if we are allowed to start a new key (not nessesary simple). - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping keys are not allowed in this context") - } - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '?' in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the KEY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the VALUE token. -func yaml_parser_fetch_value(parser *yaml_parser_t) bool { - - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - // Have we found a simple key? - if simple_key.possible { - // Create the KEY token and insert it into the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: simple_key.mark, - end_mark: simple_key.mark, - } - yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) - - // In the block context, we may need to add the BLOCK-MAPPING-START token. - if !yaml_parser_roll_indent(parser, simple_key.mark.column, - simple_key.token_number, - yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { - return false - } - - // Remove the simple key. - simple_key.possible = false - - // A simple key cannot follow another simple key. - parser.simple_key_allowed = false - - } else { - // The ':' indicator follows a complex key. - - // In the block context, extra checks are required. - if parser.flow_level == 0 { - - // Check if we are allowed to start a complex value. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping values are not allowed in this context") - } - - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Simple keys after ':' are allowed in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - } - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the VALUE token and append it to the queue. - token := yaml_token_t{ - typ: yaml_VALUE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the ALIAS or ANCHOR token. -func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // An anchor or an alias could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow an anchor or an alias. - parser.simple_key_allowed = false - - // Create the ALIAS or ANCHOR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_anchor(parser, &token, typ) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the TAG token. -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { - // A tag could be a simple key. 
- if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a tag. - parser.simple_key_allowed = false - - // Create the TAG token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - // Remove any potential simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // A simple key may follow a block scalar. - parser.simple_key_allowed = true - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,plain) token. -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Eat whitespaces and comments until the next token is found. -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - - // Until the next token is not found. - for { - // Allow the BOM mark to start a line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - // Eat whitespaces. - // Tabs are allowed: - // - in the flow context - // - in the block context, but not at the beginning of the line or - // after '-', '?', or ':' (complex value). - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Eat a comment until a line break. - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // If it is a line break, eat it. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - - // In the block context, a new line may start a simple key. - if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - break // We have found a token. 
-		}
-	}
-
-	return true
-}
-
-// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
-//
-// Scope:
-//      %YAML    1.1    # a comment \n
-//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-//      %TAG    !yaml!  tag:yaml.org,2002:  \n
-//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-//
-func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
-	// Eat '%'.
-	start_mark := parser.mark
-	skip(parser)
-
-	// Scan the directive name.
-	var name []byte
-	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
-		return false
-	}
-
-	// Is it a YAML directive?
-	if bytes.Equal(name, []byte("YAML")) {
-		// Scan the VERSION directive value.
-		var major, minor int8
-		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
-			return false
-		}
-		end_mark := parser.mark
-
-		// Create a VERSION-DIRECTIVE token.
-		*token = yaml_token_t{
-			typ: yaml_VERSION_DIRECTIVE_TOKEN,
-			start_mark: start_mark,
-			end_mark: end_mark,
-			major: major,
-			minor: minor,
-		}
-
-		// Is it a TAG directive?
-	} else if bytes.Equal(name, []byte("TAG")) {
-		// Scan the TAG directive value.
-		var handle, prefix []byte
-		if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
-			return false
-		}
-		end_mark := parser.mark
-
-		// Create a TAG-DIRECTIVE token.
-		*token = yaml_token_t{
-			typ: yaml_TAG_DIRECTIVE_TOKEN,
-			start_mark: start_mark,
-			end_mark: end_mark,
-			value: handle,
-			prefix: prefix,
-		}
-
-		// Unknown directive.
-	} else {
-		yaml_parser_set_scanner_error(parser, "while scanning a directive",
-			start_mark, "found unknown directive name")
-		return false
-	}
-
-	// Eat the rest of the line including any comments.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-
-	for is_blank(parser.buffer, parser.buffer_pos) {
-		skip(parser)
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-	}
-
-	if parser.buffer[parser.buffer_pos] == '#' {
-		for !is_breakz(parser.buffer, parser.buffer_pos) {
-			skip(parser)
-			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-				return false
-			}
-		}
-	}
-
-	// Check if we are at the end of the line.
-	if !is_breakz(parser.buffer, parser.buffer_pos) {
-		yaml_parser_set_scanner_error(parser, "while scanning a directive",
-			start_mark, "did not find expected comment or line break")
-		return false
-	}
-
-	// Eat a line break.
-	if is_break(parser.buffer, parser.buffer_pos) {
-		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-			return false
-		}
-		skip_line(parser)
-	}
-
-	return true
-}
-
-// Scan the directive name.
-//
-// Scope:
-//      %YAML   1.1     # a comment \n
-//       ^^^^
-//      %TAG    !yaml!  tag:yaml.org,2002:  \n
-//       ^^^
-//
-func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
-	// Consume the directive name.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-
-	var s []byte
-	for is_alpha(parser.buffer, parser.buffer_pos) {
-		s = read(parser, s)
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-	}
-
-	// Check if the name is empty.
-	if len(s) == 0 {
-		yaml_parser_set_scanner_error(parser, "while scanning a directive",
-			start_mark, "could not find expected directive name")
-		return false
-	}
-
-	// Check for a blank character after the name.
- if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unexpected non-alphabetical character") - return false - } - *name = s - return true -} - -// Scan the value of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ -func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the major version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { - return false - } - - // Eat '.'. - if parser.buffer[parser.buffer_pos] != '.' { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected digit or '.' character") - } - - skip(parser) - - // Consume the minor version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { - return false - } - return true -} - -const max_number_length = 2 - -// Scan the version number of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { - - // Repeat while the next character is digit. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var value, length int8 - for is_digit(parser.buffer, parser.buffer_pos) { - // Check if the number is too long. - length++ - if length > max_number_length { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the number was present. - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - *number = value - return true -} - -// Scan the value of a TAG-DIRECTIVE token. -// -// Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { - var handle_value, prefix_value []byte - - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a handle. - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - // Expect a whitespace. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blank(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - // Eat whitespaces. - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a prefix. 
- if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - // Expect a whitespace or line break. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { - var s []byte - - // Eat the indicator character. - start_mark := parser.mark - skip(parser) - - // Consume the value. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - if len(s) == 0 || - !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || - parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '`') { - context := "while scanning an alias" - if typ == yaml_ANCHOR_TOKEN { - context = "while scanning an anchor" - } - yaml_parser_set_scanner_error(parser, context, start_mark, - "did not find expected alphabetic or numeric character") - return false - } - - // Create a token. - *token = yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. - */ - -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { - var handle, suffix []byte - - start_mark := parser.mark - - // Check if the tag is in the canonical form. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - if parser.buffer[parser.buffer_pos+1] == '<' { - // Keep the handle as '' - - // Eat '!<' - skip(parser) - skip(parser) - - // Consume the tag value. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - - // Check for '>' and eat it. - if parser.buffer[parser.buffer_pos] != '>' { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find the expected '>'") - return false - } - - skip(parser) - } else { - // The tag has either the '!suffix' or the '!handle!suffix' form. - - // First, try to scan a handle. - if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { - return false - } - - // Check if it is, indeed, handle. - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - // Scan the suffix now. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - } else { - // It wasn't a handle after all. Scan the rest of the tag. - if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { - return false - } - - // Set the handle to '!'. - handle = []byte{'!'} - - // A special case: the '!' tag. Set the handle to '' and the - // suffix to '!'. 
- if len(suffix) == 0 { - handle, suffix = suffix, handle - } - } - } - - // Check the character which ends the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find expected whitespace or line break") - return false - } - - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_TAG_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - suffix: suffix, - } - return true -} - -// Scan a tag handle. -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { - // Check the initial '!' character. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] != '!' { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - - var s []byte - - // Copy the '!' character. - s = read(parser, s) - - // Copy all subsequent alphabetical and numerical characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the trailing character is '!' and copy it. - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - // It's either the '!' tag or not really a tag handle. If it's a %TAG - // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && !(s[0] == '!' && s[1] == 0) { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - return true -} - -// Scan a tag. -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { - //size_t length = head ? strlen((char *)head) : 0 - var s []byte - - // Copy the head if needed. - // - // Note that we don't copy the leading '!' character. - if len(head) > 1 { - s = append(s, head[1:]...) - } - - // Scan the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // The set of characters that may appear in URI is as follows: - // - // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - // '%'. - // [Go] Convert this into more reasonable logic. - for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || - parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || - parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || - parser.buffer[parser.buffer_pos] == '!' 
|| parser.buffer[parser.buffer_pos] == '~' ||
-		parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
-		parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
-		parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
-		parser.buffer[parser.buffer_pos] == '%' {
-		// Check if it is a URI-escape sequence.
-		if parser.buffer[parser.buffer_pos] == '%' {
-			if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
-				return false
-			}
-		} else {
-			s = read(parser, s)
-		}
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-	}
-
-	// Check if the tag is non-empty.
-	if len(s) == 0 {
-		yaml_parser_set_scanner_tag_error(parser, directive,
-			start_mark, "did not find expected tag URI")
-		return false
-	}
-	*uri = s
-	return true
-}
-
-// Decode a URI-escape sequence corresponding to a single UTF-8 character.
-func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
-
-	// Decode the required number of characters.
-	w := 1024
-	for w > 0 {
-		// Check for a URI-escaped octet.
-		if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
-			return false
-		}
-
-		if !(parser.buffer[parser.buffer_pos] == '%' &&
-			is_hex(parser.buffer, parser.buffer_pos+1) &&
-			is_hex(parser.buffer, parser.buffer_pos+2)) {
-			return yaml_parser_set_scanner_tag_error(parser, directive,
-				start_mark, "did not find URI escaped octet")
-		}
-
-		// Get the octet.
-		octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
-
-		// If it is the leading octet, determine the length of the UTF-8 sequence.
-		if w == 1024 {
-			w = width(octet)
-			if w == 0 {
-				return yaml_parser_set_scanner_tag_error(parser, directive,
-					start_mark, "found an incorrect leading UTF-8 octet")
-			}
-		} else {
-			// Check if the trailing octet is correct.
-			if octet&0xC0 != 0x80 {
-				return yaml_parser_set_scanner_tag_error(parser, directive,
-					start_mark, "found an incorrect trailing UTF-8 octet")
-			}
-		}
-
-		// Copy the octet and move the pointers.
-		*s = append(*s, octet)
-		skip(parser)
-		skip(parser)
-		skip(parser)
-		w--
-	}
-	return true
-}
-
-// Scan a block scalar.
-func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
-	// Eat the indicator '|' or '>'.
-	start_mark := parser.mark
-	skip(parser)
-
-	// Scan the additional block scalar indicators.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-
-	// Check for a chomping indicator.
-	var chomping, increment int
-	if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
-		// Set the chomping method and eat the indicator.
-		if parser.buffer[parser.buffer_pos] == '+' {
-			chomping = +1
-		} else {
-			chomping = -1
-		}
-		skip(parser)
-
-		// Check for an indentation indicator.
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-		if is_digit(parser.buffer, parser.buffer_pos) {
-			// Check that the indentation is greater than 0.
-			if parser.buffer[parser.buffer_pos] == '0' {
-				yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
-					start_mark, "found an indentation indicator equal to 0")
-				return false
-			}
-
-			// Get the indentation level and eat the indicator.
-			increment = as_digit(parser.buffer, parser.buffer_pos)
-			skip(parser)
-		}
-
-	} else if is_digit(parser.buffer, parser.buffer_pos) {
-		// Do the same as above, but in the opposite order.
-
-		if parser.buffer[parser.buffer_pos] == '0' {
-			yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
-				start_mark, "found an indentation indicator equal to 0")
-			return false
-		}
-		increment = as_digit(parser.buffer, parser.buffer_pos)
-		skip(parser)
-
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-		if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
-			if parser.buffer[parser.buffer_pos] == '+' {
-				chomping = +1
-			} else {
-				chomping = -1
-			}
-			skip(parser)
-		}
-	}
-
-	// Eat whitespaces and comments to the end of the line.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-	for is_blank(parser.buffer, parser.buffer_pos) {
-		skip(parser)
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-	}
-	if parser.buffer[parser.buffer_pos] == '#' {
-		for !is_breakz(parser.buffer, parser.buffer_pos) {
-			skip(parser)
-			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-				return false
-			}
-		}
-	}
-
-	// Check if we are at the end of the line.
-	if !is_breakz(parser.buffer, parser.buffer_pos) {
-		yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
-			start_mark, "did not find expected comment or line break")
-		return false
-	}
-
-	// Eat a line break.
-	if is_break(parser.buffer, parser.buffer_pos) {
-		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-			return false
-		}
-		skip_line(parser)
-	}
-
-	end_mark := parser.mark
-
-	// Set the indentation level if it was specified.
-	var indent int
-	if increment > 0 {
-		if parser.indent >= 0 {
-			indent = parser.indent + increment
-		} else {
-			indent = increment
-		}
-	}
-
-	// Scan the leading line breaks and determine the indentation level if needed.
-	var s, leading_break, trailing_breaks []byte
-	if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
-		return false
-	}
-
-	// Scan the block scalar content.
-	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-		return false
-	}
-	var leading_blank, trailing_blank bool
-	for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
-		// We are at the beginning of a non-empty line.
-
-		// Is it a trailing whitespace?
-		trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
-
-		// Check if we need to fold the leading line break.
-		if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
-			// Do we need to join the lines by space?
-			if len(trailing_breaks) == 0 {
-				s = append(s, ' ')
-			}
-		} else {
-			s = append(s, leading_break...)
-		}
-		leading_break = leading_break[:0]
-
-		// Append the remaining line breaks.
-		s = append(s, trailing_breaks...)
-		trailing_breaks = trailing_breaks[:0]
-
-		// Is it a leading whitespace?
-		leading_blank = is_blank(parser.buffer, parser.buffer_pos)
-
-		// Consume the current line.
-		for !is_breakz(parser.buffer, parser.buffer_pos) {
-			s = read(parser, s)
-			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-				return false
-			}
-		}
-
-		// Consume the line break.
-		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-			return false
-		}
-
-		leading_break = read_line(parser, leading_break)
-
-		// Eat the following indentation spaces and line breaks.
-		if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
-			return false
-		}
-	}
-
-	// Chomp the tail.
-	if chomping != -1 {
-		s = append(s, leading_break...)
-	}
-	if chomping == 1 {
-		s = append(s, trailing_breaks...)
-	}
-
-	// Create a token.
-	*token = yaml_token_t{
-		typ: yaml_SCALAR_TOKEN,
-		start_mark: start_mark,
-		end_mark: end_mark,
-		value: s,
-		style: yaml_LITERAL_SCALAR_STYLE,
-	}
-	if !literal {
-		token.style = yaml_FOLDED_SCALAR_STYLE
-	}
-	return true
-}
-
-// Scan indentation spaces and line breaks for a block scalar. Determine the
-// indentation level if needed.
-func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
-	*end_mark = parser.mark
-
-	// Eat the indentation spaces and line breaks.
-	max_indent := 0
-	for {
-		// Eat the indentation spaces.
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
-			skip(parser)
-			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-				return false
-			}
-		}
-		if parser.mark.column > max_indent {
-			max_indent = parser.mark.column
-		}
-
-		// Check for a tab character messing the indentation.
-		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
-			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
-				start_mark, "found a tab character where an indentation space is expected")
-		}
-
-		// Have we found a non-empty line?
-		if !is_break(parser.buffer, parser.buffer_pos) {
-			break
-		}
-
-		// Consume the line break.
-		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-			return false
-		}
-		// [Go] Should really be returning breaks instead.
-		*breaks = read_line(parser, *breaks)
-		*end_mark = parser.mark
-	}
-
-	// Determine the indentation level if needed.
-	if *indent == 0 {
-		*indent = max_indent
-		if *indent < parser.indent+1 {
-			*indent = parser.indent + 1
-		}
-		if *indent < 1 {
-			*indent = 1
-		}
-	}
-	return true
-}
-
-// Scan a quoted scalar.
-func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
-	// Eat the left quote.
-	start_mark := parser.mark
-	skip(parser)
-
-	// Consume the content of the quoted scalar.
-	var s, leading_break, trailing_breaks, whitespaces []byte
-	for {
-		// Check that there are no document indicators at the beginning of the line.
-		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
-			return false
-		}
-
-		if parser.mark.column == 0 &&
-			((parser.buffer[parser.buffer_pos+0] == '-' &&
-				parser.buffer[parser.buffer_pos+1] == '-' &&
-				parser.buffer[parser.buffer_pos+2] == '-') ||
-				(parser.buffer[parser.buffer_pos+0] == '.' &&
-					parser.buffer[parser.buffer_pos+1] == '.' &&
-					parser.buffer[parser.buffer_pos+2] == '.')) &&
-			is_blankz(parser.buffer, parser.buffer_pos+3) {
-			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
-				start_mark, "found unexpected document indicator")
-			return false
-		}
-
-		// Check for EOF.
-		if is_z(parser.buffer, parser.buffer_pos) {
-			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
-				start_mark, "found unexpected end of stream")
-			return false
-		}
-
-		// Consume non-blank characters.
-		leading_blanks := false
-		for !is_blankz(parser.buffer, parser.buffer_pos) {
-			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
-				// It is an escaped single quote.
-				s = append(s, '\'')
-				skip(parser)
-				skip(parser)
-
-			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
-				// It is a right single quote.
-				break
-			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
-				// It is a right double quote.
-				break
-
-			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
-				// It is an escaped line break.
-				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
-					return false
-				}
-				skip(parser)
-				skip_line(parser)
-				leading_blanks = true
-				break
-
-			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
-				// It is an escape sequence.
-				code_length := 0
-
-				// Check the escape character.
-				switch parser.buffer[parser.buffer_pos+1] {
-				case '0':
-					s = append(s, 0)
-				case 'a':
-					s = append(s, '\x07')
-				case 'b':
-					s = append(s, '\x08')
-				case 't', '\t':
-					s = append(s, '\x09')
-				case 'n':
-					s = append(s, '\x0A')
-				case 'v':
-					s = append(s, '\x0B')
-				case 'f':
-					s = append(s, '\x0C')
-				case 'r':
-					s = append(s, '\x0D')
-				case 'e':
-					s = append(s, '\x1B')
-				case ' ':
-					s = append(s, '\x20')
-				case '"':
-					s = append(s, '"')
-				case '\'':
-					s = append(s, '\'')
-				case '\\':
-					s = append(s, '\\')
-				case 'N': // NEL (#x85)
-					s = append(s, '\xC2')
-					s = append(s, '\x85')
-				case '_': // #xA0
-					s = append(s, '\xC2')
-					s = append(s, '\xA0')
-				case 'L': // LS (#x2028)
-					s = append(s, '\xE2')
-					s = append(s, '\x80')
-					s = append(s, '\xA8')
-				case 'P': // PS (#x2029)
-					s = append(s, '\xE2')
-					s = append(s, '\x80')
-					s = append(s, '\xA9')
-				case 'x':
-					code_length = 2
-				case 'u':
-					code_length = 4
-				case 'U':
-					code_length = 8
-				default:
-					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
-						start_mark, "found unknown escape character")
-					return false
-				}
-
-				skip(parser)
-				skip(parser)
-
-				// Consume an arbitrary escape code.
-				if code_length > 0 {
-					var value int
-
-					// Scan the character value.
-					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
-						return false
-					}
-					for k := 0; k < code_length; k++ {
-						if !is_hex(parser.buffer, parser.buffer_pos+k) {
-							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
-								start_mark, "did not find expected hexadecimal number")
-							return false
-						}
-						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
-					}
-
-					// Check the value and write the character.
- if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found invalid Unicode character escape code") - return false - } - if value <= 0x7F { - s = append(s, byte(value)) - } else if value <= 0x7FF { - s = append(s, byte(0xC0+(value>>6))) - s = append(s, byte(0x80+(value&0x3F))) - } else if value <= 0xFFFF { - s = append(s, byte(0xE0+(value>>12))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } else { - s = append(s, byte(0xF0+(value>>18))) - s = append(s, byte(0x80+((value>>12)&0x3F))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } - - // Advance the pointer. - for k := 0; k < code_length; k++ { - skip(parser) - } - } - } else { - // It is a non-escaped non-blank character. - s = read(parser, s) - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - // Check if we are at the end of the scalar. - if single { - if parser.buffer[parser.buffer_pos] == '\'' { - break - } - } else { - if parser.buffer[parser.buffer_pos] == '"' { - break - } - } - - // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Join the whitespaces or fold line breaks. - if leading_blanks { - // Do we need to fold line breaks? - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Eat the right quote. - skip(parser) - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - return true -} - -// Scan a plain scalar. -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - - var s, leading_break, trailing_breaks, whitespaces []byte - var leading_blanks bool - var indent = parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - // Consume the content of the plain scalar. - for { - // Check for a document indicator. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' 
&&
-			parser.buffer[parser.buffer_pos+1] == '.' &&
-			parser.buffer[parser.buffer_pos+2] == '.')) &&
-			is_blankz(parser.buffer, parser.buffer_pos+3) {
-			break
-		}
-
-		// Check for a comment.
-		if parser.buffer[parser.buffer_pos] == '#' {
-			break
-		}
-
-		// Consume non-blank characters.
-		for !is_blankz(parser.buffer, parser.buffer_pos) {
-
-			// Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
-			if parser.flow_level > 0 &&
-				parser.buffer[parser.buffer_pos] == ':' &&
-				!is_blankz(parser.buffer, parser.buffer_pos+1) {
-				yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
-					start_mark, "found unexpected ':'")
-				return false
-			}
-
-			// Check for indicators that may end a plain scalar.
-			if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
-				(parser.flow_level > 0 &&
-					(parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
-						parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
-						parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
-						parser.buffer[parser.buffer_pos] == '}')) {
-				break
-			}
-
-			// Check if we need to join whitespaces and breaks.
-			if leading_blanks || len(whitespaces) > 0 {
-				if leading_blanks {
-					// Do we need to fold line breaks?
-					if leading_break[0] == '\n' {
-						if len(trailing_breaks) == 0 {
-							s = append(s, ' ')
-						} else {
-							s = append(s, trailing_breaks...)
-						}
-					} else {
-						s = append(s, leading_break...)
-						s = append(s, trailing_breaks...)
-					}
-					trailing_breaks = trailing_breaks[:0]
-					leading_break = leading_break[:0]
-					leading_blanks = false
-				} else {
-					s = append(s, whitespaces...)
-					whitespaces = whitespaces[:0]
-				}
-			}
-
-			// Copy the character.
-			s = read(parser, s)
-
-			end_mark = parser.mark
-			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-				return false
-			}
-		}
-
-		// Is it the end?
-		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
-			break
-		}
-
-		// Consume blank characters.
-		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-			return false
-		}
-
-		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
-			if is_blank(parser.buffer, parser.buffer_pos) {
-
-				// Check for a tab character that abuses indentation.
-				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
-					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
-						start_mark, "found a tab character that violates indentation")
-					return false
-				}
-
-				// Consume a space or a tab character.
-				if !leading_blanks {
-					whitespaces = read(parser, whitespaces)
-				} else {
-					skip(parser)
-				}
-			} else {
-				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-					return false
-				}
-
-				// Check if it is a first line break.
-				if !leading_blanks {
-					whitespaces = whitespaces[:0]
-					leading_break = read_line(parser, leading_break)
-					leading_blanks = true
-				} else {
-					trailing_breaks = read_line(parser, trailing_breaks)
-				}
-			}
-			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-				return false
-			}
-		}
-
-		// Check the indentation level.
-		if parser.flow_level == 0 && parser.mark.column < indent {
-			break
-		}
-	}
-
-	// Create a token.
-	*token = yaml_token_t{
-		typ: yaml_SCALAR_TOKEN,
-		start_mark: start_mark,
-		end_mark: end_mark,
-		value: s,
-		style: yaml_PLAIN_SCALAR_STYLE,
-	}
-
-	// Note that we change the 'simple_key_allowed' flag.
- if leading_blanks { - parser.simple_key_allowed = true - } - return true -} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/sorter.go deleted file mode 100644 index 5958822f..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/sorter.go +++ /dev/null @@ -1,104 +0,0 @@ -package yaml - -import ( - "reflect" - "unicode" -) - -type keyList []reflect.Value - -func (l keyList) Len() int { return len(l) } -func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l keyList) Less(i, j int) bool { - a := l[i] - b := l[j] - ak := a.Kind() - bk := b.Kind() - for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { - a = a.Elem() - ak = a.Kind() - } - for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { - b = b.Elem() - bk = b.Kind() - } - af, aok := keyFloat(a) - bf, bok := keyFloat(b) - if aok && bok { - if af != bf { - return af < bf - } - if ak != bk { - return ak < bk - } - return numLess(a, b) - } - if ak != reflect.String || bk != reflect.String { - return ak < bk - } - ar, br := []rune(a.String()), []rune(b.String()) - for i := 0; i < len(ar) && i < len(br); i++ { - if ar[i] == br[i] { - continue - } - al := unicode.IsLetter(ar[i]) - bl := unicode.IsLetter(br[i]) - if al && bl { - return ar[i] < br[i] - } - if al || bl { - return bl - } - var ai, bi int - var an, bn int64 - for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { - an = an*10 + int64(ar[ai]-'0') - } - for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { - bn = bn*10 + int64(br[bi]-'0') - } - if an != bn { - return an < bn - } - if ai != bi { - return ai < bi - } - return ar[i] < br[i] - } - return len(ar) < len(br) -} - -// keyFloat returns a float value for v if it is a number/bool -// and whether it is a number/bool or not. -func keyFloat(v reflect.Value) (f float64, ok bool) { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return float64(v.Int()), true - case reflect.Float32, reflect.Float64: - return v.Float(), true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return float64(v.Uint()), true - case reflect.Bool: - if v.Bool() { - return 1, true - } - return 0, true - } - return 0, false -} - -// numLess returns whether a < b. -// a and b must necessarily have the same kind. -func numLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return a.Int() < b.Int() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Bool: - return !a.Bool() && b.Bool() - } - panic("not a number") -} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/writerc.go deleted file mode 100644 index 190362f2..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/writerc.go +++ /dev/null @@ -1,89 +0,0 @@ -package yaml - -// Set the writer error and return false. -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - return false -} - -// Flush the output buffer. 
-func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("write handler not set") - } - - // Check if the buffer is empty. - if emitter.buffer_pos == 0 { - return true - } - - // If the output encoding is UTF-8, we don't need to recode the buffer. - if emitter.encoding == yaml_UTF8_ENCODING { - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true - } - - // Recode the buffer into the raw buffer. - var low, high int - if emitter.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - high, low = 1, 0 - } - - pos := 0 - for pos < emitter.buffer_pos { - // See the "reader.c" code for more details on UTF-8 encoding. Note - // that we assume that the buffer contains a valid UTF-8 sequence. - - // Read the next UTF-8 character. - octet := emitter.buffer[pos] - - var w int - var value rune - switch { - case octet&0x80 == 0x00: - w, value = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, value = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, value = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, value = 4, rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = emitter.buffer[pos+k] - value = (value << 6) + (rune(octet) & 0x3F) - } - pos += w - - // Write the character. - if value < 0x10000 { - var b [2]byte - b[high] = byte(value >> 8) - b[low] = byte(value & 0xFF) - emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) - } else { - // Write the character using a surrogate pair (check "reader.c"). - var b [4]byte - value -= 0x10000 - b[high] = byte(0xD8 + (value >> 18)) - b[low] = byte((value >> 10) & 0xFF) - b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) - b[low+2] = byte(value & 0xFF) - emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) - } - } - - // Write the raw buffer. - if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - emitter.raw_buffer = emitter.raw_buffer[:0] - return true -} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yaml.go deleted file mode 100644 index e3e01edc..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yaml.go +++ /dev/null @@ -1,344 +0,0 @@ -// Package yaml implements YAML support for the Go language. -// -// Source code and other details for the project are available at GitHub: -// -// https://github.com/go-yaml/yaml -// -package yaml - -import ( - "errors" - "fmt" - "reflect" - "strings" - "sync" -) - -// MapSlice encodes and decodes as a YAML map. -// The order of keys is preserved when encoding and decoding. -type MapSlice []MapItem - -// MapItem is an item in a MapSlice. -type MapItem struct { - Key, Value interface{} -} - -// The Unmarshaler interface may be implemented by types to customize their -// behavior when being unmarshaled from a YAML document. The UnmarshalYAML -// method receives a function that may be called to unmarshal the original -// YAML value into a field or variable. It is safe to call the unmarshal -// function parameter more than once if necessary. 
-type Unmarshaler interface {
-	UnmarshalYAML(unmarshal func(interface{}) error) error
-}
-
-// The Marshaler interface may be implemented by types to customize their
-// behavior when being marshaled into a YAML document. The returned value
-// is marshaled in place of the original value implementing Marshaler.
-//
-// If an error is returned by MarshalYAML, the marshaling procedure stops
-// and returns with the provided error.
-type Marshaler interface {
-	MarshalYAML() (interface{}, error)
-}
-
-// Unmarshal decodes the first document found within the in byte slice
-// and assigns decoded values into the out value.
-//
-// Maps and pointers (to a struct, string, int, etc) are accepted as out
-// values. If an internal pointer within a struct is not initialized,
-// the yaml package will initialize it if necessary for unmarshalling
-// the provided data. The out parameter must not be nil.
-//
-// The type of the decoded values should be compatible with the respective
-// values in out. If one or more values cannot be decoded due to type
-// mismatches, decoding continues partially until the end of the YAML
-// content, and a *yaml.TypeError is returned with details for all
-// missed values.
-//
-// Struct fields are only unmarshalled if they are exported (have an
-// upper case first letter), and are unmarshalled using the field name
-// lowercased as the default key. Custom keys may be defined via the
-// "yaml" name in the field tag: the content preceding the first comma
-// is used as the key, and the following comma-separated options are
-// used to tweak the marshalling process (see Marshal).
-// Conflicting names result in a runtime error.
-//
-// For example:
-//
-//     type T struct {
-//         F int `yaml:"a,omitempty"`
-//         B int
-//     }
-//     var t T
-//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
-//
-// See the documentation of Marshal for the format of tags and a list of
-// supported tag options.
-//
-func Unmarshal(in []byte, out interface{}) (err error) {
-	defer handleErr(&err)
-	d := newDecoder()
-	p := newParser(in)
-	defer p.destroy()
-	node := p.parse()
-	if node != nil {
-		v := reflect.ValueOf(out)
-		if v.Kind() == reflect.Ptr && !v.IsNil() {
-			v = v.Elem()
-		}
-		d.unmarshal(node, v)
-	}
-	if len(d.terrors) > 0 {
-		return &TypeError{d.terrors}
-	}
-	return nil
-}
-
-// Marshal serializes the value provided into a YAML document. The structure
-// of the generated document will reflect the structure of the value itself.
-// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
-//
-// Struct fields are only marshalled if they are exported (have an upper case
-// first letter), and are marshalled using the field name lowercased as the
-// default key. Custom keys may be defined via the "yaml" name in the field
-// tag: the content preceding the first comma is used as the key, and the
-// following comma-separated options are used to tweak the marshalling process.
-// Conflicting names result in a runtime error.
-//
-// The field tag format accepted is:
-//
-//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
-//
-// The following flags are currently supported:
-//
-//     omitempty    Only include the field if it's not set to the zero
-//                  value for the type or to empty slices or maps.
-//                  Does not apply to zero valued structs.
-//
-//     flow         Marshal using a flow style (useful for structs,
-//                  sequences and maps).
-//
-//     inline       Inline the struct it's applied to, so its fields
-//                  are processed as if they were part of the outer
-//                  struct.
-//
-// In addition, if the key is "-", the field is ignored.
-//
-// For example:
-//
-//     type T struct {
-//         F int "a,omitempty"
-//         B int
-//     }
-//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
-//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
-//
-func Marshal(in interface{}) (out []byte, err error) {
-	defer handleErr(&err)
-	e := newEncoder()
-	defer e.destroy()
-	e.marshal("", reflect.ValueOf(in))
-	e.finish()
-	out = e.out
-	return
-}
-
-func handleErr(err *error) {
-	if v := recover(); v != nil {
-		if e, ok := v.(yamlError); ok {
-			*err = e.err
-		} else {
-			panic(v)
-		}
-	}
-}
-
-type yamlError struct {
-	err error
-}
-
-func fail(err error) {
-	panic(yamlError{err})
-}
-
-func failf(format string, args ...interface{}) {
-	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
-}
-
-// A TypeError is returned by Unmarshal when one or more fields in
-// the YAML document cannot be properly decoded into the requested
-// types. When this error is returned, the value is still
-// unmarshaled partially.
-type TypeError struct {
-	Errors []string
-}
-
-func (e *TypeError) Error() string {
-	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
-}
-
-// --------------------------------------------------------------------------
-// Maintain a mapping of keys to structure field indexes
-
-// The code in this section was copied from mgo/bson.
-
-// structInfo holds details for the serialization of fields of
-// a given struct.
-type structInfo struct {
-	FieldsMap map[string]fieldInfo
-	FieldsList []fieldInfo
-
-	// InlineMap is the number of the field in the struct that
-	// contains an ,inline map, or -1 if there's none.
-	InlineMap int
-}
-
-type fieldInfo struct {
-	Key string
-	Num int
-	OmitEmpty bool
-	Flow bool
-
-	// Inline holds the field index if the field is part of an inlined struct.
-	Inline []int
-}
-
-var structMap = make(map[reflect.Type]*structInfo)
-var fieldMapMutex sync.RWMutex
-
-func getStructInfo(st reflect.Type) (*structInfo, error) {
-	fieldMapMutex.RLock()
-	sinfo, found := structMap[st]
-	fieldMapMutex.RUnlock()
-	if found {
-		return sinfo, nil
-	}
-
-	n := st.NumField()
-	fieldsMap := make(map[string]fieldInfo)
-	fieldsList := make([]fieldInfo, 0, n)
-	inlineMap := -1
-	for i := 0; i != n; i++ {
-		field := st.Field(i)
-		if field.PkgPath != "" {
-			continue // Private field
-		}
-
-		info := fieldInfo{Num: i}
-
-		tag := field.Tag.Get("yaml")
-		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
-			tag = string(field.Tag)
-		}
-		if tag == "-" {
-			continue
-		}
-
-		inline := false
-		fields := strings.Split(tag, ",")
-		if len(fields) > 1 {
-			for _, flag := range fields[1:] {
-				switch flag {
-				case "omitempty":
-					info.OmitEmpty = true
-				case "flow":
-					info.Flow = true
-				case "inline":
-					inline = true
-				default:
-					return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
-				}
-			}
-			tag = fields[0]
-		}
-
-		if inline {
-			switch field.Type.Kind() {
-			// TODO: Implement support for inline maps.
- //case reflect.Map: - // if inlineMap >= 0 { - // return nil, errors.New("Multiple ,inline maps in struct " + st.String()) - // } - // if field.Type.Key() != reflect.TypeOf("") { - // return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) - // } - // inlineMap = info.Num - case reflect.Struct: - sinfo, err := getStructInfo(field.Type) - if err != nil { - return nil, err - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) - } - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - default: - //return nil, errors.New("Option ,inline needs a struct value or map field") - return nil, errors.New("Option ,inline needs a struct value field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "Duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - - sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} - - fieldMapMutex.Lock() - structMap[st] = sinfo - fieldMapMutex.Unlock() - return sinfo, nil -} - -func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.String: - return len(v.String()) == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Slice: - return v.Len() == 0 - case reflect.Map: - return v.Len() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Struct: - vt := v.Type() - for i := v.NumField()-1; i >= 0; i-- { - if vt.Field(i).PkgPath != "" { - continue // Private field - } - if !isZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yamlh.go deleted file mode 100644 index d60a6b6b..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yamlh.go +++ /dev/null @@ -1,716 +0,0 @@ -package yaml - -import ( - "io" -) - -// The version directive data. -type yaml_version_directive_t struct { - major int8 // The major version number. - minor int8 // The minor version number. -} - -// The tag directive data. -type yaml_tag_directive_t struct { - handle []byte // The tag handle. - prefix []byte // The tag prefix. -} - -type yaml_encoding_t int - -// The stream encoding. -const ( - // Let the parser choose the encoding. - yaml_ANY_ENCODING yaml_encoding_t = iota - - yaml_UTF8_ENCODING // The default UTF-8 encoding. - yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. - yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. -) - -type yaml_break_t int - -// Line break types. -const ( - // Let the parser choose the break type. - yaml_ANY_BREAK yaml_break_t = iota - - yaml_CR_BREAK // Use CR for line breaks (Mac style). - yaml_LN_BREAK // Use LN for line breaks (Unix style). - yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). 
-)
-
-type yaml_error_type_t int
-
-// Many bad things could happen with the parser and emitter.
-const (
-	// No error is produced.
-	yaml_NO_ERROR yaml_error_type_t = iota
-
-	yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
-	yaml_READER_ERROR // Cannot read or decode the input stream.
-	yaml_SCANNER_ERROR // Cannot scan the input stream.
-	yaml_PARSER_ERROR // Cannot parse the input stream.
-	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
-	yaml_WRITER_ERROR // Cannot write to the output stream.
-	yaml_EMITTER_ERROR // Cannot emit a YAML stream.
-)
-
-// The pointer position.
-type yaml_mark_t struct {
-	index int // The position index.
-	line int // The position line.
-	column int // The position column.
-}
-
-// Node Styles
-
-type yaml_style_t int8
-
-type yaml_scalar_style_t yaml_style_t
-
-// Scalar styles.
-const (
-	// Let the emitter choose the style.
-	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
-
-	yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
-	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
-	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
-	yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
-	yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
-)
-
-type yaml_sequence_style_t yaml_style_t
-
-// Sequence styles.
-const (
-	// Let the emitter choose the style.
-	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
-
-	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
-	yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
-)
-
-type yaml_mapping_style_t yaml_style_t
-
-// Mapping styles.
-const (
-	// Let the emitter choose the style.
-	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
-
-	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
-	yaml_FLOW_MAPPING_STYLE // The flow mapping style.
-)
-
-// Tokens
-
-type yaml_token_type_t int
-
-// Token types.
-const (
-	// An empty token.
-	yaml_NO_TOKEN yaml_token_type_t = iota
-
-	yaml_STREAM_START_TOKEN // A STREAM-START token.
-	yaml_STREAM_END_TOKEN // A STREAM-END token.
-
-	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
-	yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
-	yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
-	yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
-
-	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
-	yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
-	yaml_BLOCK_END_TOKEN // A BLOCK-END token.
-
-	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
-	yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
-	yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
-	yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
-
-	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
-	yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
-	yaml_KEY_TOKEN // A KEY token.
-	yaml_VALUE_TOKEN // A VALUE token.
-
-	yaml_ALIAS_TOKEN // An ALIAS token.
-	yaml_ANCHOR_TOKEN // An ANCHOR token.
-	yaml_TAG_TOKEN // A TAG token.
-	yaml_SCALAR_TOKEN // A SCALAR token.
-) - -func (tt yaml_token_type_t) String() string { - switch tt { - case yaml_NO_TOKEN: - return "yaml_NO_TOKEN" - case yaml_STREAM_START_TOKEN: - return "yaml_STREAM_START_TOKEN" - case yaml_STREAM_END_TOKEN: - return "yaml_STREAM_END_TOKEN" - case yaml_VERSION_DIRECTIVE_TOKEN: - return "yaml_VERSION_DIRECTIVE_TOKEN" - case yaml_TAG_DIRECTIVE_TOKEN: - return "yaml_TAG_DIRECTIVE_TOKEN" - case yaml_DOCUMENT_START_TOKEN: - return "yaml_DOCUMENT_START_TOKEN" - case yaml_DOCUMENT_END_TOKEN: - return "yaml_DOCUMENT_END_TOKEN" - case yaml_BLOCK_SEQUENCE_START_TOKEN: - return "yaml_BLOCK_SEQUENCE_START_TOKEN" - case yaml_BLOCK_MAPPING_START_TOKEN: - return "yaml_BLOCK_MAPPING_START_TOKEN" - case yaml_BLOCK_END_TOKEN: - return "yaml_BLOCK_END_TOKEN" - case yaml_FLOW_SEQUENCE_START_TOKEN: - return "yaml_FLOW_SEQUENCE_START_TOKEN" - case yaml_FLOW_SEQUENCE_END_TOKEN: - return "yaml_FLOW_SEQUENCE_END_TOKEN" - case yaml_FLOW_MAPPING_START_TOKEN: - return "yaml_FLOW_MAPPING_START_TOKEN" - case yaml_FLOW_MAPPING_END_TOKEN: - return "yaml_FLOW_MAPPING_END_TOKEN" - case yaml_BLOCK_ENTRY_TOKEN: - return "yaml_BLOCK_ENTRY_TOKEN" - case yaml_FLOW_ENTRY_TOKEN: - return "yaml_FLOW_ENTRY_TOKEN" - case yaml_KEY_TOKEN: - return "yaml_KEY_TOKEN" - case yaml_VALUE_TOKEN: - return "yaml_VALUE_TOKEN" - case yaml_ALIAS_TOKEN: - return "yaml_ALIAS_TOKEN" - case yaml_ANCHOR_TOKEN: - return "yaml_ANCHOR_TOKEN" - case yaml_TAG_TOKEN: - return "yaml_TAG_TOKEN" - case yaml_SCALAR_TOKEN: - return "yaml_SCALAR_TOKEN" - } - return "" -} - -// The token structure. -type yaml_token_t struct { - // The token type. - typ yaml_token_type_t - - // The start/end of the token. - start_mark, end_mark yaml_mark_t - - // The stream encoding (for yaml_STREAM_START_TOKEN). - encoding yaml_encoding_t - - // The alias/anchor/scalar value or tag/tag directive handle - // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). - value []byte - - // The tag suffix (for yaml_TAG_TOKEN). - suffix []byte - - // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). - prefix []byte - - // The scalar style (for yaml_SCALAR_TOKEN). - style yaml_scalar_style_t - - // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). - major, minor int8 -} - -// Events - -type yaml_event_type_t int8 - -// Event types. -const ( - // An empty event. - yaml_NO_EVENT yaml_event_type_t = iota - - yaml_STREAM_START_EVENT // A STREAM-START event. - yaml_STREAM_END_EVENT // A STREAM-END event. - yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. - yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. - yaml_ALIAS_EVENT // An ALIAS event. - yaml_SCALAR_EVENT // A SCALAR event. - yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. - yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. - yaml_MAPPING_START_EVENT // A MAPPING-START event. - yaml_MAPPING_END_EVENT // A MAPPING-END event. -) - -// The event structure. -type yaml_event_t struct { - - // The event type. - typ yaml_event_type_t - - // The start and end of the event. - start_mark, end_mark yaml_mark_t - - // The document encoding (for yaml_STREAM_START_EVENT). - encoding yaml_encoding_t - - // The version directive (for yaml_DOCUMENT_START_EVENT). - version_directive *yaml_version_directive_t - - // The list of tag directives (for yaml_DOCUMENT_START_EVENT). - tag_directives []yaml_tag_directive_t - - // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). 
- anchor []byte - - // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - tag []byte - - // The scalar value (for yaml_SCALAR_EVENT). - value []byte - - // Is the document start/end indicator implicit, or the tag optional? - // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). - implicit bool - - // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). - quoted_implicit bool - - // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - style yaml_style_t -} - -func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } -func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } -func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } - -// Nodes - -const ( - yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. - yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. - yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. - yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. - yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. - yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. - - yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. - yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. - - // Not in original libyaml. - yaml_BINARY_TAG = "tag:yaml.org,2002:binary" - yaml_MERGE_TAG = "tag:yaml.org,2002:merge" - - yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. - yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. - yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. -) - -type yaml_node_type_t int - -// Node types. -const ( - // An empty node. - yaml_NO_NODE yaml_node_type_t = iota - - yaml_SCALAR_NODE // A scalar node. - yaml_SEQUENCE_NODE // A sequence node. - yaml_MAPPING_NODE // A mapping node. -) - -// An element of a sequence node. -type yaml_node_item_t int - -// An element of a mapping node. -type yaml_node_pair_t struct { - key int // The key of the element. - value int // The value of the element. -} - -// The node structure. -type yaml_node_t struct { - typ yaml_node_type_t // The node type. - tag []byte // The node tag. - - // The node data. - - // The scalar parameters (for yaml_SCALAR_NODE). - scalar struct { - value []byte // The scalar value. - length int // The length of the scalar value. - style yaml_scalar_style_t // The scalar style. - } - - // The sequence parameters (for YAML_SEQUENCE_NODE). - sequence struct { - items_data []yaml_node_item_t // The stack of sequence items. - style yaml_sequence_style_t // The sequence style. - } - - // The mapping parameters (for yaml_MAPPING_NODE). - mapping struct { - pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). - pairs_start *yaml_node_pair_t // The beginning of the stack. - pairs_end *yaml_node_pair_t // The end of the stack. - pairs_top *yaml_node_pair_t // The top of the stack. - style yaml_mapping_style_t // The mapping style. - } - - start_mark yaml_mark_t // The beginning of the node. - end_mark yaml_mark_t // The end of the node. - -} - -// The document structure. 
-type yaml_document_t struct { - - // The document nodes. - nodes []yaml_node_t - - // The version directive. - version_directive *yaml_version_directive_t - - // The list of tag directives. - tag_directives_data []yaml_tag_directive_t - tag_directives_start int // The beginning of the tag directives list. - tag_directives_end int // The end of the tag directives list. - - start_implicit int // Is the document start indicator implicit? - end_implicit int // Is the document end indicator implicit? - - // The start/end of the document. - start_mark, end_mark yaml_mark_t -} - -// The prototype of a read handler. -// -// The read handler is called when the parser needs to read more bytes from the -// source. The handler should write not more than size bytes to the buffer. -// The number of written bytes should be set to the size_read variable. -// -// [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). -// [out] buffer The buffer to write the data from the source. -// [in] size The size of the buffer. -// [out] size_read The actual number of bytes read from the source. -// -// On success, the handler should return 1. If the handler failed, -// the returned value should be 0. On EOF, the handler should set the -// size_read to 0 and return 1. -type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) - -// This structure holds information about a potential simple key. -type yaml_simple_key_t struct { - possible bool // Is a simple key possible? - required bool // Is a simple key required? - token_number int // The number of the token. - mark yaml_mark_t // The position mark. -} - -// The states of the parser. -type yaml_parser_state_t int - -const ( - yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota - - yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. - yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. - yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. - yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. - yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. - yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. - yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. - yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. - yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. - yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. - yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. - yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. 
- yaml_PARSE_END_STATE // Expect nothing. -) - -func (ps yaml_parser_state_t) String() string { - switch ps { - case yaml_PARSE_STREAM_START_STATE: - return "yaml_PARSE_STREAM_START_STATE" - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_START_STATE: - return "yaml_PARSE_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return "yaml_PARSE_DOCUMENT_CONTENT_STATE" - case yaml_PARSE_DOCUMENT_END_STATE: - return "yaml_PARSE_DOCUMENT_END_STATE" - case yaml_PARSE_BLOCK_NODE_STATE: - return "yaml_PARSE_BLOCK_NODE_STATE" - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" - case yaml_PARSE_FLOW_NODE_STATE: - return "yaml_PARSE_FLOW_NODE_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" - case yaml_PARSE_END_STATE: - return "yaml_PARSE_END_STATE" - } - return "" -} - -// This structure holds aliases data. -type yaml_alias_data_t struct { - anchor []byte // The anchor. - index int // The node id. - mark yaml_mark_t // The anchor mark. -} - -// The parser structure. -// -// All members are internal. Manage the structure using the -// yaml_parser_ family of functions. -type yaml_parser_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - - problem string // Error description. - - // The byte about which the problem occured. - problem_offset int - problem_value int - problem_mark yaml_mark_t - - // The error context. - context string - context_mark yaml_mark_t - - // Reader stuff - - read_handler yaml_read_handler_t // Read handler. - - input_file io.Reader // File input data. - input []byte // String input data. - input_pos int - - eof bool // EOF flag - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - unread int // The number of unread characters in the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The input encoding. 
- - offset int // The offset of the current position (in bytes). - mark yaml_mark_t // The mark of the current position. - - // Scanner stuff - - stream_start_produced bool // Have we started to scan the input stream? - stream_end_produced bool // Have we reached the end of the input stream? - - flow_level int // The number of unclosed '[' and '{' indicators. - - tokens []yaml_token_t // The tokens queue. - tokens_head int // The head of the tokens queue. - tokens_parsed int // The number of tokens fetched from the queue. - token_available bool // Does the tokens queue contain a token ready for dequeueing. - - indent int // The current indentation level. - indents []int // The indentation levels stack. - - simple_key_allowed bool // May a simple key occur at the current position? - simple_keys []yaml_simple_key_t // The stack of simple keys. - - // Parser stuff - - state yaml_parser_state_t // The current parser state. - states []yaml_parser_state_t // The parser states stack. - marks []yaml_mark_t // The stack of marks. - tag_directives []yaml_tag_directive_t // The list of TAG directives. - - // Dumper stuff - - aliases []yaml_alias_data_t // The alias data. - - document *yaml_document_t // The currently parsed document. -} - -// Emitter Definitions - -// The prototype of a write handler. -// -// The write handler is called when the emitter needs to flush the accumulated -// characters to the output. The handler should write @a size bytes of the -// @a buffer to the output. -// -// @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). -// @param[in] buffer The buffer with bytes to be written. -// @param[in] size The size of the buffer. -// -// @returns On success, the handler should return @c 1. If the handler failed, -// the returned value should be @c 0. -// -type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error - -type yaml_emitter_state_t int - -// The emitter states. -const ( - // Expect STREAM-START. - yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota - - yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. - yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. - yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. - yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. - yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. - yaml_EMIT_END_STATE // Expect nothing. -) - -// The emitter structure. -// -// All members are internal. Manage the structure using the @c yaml_emitter_ -// family of functions. 
-type yaml_emitter_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - problem string // Error description. - - // Writer stuff - - write_handler yaml_write_handler_t // Write handler. - - output_buffer *[]byte // String output data. - output_file io.Writer // File output data. - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The stream encoding. - - // Emitter stuff - - canonical bool // If the output is in the canonical style? - best_indent int // The number of indentation spaces. - best_width int // The preferred width of the output lines. - unicode bool // Allow unescaped non-ASCII characters? - line_break yaml_break_t // The preferred line break. - - state yaml_emitter_state_t // The current emitter state. - states []yaml_emitter_state_t // The stack of states. - - events []yaml_event_t // The event queue. - events_head int // The head of the event queue. - - indents []int // The stack of indentation levels. - - tag_directives []yaml_tag_directive_t // The list of tag directives. - - indent int // The current indentation level. - - flow_level int // The current flow level. - - root_context bool // Is it the document root context? - sequence_context bool // Is it a sequence context? - mapping_context bool // Is it a mapping context? - simple_key_context bool // Is it a simple mapping key context? - - line int // The current line. - column int // The current column. - whitespace bool // If the last character was a whitespace? - indention bool // If the last character was an indentation character (' ', '-', '?', ':')? - open_ended bool // If an explicit document end is required? - - // Anchor analysis. - anchor_data struct { - anchor []byte // The anchor value. - alias bool // Is it an alias? - } - - // Tag analysis. - tag_data struct { - handle []byte // The tag handle. - suffix []byte // The tag suffix. - } - - // Scalar analysis. - scalar_data struct { - value []byte // The scalar value. - multiline bool // Does the scalar contain line breaks? - flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? - block_plain_allowed bool // Can the scalar be expressed in the block plain style? - single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? - block_allowed bool // Can the scalar be expressed in the literal or folded styles? - style yaml_scalar_style_t // The output style. - } - - // Dumper stuff - - opened bool // If the stream was already opened? - closed bool // If the stream was already closed? - - // The information associated with the document nodes. - anchors *struct { - references int // The number of references. - anchor int // The anchor id. - serialized bool // If the node has been emitted? - } - - last_anchor_id int // The last assigned anchor id. - - document *yaml_document_t // The currently emitted document. -} diff --git a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yamlprivateh.go deleted file mode 100644 index 8110ce3c..00000000 --- a/vendor/github.com/docker/distribution/vendor/gopkg.in/yaml.v2/yamlprivateh.go +++ /dev/null @@ -1,173 +0,0 @@ -package yaml - -const ( - // The size of the input raw buffer. - input_raw_buffer_size = 512 - - // The size of the input buffer. 
- // It should be possible to decode the whole raw buffer. - input_buffer_size = input_raw_buffer_size * 3 - - // The size of the output buffer. - output_buffer_size = 128 - - // The size of the output raw buffer. - // It should be possible to encode the whole output buffer. - output_raw_buffer_size = (output_buffer_size*2 + 2) - - // The size of other stacks and queues. - initial_stack_size = 16 - initial_queue_size = 16 - initial_string_size = 16 -) - -// Check if the character at the specified position is an alphabetical -// character, a digit, '_', or '-'. -func is_alpha(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' -} - -// Check if the character at the specified position is a digit. -func is_digit(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' -} - -// Get the value of a digit. -func as_digit(b []byte, i int) int { - return int(b[i]) - '0' -} - -// Check if the character at the specified position is a hex-digit. -func is_hex(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' -} - -// Get the value of a hex-digit. -func as_hex(b []byte, i int) int { - bi := b[i] - if bi >= 'A' && bi <= 'F' { - return int(bi) - 'A' + 10 - } - if bi >= 'a' && bi <= 'f' { - return int(bi) - 'a' + 10 - } - return int(bi) - '0' -} - -// Check if the character is ASCII. -func is_ascii(b []byte, i int) bool { - return b[i] <= 0x7F -} - -// Check if the character at the start of the buffer can be printed unescaped. -func is_printable(b []byte, i int) bool { - return ((b[i] == 0x0A) || // . == #x0A - (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E - (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF - (b[i] > 0xC2 && b[i] < 0xED) || - (b[i] == 0xED && b[i+1] < 0xA0) || - (b[i] == 0xEE) || - (b[i] == 0xEF && // #xE000 <= . <= #xFFFD - !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF - !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) -} - -// Check if the character at the specified position is NUL. -func is_z(b []byte, i int) bool { - return b[i] == 0x00 -} - -// Check if the beginning of the buffer is a BOM. -func is_bom(b []byte, i int) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -// Check if the character at the specified position is space. -func is_space(b []byte, i int) bool { - return b[i] == ' ' -} - -// Check if the character at the specified position is tab. -func is_tab(b []byte, i int) bool { - return b[i] == '\t' -} - -// Check if the character at the specified position is blank (space or tab). -func is_blank(b []byte, i int) bool { - //return is_space(b, i) || is_tab(b, i) - return b[i] == ' ' || b[i] == '\t' -} - -// Check if the character at the specified position is a line break. -func is_break(b []byte, i int) bool { - return (b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) -} - -func is_crlf(b []byte, i int) bool { - return b[i] == '\r' && b[i+1] == '\n' -} - -// Check if the character is a line break or NUL. 
-func is_breakz(b []byte, i int) bool { - //return is_break(b, i) || is_z(b, i) - return ( // is_break: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - // is_z: - b[i] == 0) -} - -// Check if the character is a line break, space, or NUL. -func is_spacez(b []byte, i int) bool { - //return is_space(b, i) || is_breakz(b, i) - return ( // is_space: - b[i] == ' ' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Check if the character is a line break, space, tab, or NUL. -func is_blankz(b []byte, i int) bool { - //return is_blank(b, i) || is_breakz(b, i) - return ( // is_blank: - b[i] == ' ' || b[i] == '\t' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Determine the width of the character. -func width(b byte) int { - // Don't replace these by a switch without first - // confirming that it is being inlined. - if b&0x80 == 0x00 { - return 1 - } - if b&0xE0 == 0xC0 { - return 2 - } - if b&0xF0 == 0xE0 { - return 3 - } - if b&0xF8 == 0xF0 { - return 4 - } - return 0 - -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/LICENSE b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/LICENSE deleted file mode 100644 index 6a66aea5..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
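The `width` helper removed above infers how many bytes a UTF-8 sequence occupies from the bit pattern of its leading byte (0xxxxxxx → 1, 110xxxxx → 2, 1110xxxx → 3, 11110xxx → 4). Below is a minimal, standalone Go sketch of that same mask logic, checked against the standard library; the `main` function and sample strings are illustrative only and are not part of the vendored code.

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// seqWidth mirrors the mask checks used by the removed width() helper:
// the top bits of the leading byte encode the length of the UTF-8 sequence.
func seqWidth(b byte) int {
	switch {
	case b&0x80 == 0x00: // 0xxxxxxx: ASCII, single byte
		return 1
	case b&0xE0 == 0xC0: // 110xxxxx: two-byte sequence
		return 2
	case b&0xF0 == 0xE0: // 1110xxxx: three-byte sequence
		return 3
	case b&0xF8 == 0xF0: // 11110xxx: four-byte sequence
		return 4
	}
	return 0 // continuation or invalid leading byte
}

func main() {
	for _, s := range []string{"a", "é", "中", "🙂"} {
		r, n := utf8.DecodeRuneInString(s)
		// Both approaches should agree on the byte width of the first rune.
		fmt.Printf("%q: leading-byte width=%d, decoded width=%d\n", r, seqWidth(s[0]), n)
	}
}
```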
diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/README b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/README deleted file mode 100644 index 98a875f3..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/README +++ /dev/null @@ -1,152 +0,0 @@ -package letsencrypt // import "rsc.io/letsencrypt" - -Package letsencrypt obtains TLS certificates from LetsEncrypt.org. - -LetsEncrypt.org is a service that issues free SSL/TLS certificates to -servers that can prove control over the given domain's DNS records or the -servers pointed at by those records. - - -Quick Start - -A complete HTTP/HTTPS web server using TLS certificates from -LetsEncrypt.org, redirecting all HTTP access to HTTPS, and maintaining TLS -certificates in a file letsencrypt.cache across server restarts. - - package main - - import ( - "fmt" - "log" - "net/http" - "rsc.io/letsencrypt" - ) - - func main() { - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, "Hello, TLS!\n") - }) - var m letsencrypt.Manager - if err := m.CacheFile("letsencrypt.cache"); err != nil { - log.Fatal(err) - } - log.Fatal(m.Serve()) - } - - -Overview - -The fundamental type in this package is the Manager, which manages obtaining -and refreshing a collection of TLS certificates, typically for use by an -HTTPS server. The example above shows the most basic use of a Manager. The -use can be customized by calling additional methods of the Manager. - - -Registration - -A Manager m registers anonymously with LetsEncrypt.org, including agreeing -to the letsencrypt.org terms of service, the first time it needs to obtain a -certificate. To register with a particular email address and with the option -of a prompt for agreement with the terms of service, call m.Register. - - -GetCertificate - -The Manager's GetCertificate method returns certificates from the Manager's -cache, filling the cache by requesting certificates from LetsEncrypt.org. In -this way, a server with a tls.Config.GetCertificate set to m.GetCertificate -will demand load a certificate for any host name it serves. To force loading -of certificates ahead of time, install m.GetCertificate as before but then -call m.Cert for each host name. - -A Manager can only obtain a certificate for a given host name if it can -prove control of that host name to LetsEncrypt.org. By default it proves -control by answering an HTTPS-based challenge: when the LetsEncrypt.org -servers connect to the named host on port 443 (HTTPS), the TLS SNI handshake -must use m.GetCertificate to obtain a per-host certificate. The most common -way to satisfy this requirement is for the host name to resolve to the IP -address of a (single) computer running m.ServeHTTPS, or at least running a -Go TLS server with tls.Config.GetCertificate set to m.GetCertificate. -However, other configurations are possible. For example, a group of machines -could use an implementation of tls.Config.GetCertificate that cached -certificates but handled cache misses by making RPCs to a Manager m on an -elected leader machine. - -In typical usage, then, the setting of tls.Config.GetCertificate to -m.GetCertificate serves two purposes: it provides certificates to the TLS -server for ordinary serving, and it also answers challenges to prove -ownership of the domains in order to obtain those certificates. - -To force the loading of a certificate for a given host into the Manager's -cache, use m.Cert. 
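The section above describes forcing certificates to load ahead of time by installing m.GetCertificate and then calling m.Cert for each host. A hedged sketch of that pattern, assuming only the Manager API described in this removed README (the host names and cache file path are placeholders, and error handling is kept minimal):

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"rsc.io/letsencrypt"
)

func main() {
	var m letsencrypt.Manager
	if err := m.CacheFile("letsencrypt.cache"); err != nil {
		log.Fatal(err)
	}

	// Restrict certificate requests to known hosts and pre-load their
	// certificates so the first TLS handshake does not wait on LetsEncrypt.org.
	hosts := []string{"example.com", "www.example.com"} // placeholder host names
	m.SetHosts(hosts)
	for _, h := range hosts {
		if _, err := m.Cert(h); err != nil {
			log.Printf("pre-loading certificate for %s: %v", h, err)
		}
	}

	// Ordinary serving: GetCertificate answers both normal TLS handshakes
	// and the LetsEncrypt.org challenges.
	srv := &http.Server{
		Addr: ":https",
		TLSConfig: &tls.Config{
			GetCertificate: m.GetCertificate,
		},
	}
	log.Fatal(srv.ListenAndServeTLS("", ""))
}
```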
- - -Persistent Storage - -If a server always starts with a zero Manager m, the server effectively -fetches a new certificate for each of its host name from LetsEncrypt.org on -each restart. This is unfortunate both because the server cannot start if -LetsEncrypt.org is unavailable and because LetsEncrypt.org limits how often -it will issue a certificate for a given host name (at time of writing, the -limit is 5 per week for a given host name). To save server state proactively -to a cache file and to reload the server state from that same file when -creating a new manager, call m.CacheFile with the name of the file to use. - -For alternate storage uses, m.Marshal returns the current state of the -Manager as an opaque string, m.Unmarshal sets the state of the Manager using -a string previously returned by m.Marshal (usually a different m), and -m.Watch returns a channel that receives notifications about state changes. - - -Limits - -To avoid hitting basic rate limits on LetsEncrypt.org, a given Manager -limits all its interactions to at most one request every minute, with an -initial allowed burst of 20 requests. - -By default, if GetCertificate is asked for a certificate it does not have, -it will in turn ask LetsEncrypt.org for that certificate. This opens a -potential attack where attackers connect to a server by IP address and -pretend to be asking for an incorrect host name. Then GetCertificate will -attempt to obtain a certificate for that host, incorrectly, eventually -hitting LetsEncrypt.org's rate limit for certificate requests and making it -impossible to obtain actual certificates. Because servers hold certificates -for months at a time, however, an attack would need to be sustained over a -time period of at least a month in order to cause real problems. - -To mitigate this kind of attack, a given Manager limits itself to an average -of one certificate request for a new host every three hours, with an initial -allowed burst of up to 20 requests. Long-running servers will therefore stay -within the LetsEncrypt.org limit of 300 failed requests per month. -Certificate refreshes are not subject to this limit. - -To eliminate the attack entirely, call m.SetHosts to enumerate the exact set -of hosts that are allowed in certificate requests. - - -Web Servers - -The basic requirement for use of a Manager is that there be an HTTPS server -running on port 443 and calling m.GetCertificate to obtain TLS certificates. -Using standard primitives, the way to do this is: - - srv := &http.Server{ - Addr: ":https", - TLSConfig: &tls.Config{ - GetCertificate: m.GetCertificate, - }, - } - srv.ListenAndServeTLS("", "") - -However, this pattern of serving HTTPS with demand-loaded TLS certificates -comes up enough to wrap into a single method m.ServeHTTPS. - -Similarly, many HTTPS servers prefer to redirect HTTP clients to the HTTPS -URLs. That functionality is provided by RedirectHTTP. - -The combination of serving HTTPS with demand-loaded TLS certificates and -serving HTTPS redirects to HTTP clients is provided by m.Serve, as used in -the original example above. - -func RedirectHTTP(w http.ResponseWriter, r *http.Request) -type Manager struct { ... } diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/lets.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/lets.go deleted file mode 100644 index 3a845363..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/lets.go +++ /dev/null @@ -1,753 +0,0 @@ -// Copyright 2016 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package letsencrypt obtains TLS certificates from LetsEncrypt.org. -// -// LetsEncrypt.org is a service that issues free SSL/TLS certificates to servers -// that can prove control over the given domain's DNS records or -// the servers pointed at by those records. -// -// Quick Start -// -// A complete HTTP/HTTPS web server using TLS certificates from LetsEncrypt.org, -// redirecting all HTTP access to HTTPS, and maintaining TLS certificates in a file -// letsencrypt.cache across server restarts. -// -// package main -// -// import ( -// "fmt" -// "log" -// "net/http" -// "rsc.io/letsencrypt" -// ) -// -// func main() { -// http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { -// fmt.Fprintf(w, "Hello, TLS!\n") -// }) -// var m letsencrypt.Manager -// if err := m.CacheFile("letsencrypt.cache"); err != nil { -// log.Fatal(err) -// } -// log.Fatal(m.Serve()) -// } -// -// Overview -// -// The fundamental type in this package is the Manager, which -// manages obtaining and refreshing a collection of TLS certificates, -// typically for use by an HTTPS server. -// The example above shows the most basic use of a Manager. -// The use can be customized by calling additional methods of the Manager. -// -// Registration -// -// A Manager m registers anonymously with LetsEncrypt.org, including agreeing to -// the letsencrypt.org terms of service, the first time it needs to obtain a certificate. -// To register with a particular email address and with the option of a -// prompt for agreement with the terms of service, call m.Register. -// -// GetCertificate -// -// The Manager's GetCertificate method returns certificates -// from the Manager's cache, filling the cache by requesting certificates -// from LetsEncrypt.org. In this way, a server with a tls.Config.GetCertificate -// set to m.GetCertificate will demand load a certificate for any host name -// it serves. To force loading of certificates ahead of time, install m.GetCertificate -// as before but then call m.Cert for each host name. -// -// A Manager can only obtain a certificate for a given host name if it can prove -// control of that host name to LetsEncrypt.org. By default it proves control by -// answering an HTTPS-based challenge: when -// the LetsEncrypt.org servers connect to the named host on port 443 (HTTPS), -// the TLS SNI handshake must use m.GetCertificate to obtain a per-host certificate. -// The most common way to satisfy this requirement is for the host name to -// resolve to the IP address of a (single) computer running m.ServeHTTPS, -// or at least running a Go TLS server with tls.Config.GetCertificate set to m.GetCertificate. -// However, other configurations are possible. For example, a group of machines -// could use an implementation of tls.Config.GetCertificate that cached -// certificates but handled cache misses by making RPCs to a Manager m -// on an elected leader machine. -// -// In typical usage, then, the setting of tls.Config.GetCertificate to m.GetCertificate -// serves two purposes: it provides certificates to the TLS server for ordinary serving, -// and it also answers challenges to prove ownership of the domains in order to -// obtain those certificates. -// -// To force the loading of a certificate for a given host into the Manager's cache, -// use m.Cert. 
-// -// Persistent Storage -// -// If a server always starts with a zero Manager m, the server effectively fetches -// a new certificate for each of its host name from LetsEncrypt.org on each restart. -// This is unfortunate both because the server cannot start if LetsEncrypt.org is -// unavailable and because LetsEncrypt.org limits how often it will issue a certificate -// for a given host name (at time of writing, the limit is 5 per week for a given host name). -// To save server state proactively to a cache file and to reload the server state from -// that same file when creating a new manager, call m.CacheFile with the name of -// the file to use. -// -// For alternate storage uses, m.Marshal returns the current state of the Manager -// as an opaque string, m.Unmarshal sets the state of the Manager using a string -// previously returned by m.Marshal (usually a different m), and m.Watch returns -// a channel that receives notifications about state changes. -// -// Limits -// -// To avoid hitting basic rate limits on LetsEncrypt.org, a given Manager limits all its -// interactions to at most one request every minute, with an initial allowed burst of -// 20 requests. -// -// By default, if GetCertificate is asked for a certificate it does not have, it will in turn -// ask LetsEncrypt.org for that certificate. This opens a potential attack where attackers -// connect to a server by IP address and pretend to be asking for an incorrect host name. -// Then GetCertificate will attempt to obtain a certificate for that host, incorrectly, -// eventually hitting LetsEncrypt.org's rate limit for certificate requests and making it -// impossible to obtain actual certificates. Because servers hold certificates for months -// at a time, however, an attack would need to be sustained over a time period -// of at least a month in order to cause real problems. -// -// To mitigate this kind of attack, a given Manager limits -// itself to an average of one certificate request for a new host every three hours, -// with an initial allowed burst of up to 20 requests. -// Long-running servers will therefore stay -// within the LetsEncrypt.org limit of 300 failed requests per month. -// Certificate refreshes are not subject to this limit. -// -// To eliminate the attack entirely, call m.SetHosts to enumerate the exact set -// of hosts that are allowed in certificate requests. -// -// Web Servers -// -// The basic requirement for use of a Manager is that there be an HTTPS server -// running on port 443 and calling m.GetCertificate to obtain TLS certificates. -// Using standard primitives, the way to do this is: -// -// srv := &http.Server{ -// Addr: ":https", -// TLSConfig: &tls.Config{ -// GetCertificate: m.GetCertificate, -// }, -// } -// srv.ListenAndServeTLS("", "") -// -// However, this pattern of serving HTTPS with demand-loaded TLS certificates -// comes up enough to wrap into a single method m.ServeHTTPS. -// -// Similarly, many HTTPS servers prefer to redirect HTTP clients to the HTTPS URLs. -// That functionality is provided by RedirectHTTP. -// -// The combination of serving HTTPS with demand-loaded TLS certificates and -// serving HTTPS redirects to HTTP clients is provided by m.Serve, as used in -// the original example above. 
-// -package letsencrypt - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/tls" - "crypto/x509" - "encoding/json" - "encoding/pem" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - "os" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/time/rate" - - "github.com/xenolf/lego/acme" -) - -const letsEncryptURL = "https://acme-v01.api.letsencrypt.org/directory" -const debug = false - -// A Manager m takes care of obtaining and refreshing a collection of TLS certificates -// obtained by LetsEncrypt.org. -// The zero Manager is not yet registered with LetsEncrypt.org and has no TLS certificates -// but is nonetheless ready for use. -// See the package comment for an overview of how to use a Manager. -type Manager struct { - mu sync.Mutex - state state - rateLimit *rate.Limiter - newHostLimit *rate.Limiter - certCache map[string]*cacheEntry - certTokens map[string]*tls.Certificate - watchChan chan struct{} -} - -// Serve runs an HTTP/HTTPS web server using TLS certificates obtained by the manager. -// The HTTP server redirects all requests to the HTTPS server. -// The HTTPS server obtains TLS certificates as needed and responds to requests -// by invoking http.DefaultServeMux. -// -// Serve does not return unitil the HTTPS server fails to start or else stops. -// Either way, Serve can only return a non-nil error, never nil. -func (m *Manager) Serve() error { - l, err := net.Listen("tcp", ":http") - if err != nil { - return err - } - defer l.Close() - go http.Serve(l, http.HandlerFunc(RedirectHTTP)) - - return m.ServeHTTPS() -} - -// ServeHTTPS runs an HTTPS web server using TLS certificates obtained by the manager. -// The HTTPS server obtains TLS certificates as needed and responds to requests -// by invoking http.DefaultServeMux. -// ServeHTTPS does not return unitil the HTTPS server fails to start or else stops. -// Either way, ServeHTTPS can only return a non-nil error, never nil. -func (m *Manager) ServeHTTPS() error { - srv := &http.Server{ - Addr: ":https", - TLSConfig: &tls.Config{ - GetCertificate: m.GetCertificate, - }, - } - return srv.ListenAndServeTLS("", "") -} - -// RedirectHTTP is an HTTP handler (suitable for use with http.HandleFunc) -// that responds to all requests by redirecting to the same URL served over HTTPS. -// It should only be invoked for requests received over HTTP. -func RedirectHTTP(w http.ResponseWriter, r *http.Request) { - if r.TLS != nil || r.Host == "" { - http.Error(w, "not found", 404) - } - - u := r.URL - u.Host = r.Host - u.Scheme = "https" - http.Redirect(w, r, u.String(), 302) -} - -// state is the serializable state for the Manager. -// It also implements acme.User. 
-type state struct { - Email string - Reg *acme.RegistrationResource - Key string - key *ecdsa.PrivateKey - Hosts []string - Certs map[string]stateCert -} - -func (s *state) GetEmail() string { return s.Email } -func (s *state) GetRegistration() *acme.RegistrationResource { return s.Reg } -func (s *state) GetPrivateKey() crypto.PrivateKey { return s.key } - -type stateCert struct { - Cert string - Key string -} - -func (cert stateCert) toTLS() (*tls.Certificate, error) { - c, err := tls.X509KeyPair([]byte(cert.Cert), []byte(cert.Key)) - if err != nil { - return nil, err - } - return &c, err -} - -type cacheEntry struct { - host string - m *Manager - - mu sync.Mutex - cert *tls.Certificate - timeout time.Time - refreshing bool - err error -} - -func (m *Manager) init() { - m.mu.Lock() - if m.certCache == nil { - m.rateLimit = rate.NewLimiter(rate.Every(1*time.Minute), 20) - m.newHostLimit = rate.NewLimiter(rate.Every(3*time.Hour), 20) - m.certCache = map[string]*cacheEntry{} - m.certTokens = map[string]*tls.Certificate{} - m.watchChan = make(chan struct{}, 1) - m.watchChan <- struct{}{} - } - m.mu.Unlock() -} - -// Watch returns the manager's watch channel, -// which delivers a notification after every time the -// manager's state (as exposed by Marshal and Unmarshal) changes. -// All calls to Watch return the same watch channel. -// -// The watch channel includes notifications about changes -// before the first call to Watch, so that in the pattern below, -// the range loop executes once immediately, saving -// the result of setup (along with any background updates that -// may have raced in quickly). -// -// m := new(letsencrypt.Manager) -// setup(m) -// go backgroundUpdates(m) -// for range m.Watch() { -// save(m.Marshal()) -// } -// -func (m *Manager) Watch() <-chan struct{} { - m.init() - m.updated() - return m.watchChan -} - -func (m *Manager) updated() { - select { - case m.watchChan <- struct{}{}: - default: - } -} - -func (m *Manager) CacheFile(name string) error { - f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE, 0600) - if err != nil { - return err - } - f.Close() - data, err := ioutil.ReadFile(name) - if err != nil { - return err - } - if len(data) > 0 { - if err := m.Unmarshal(string(data)); err != nil { - return err - } - } - go func() { - for range m.Watch() { - err := ioutil.WriteFile(name, []byte(m.Marshal()), 0600) - if err != nil { - log.Printf("writing letsencrypt cache: %v", err) - } - } - }() - return nil -} - -// Registered reports whether the manager has registered with letsencrypt.org yet. -func (m *Manager) Registered() bool { - m.init() - m.mu.Lock() - defer m.mu.Unlock() - return m.registered() -} - -func (m *Manager) registered() bool { - return m.state.Reg != nil && m.state.Reg.Body.Agreement != "" -} - -// Register registers the manager with letsencrypt.org, using the given email address. -// Registration may require agreeing to the letsencrypt.org terms of service. -// If so, Register calls prompt(url) where url is the URL of the terms of service. -// Prompt should report whether the caller agrees to the terms. -// A nil prompt func is taken to mean that the user always agrees. -// The email address is sent to LetsEncrypt.org but otherwise unchecked; -// it can be omitted by passing the empty string. -// -// Calling Register is only required to make sure registration uses a -// particular email address or to insert an explicit prompt into the -// registration sequence. 
If the manager is not registered, it will -// automatically register with no email address and automatic -// agreement to the terms of service at the first call to Cert or GetCertificate. -func (m *Manager) Register(email string, prompt func(string) bool) error { - m.init() - m.mu.Lock() - defer m.mu.Unlock() - - return m.register(email, prompt) -} - -func (m *Manager) register(email string, prompt func(string) bool) error { - if m.registered() { - return fmt.Errorf("already registered") - } - m.state.Email = email - if m.state.key == nil { - key, err := newKey() - if err != nil { - return fmt.Errorf("generating key: %v", err) - } - Key, err := marshalKey(key) - if err != nil { - return fmt.Errorf("generating key: %v", err) - } - m.state.key = key - m.state.Key = string(Key) - } - - c, err := acme.NewClient(letsEncryptURL, &m.state, acme.EC256) - if err != nil { - return fmt.Errorf("create client: %v", err) - } - reg, err := c.Register() - if err != nil { - return fmt.Errorf("register: %v", err) - } - - m.state.Reg = reg - if reg.Body.Agreement == "" { - if prompt != nil && !prompt(reg.TosURL) { - return fmt.Errorf("did not agree to TOS") - } - if err := c.AgreeToTOS(); err != nil { - return fmt.Errorf("agreeing to TOS: %v", err) - } - } - - m.updated() - - return nil -} - -// Marshal returns an encoding of the manager's state, -// suitable for writing to disk and reloading by calling Unmarshal. -// The state includes registration status, the configured host list -// from SetHosts, and all known certificates, including their private -// cryptographic keys. -// Consequently, the state should be kept private. -func (m *Manager) Marshal() string { - m.init() - js, err := json.MarshalIndent(&m.state, "", "\t") - if err != nil { - panic("unexpected json.Marshal failure") - } - return string(js) -} - -// Unmarshal restores the state encoded by a previous call to Marshal -// (perhaps on a different Manager in a different program). -func (m *Manager) Unmarshal(enc string) error { - m.init() - var st state - if err := json.Unmarshal([]byte(enc), &st); err != nil { - return err - } - if st.Key != "" { - key, err := unmarshalKey(st.Key) - if err != nil { - return err - } - st.key = key - } - m.state = st - for host, cert := range m.state.Certs { - c, err := cert.toTLS() - if err != nil { - log.Printf("letsencrypt: ignoring entry for %s: %v", host, err) - continue - } - m.certCache[host] = &cacheEntry{host: host, m: m, cert: c} - } - m.updated() - return nil -} - -// SetHosts sets the manager's list of known host names. -// If the list is non-nil, the manager will only ever attempt to acquire -// certificates for host names on the list. -// If the list is nil, the manager does not restrict the hosts it will -// ask for certificates for. -func (m *Manager) SetHosts(hosts []string) { - m.init() - m.mu.Lock() - m.state.Hosts = append(m.state.Hosts[:0], hosts...) - m.mu.Unlock() - m.updated() -} - -// GetCertificate can be placed a tls.Config's GetCertificate field to make -// the TLS server use Let's Encrypt certificates. -// Each time a client connects to the TLS server expecting a new host name, -// the TLS server's call to GetCertificate will trigger an exchange with the -// Let's Encrypt servers to obtain that certificate, subject to the manager rate limits. 
-// -// As noted in the Manager's documentation comment, -// to obtain a certificate for a given host name, that name -// must resolve to a computer running a TLS server on port 443 -// that obtains TLS SNI certificates by calling m.GetCertificate. -// In the standard usage, then, installing m.GetCertificate in the tls.Config -// both automatically provisions the TLS certificates needed for -// ordinary HTTPS service and answers the challenges from LetsEncrypt.org. -func (m *Manager) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { - m.init() - - host := clientHello.ServerName - - if debug { - log.Printf("GetCertificate %s", host) - } - - if strings.HasSuffix(host, ".acme.invalid") { - m.mu.Lock() - cert := m.certTokens[host] - m.mu.Unlock() - if cert == nil { - return nil, fmt.Errorf("unknown host") - } - return cert, nil - } - - return m.Cert(host) -} - -// Cert returns the certificate for the given host name, obtaining a new one if necessary. -// -// As noted in the documentation for Manager and for the GetCertificate method, -// obtaining a certificate requires that m.GetCertificate be associated with host. -// In most servers, simply starting a TLS server with a configuration referring -// to m.GetCertificate is sufficient, and Cert need not be called. -// -// The main use of Cert is to force the manager to obtain a certificate -// for a particular host name ahead of time. -func (m *Manager) Cert(host string) (*tls.Certificate, error) { - host = strings.ToLower(host) - if debug { - log.Printf("Cert %s", host) - } - - m.init() - m.mu.Lock() - if !m.registered() { - m.register("", nil) - } - - ok := false - if m.state.Hosts == nil { - ok = true - } else { - for _, h := range m.state.Hosts { - if host == h { - ok = true - break - } - } - } - if !ok { - m.mu.Unlock() - return nil, fmt.Errorf("unknown host") - } - - // Otherwise look in our cert cache. 
- entry, ok := m.certCache[host] - if !ok { - r := m.rateLimit.Reserve() - ok := r.OK() - if ok { - ok = m.newHostLimit.Allow() - if !ok { - r.Cancel() - } - } - if !ok { - m.mu.Unlock() - return nil, fmt.Errorf("rate limited") - } - entry = &cacheEntry{host: host, m: m} - m.certCache[host] = entry - } - m.mu.Unlock() - - entry.mu.Lock() - defer entry.mu.Unlock() - entry.init() - if entry.err != nil { - return nil, entry.err - } - return entry.cert, nil -} - -func (e *cacheEntry) init() { - if e.err != nil && time.Now().Before(e.timeout) { - return - } - if e.cert != nil { - if e.timeout.IsZero() { - t, err := certRefreshTime(e.cert) - if err != nil { - e.err = err - e.timeout = time.Now().Add(1 * time.Minute) - e.cert = nil - return - } - e.timeout = t - } - if time.Now().After(e.timeout) && !e.refreshing { - e.refreshing = true - go e.refresh() - } - return - } - - cert, refreshTime, err := e.m.verify(e.host) - e.m.mu.Lock() - e.m.certCache[e.host] = e - e.m.mu.Unlock() - e.install(cert, refreshTime, err) -} - -func (e *cacheEntry) install(cert *tls.Certificate, refreshTime time.Time, err error) { - e.cert = nil - e.timeout = time.Time{} - e.err = nil - - if err != nil { - e.err = err - e.timeout = time.Now().Add(1 * time.Minute) - return - } - - e.cert = cert - e.timeout = refreshTime -} - -func (e *cacheEntry) refresh() { - e.m.rateLimit.Wait(context.Background()) - cert, refreshTime, err := e.m.verify(e.host) - - e.mu.Lock() - defer e.mu.Unlock() - e.refreshing = false - if err == nil { - e.install(cert, refreshTime, nil) - } -} - -func (m *Manager) verify(host string) (cert *tls.Certificate, refreshTime time.Time, err error) { - c, err := acme.NewClient(letsEncryptURL, &m.state, acme.EC256) - if err != nil { - return - } - if err = c.SetChallengeProvider(acme.TLSSNI01, tlsProvider{m}); err != nil { - return - } - c.SetChallengeProvider(acme.TLSSNI01, tlsProvider{m}) - c.ExcludeChallenges([]acme.Challenge{acme.HTTP01}) - acmeCert, errmap := c.ObtainCertificate([]string{host}, true, nil) - if len(errmap) > 0 { - if debug { - log.Printf("ObtainCertificate %v => %v", host, errmap) - } - err = fmt.Errorf("%v", errmap) - return - } - entryCert := stateCert{ - Cert: string(acmeCert.Certificate), - Key: string(acmeCert.PrivateKey), - } - cert, err = entryCert.toTLS() - if err != nil { - if debug { - log.Printf("ObtainCertificate %v toTLS failure: %v", host, err) - } - err = err - return - } - if refreshTime, err = certRefreshTime(cert); err != nil { - return - } - - m.mu.Lock() - if m.state.Certs == nil { - m.state.Certs = make(map[string]stateCert) - } - m.state.Certs[host] = entryCert - m.mu.Unlock() - m.updated() - - return cert, refreshTime, nil -} - -func certRefreshTime(cert *tls.Certificate) (time.Time, error) { - xc, err := x509.ParseCertificate(cert.Certificate[0]) - if err != nil { - if debug { - log.Printf("ObtainCertificate to X.509 failure: %v", err) - } - return time.Time{}, err - } - t := xc.NotBefore.Add(xc.NotAfter.Sub(xc.NotBefore) / 2) - monthEarly := xc.NotAfter.Add(-30 * 24 * time.Hour) - if t.Before(monthEarly) { - t = monthEarly - } - return t, nil -} - -// tlsProvider implements acme.ChallengeProvider for TLS handshake challenges. 
-type tlsProvider struct { - m *Manager -} - -func (p tlsProvider) Present(domain, token, keyAuth string) error { - cert, dom, err := acme.TLSSNI01ChallengeCertDomain(keyAuth) - if err != nil { - return err - } - - p.m.mu.Lock() - p.m.certTokens[dom] = &cert - p.m.mu.Unlock() - - return nil -} - -func (p tlsProvider) CleanUp(domain, token, keyAuth string) error { - _, dom, err := acme.TLSSNI01ChallengeCertDomain(keyAuth) - if err != nil { - return err - } - - p.m.mu.Lock() - delete(p.m.certTokens, dom) - p.m.mu.Unlock() - - return nil -} - -func marshalKey(key *ecdsa.PrivateKey) ([]byte, error) { - data, err := x509.MarshalECPrivateKey(key) - if err != nil { - return nil, err - } - return pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: data}), nil -} - -func unmarshalKey(text string) (*ecdsa.PrivateKey, error) { - b, _ := pem.Decode([]byte(text)) - if b == nil { - return nil, fmt.Errorf("unmarshalKey: missing key") - } - if b.Type != "EC PRIVATE KEY" { - return nil, fmt.Errorf("unmarshalKey: found %q, not %q", b.Type, "EC PRIVATE KEY") - } - k, err := x509.ParseECPrivateKey(b.Bytes) - if err != nil { - return nil, fmt.Errorf("unmarshalKey: %v", err) - } - return k, nil -} - -func newKey() (*ecdsa.PrivateKey, error) { - return ecdsa.GenerateKey(elliptic.P384(), rand.Reader) -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/LICENSE b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/LICENSE deleted file mode 100644 index 17460b71..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Sebastian Erhart - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/challenges.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/challenges.go deleted file mode 100644 index 85790050..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/challenges.go +++ /dev/null @@ -1,16 +0,0 @@ -package acme - -// Challenge is a string that identifies a particular type and version of ACME challenge. 
-type Challenge string - -const ( - // HTTP01 is the "http-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#http - // Note: HTTP01ChallengePath returns the URL path to fulfill this challenge - HTTP01 = Challenge("http-01") - // TLSSNI01 is the "tls-sni-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#tls-with-server-name-indication-tls-sni - // Note: TLSSNI01ChallengeCert returns a certificate to fulfill this challenge - TLSSNI01 = Challenge("tls-sni-01") - // DNS01 is the "dns-01" ACME challenge https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#dns - // Note: DNS01Record returns a DNS record which will fulfill this challenge - DNS01 = Challenge("dns-01") -) diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client.go deleted file mode 100644 index 16e4cbe0..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client.go +++ /dev/null @@ -1,638 +0,0 @@ -// Package acme implements the ACME protocol for Let's Encrypt and other conforming providers. -package acme - -import ( - "crypto" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "log" - "net" - "regexp" - "strconv" - "strings" - "time" -) - -var ( - // Logger is an optional custom logger. - Logger *log.Logger -) - -// logf writes a log entry. It uses Logger if not -// nil, otherwise it uses the default log.Logger. -func logf(format string, args ...interface{}) { - if Logger != nil { - Logger.Printf(format, args...) - } else { - log.Printf(format, args...) - } -} - -// User interface is to be implemented by users of this library. -// It is used by the client type to get user specific information. -type User interface { - GetEmail() string - GetRegistration() *RegistrationResource - GetPrivateKey() crypto.PrivateKey -} - -// Interface for all challenge solvers to implement. -type solver interface { - Solve(challenge challenge, domain string) error -} - -type validateFunc func(j *jws, domain, uri string, chlng challenge) error - -// Client is the user-friendy way to ACME -type Client struct { - directory directory - user User - jws *jws - keyType KeyType - issuerCert []byte - solvers map[Challenge]solver -} - -// NewClient creates a new ACME client on behalf of the user. The client will depend on -// the ACME directory located at caDirURL for the rest of its actions. It will -// generate private keys for certificates of size keyBits. -func NewClient(caDirURL string, user User, keyType KeyType) (*Client, error) { - privKey := user.GetPrivateKey() - if privKey == nil { - return nil, errors.New("private key was nil") - } - - var dir directory - if _, err := getJSON(caDirURL, &dir); err != nil { - return nil, fmt.Errorf("get directory at '%s': %v", caDirURL, err) - } - - if dir.NewRegURL == "" { - return nil, errors.New("directory missing new registration URL") - } - if dir.NewAuthzURL == "" { - return nil, errors.New("directory missing new authz URL") - } - if dir.NewCertURL == "" { - return nil, errors.New("directory missing new certificate URL") - } - if dir.RevokeCertURL == "" { - return nil, errors.New("directory missing revoke certificate URL") - } - - jws := &jws{privKey: privKey, directoryURL: caDirURL} - - // REVIEW: best possibility? 
- // Add all available solvers with the right index as per ACME - // spec to this map. Otherwise they won`t be found. - solvers := make(map[Challenge]solver) - solvers[HTTP01] = &httpChallenge{jws: jws, validate: validate, provider: &HTTPProviderServer{}} - solvers[TLSSNI01] = &tlsSNIChallenge{jws: jws, validate: validate, provider: &TLSProviderServer{}} - - return &Client{directory: dir, user: user, jws: jws, keyType: keyType, solvers: solvers}, nil -} - -// SetChallengeProvider specifies a custom provider that will make the solution available -func (c *Client) SetChallengeProvider(challenge Challenge, p ChallengeProvider) error { - switch challenge { - case HTTP01: - c.solvers[challenge] = &httpChallenge{jws: c.jws, validate: validate, provider: p} - case TLSSNI01: - c.solvers[challenge] = &tlsSNIChallenge{jws: c.jws, validate: validate, provider: p} - default: - return fmt.Errorf("Unknown challenge %v", challenge) - } - return nil -} - -// SetHTTPAddress specifies a custom interface:port to be used for HTTP based challenges. -// If this option is not used, the default port 80 and all interfaces will be used. -// To only specify a port and no interface use the ":port" notation. -func (c *Client) SetHTTPAddress(iface string) error { - host, port, err := net.SplitHostPort(iface) - if err != nil { - return err - } - - if chlng, ok := c.solvers[HTTP01]; ok { - chlng.(*httpChallenge).provider = NewHTTPProviderServer(host, port) - } - - return nil -} - -// SetTLSAddress specifies a custom interface:port to be used for TLS based challenges. -// If this option is not used, the default port 443 and all interfaces will be used. -// To only specify a port and no interface use the ":port" notation. -func (c *Client) SetTLSAddress(iface string) error { - host, port, err := net.SplitHostPort(iface) - if err != nil { - return err - } - - if chlng, ok := c.solvers[TLSSNI01]; ok { - chlng.(*tlsSNIChallenge).provider = NewTLSProviderServer(host, port) - } - return nil -} - -// ExcludeChallenges explicitly removes challenges from the pool for solving. -func (c *Client) ExcludeChallenges(challenges []Challenge) { - // Loop through all challenges and delete the requested one if found. - for _, challenge := range challenges { - delete(c.solvers, challenge) - } -} - -// Register the current account to the ACME server. -func (c *Client) Register() (*RegistrationResource, error) { - if c == nil || c.user == nil { - return nil, errors.New("acme: cannot register a nil client or user") - } - logf("[INFO] acme: Registering account for %s", c.user.GetEmail()) - - regMsg := registrationMessage{ - Resource: "new-reg", - } - if c.user.GetEmail() != "" { - regMsg.Contact = []string{"mailto:" + c.user.GetEmail()} - } else { - regMsg.Contact = []string{} - } - - var serverReg Registration - hdr, err := postJSON(c.jws, c.directory.NewRegURL, regMsg, &serverReg) - if err != nil { - return nil, err - } - - reg := &RegistrationResource{Body: serverReg} - - links := parseLinks(hdr["Link"]) - reg.URI = hdr.Get("Location") - if links["terms-of-service"] != "" { - reg.TosURL = links["terms-of-service"] - } - - if links["next"] != "" { - reg.NewAuthzURL = links["next"] - } else { - return nil, errors.New("acme: The server did not return 'next' link to proceed") - } - - return reg, nil -} - -// AgreeToTOS updates the Client registration and sends the agreement to -// the server. 
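Tying the client surface above together: a hedged sketch of how a caller of this vendored lego copy would plug in its own account type, move tls-sni-01 off port 443, and drop http-01, much as the letsencrypt Manager's verify method does earlier in this diff. The accountUser type, email address, and directory URL are illustrative, not from the original.

```go
package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"log"

	"github.com/xenolf/lego/acme"
)

// accountUser satisfies acme.User; the fields are illustrative.
type accountUser struct {
	email string
	reg   *acme.RegistrationResource
	key   crypto.PrivateKey
}

func (u *accountUser) GetEmail() string                            { return u.email }
func (u *accountUser) GetRegistration() *acme.RegistrationResource { return u.reg }
func (u *accountUser) GetPrivateKey() crypto.PrivateKey            { return u.key }

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	user := &accountUser{email: "admin@example.com", key: key}

	// Directory URL is illustrative; the deleted lib.go points at a
	// letsEncryptURL constant defined elsewhere in that file.
	client, err := acme.NewClient("https://acme-v01.api.letsencrypt.org/directory", user, acme.EC256)
	if err != nil {
		log.Fatal(err)
	}

	// Serve tls-sni-01 on a non-default port and skip http-01 entirely,
	// mirroring the ExcludeChallenges call in Manager.verify.
	if err := client.SetTLSAddress(":8443"); err != nil {
		log.Fatal(err)
	}
	client.ExcludeChallenges([]acme.Challenge{acme.HTTP01})
}
```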
-func (c *Client) AgreeToTOS() error { - reg := c.user.GetRegistration() - - reg.Body.Agreement = c.user.GetRegistration().TosURL - reg.Body.Resource = "reg" - _, err := postJSON(c.jws, c.user.GetRegistration().URI, c.user.GetRegistration().Body, nil) - return err -} - -// ObtainCertificate tries to obtain a single certificate using all domains passed into it. -// The first domain in domains is used for the CommonName field of the certificate, all other -// domains are added using the Subject Alternate Names extension. A new private key is generated -// for every invocation of this function. If you do not want that you can supply your own private key -// in the privKey parameter. If this parameter is non-nil it will be used instead of generating a new one. -// If bundle is true, the []byte contains both the issuer certificate and -// your issued certificate as a bundle. -// This function will never return a partial certificate. If one domain in the list fails, -// the whole certificate will fail. -func (c *Client) ObtainCertificate(domains []string, bundle bool, privKey crypto.PrivateKey) (CertificateResource, map[string]error) { - if bundle { - logf("[INFO][%s] acme: Obtaining bundled SAN certificate", strings.Join(domains, ", ")) - } else { - logf("[INFO][%s] acme: Obtaining SAN certificate", strings.Join(domains, ", ")) - } - - challenges, failures := c.getChallenges(domains) - // If any challenge fails - return. Do not generate partial SAN certificates. - if len(failures) > 0 { - return CertificateResource{}, failures - } - - errs := c.solveChallenges(challenges) - // If any challenge fails - return. Do not generate partial SAN certificates. - if len(errs) > 0 { - return CertificateResource{}, errs - } - - logf("[INFO][%s] acme: Validations succeeded; requesting certificates", strings.Join(domains, ", ")) - - cert, err := c.requestCertificate(challenges, bundle, privKey) - if err != nil { - for _, chln := range challenges { - failures[chln.Domain] = err - } - } - - return cert, failures -} - -// RevokeCertificate takes a PEM encoded certificate or bundle and tries to revoke it at the CA. -func (c *Client) RevokeCertificate(certificate []byte) error { - certificates, err := parsePEMBundle(certificate) - if err != nil { - return err - } - - x509Cert := certificates[0] - if x509Cert.IsCA { - return fmt.Errorf("Certificate bundle starts with a CA certificate") - } - - encodedCert := base64.URLEncoding.EncodeToString(x509Cert.Raw) - - _, err = postJSON(c.jws, c.directory.RevokeCertURL, revokeCertMessage{Resource: "revoke-cert", Certificate: encodedCert}, nil) - return err -} - -// RenewCertificate takes a CertificateResource and tries to renew the certificate. -// If the renewal process succeeds, the new certificate will ge returned in a new CertResource. -// Please be aware that this function will return a new certificate in ANY case that is not an error. -// If the server does not provide us with a new cert on a GET request to the CertURL -// this function will start a new-cert flow where a new certificate gets generated. -// If bundle is true, the []byte contains both the issuer certificate and -// your issued certificate as a bundle. -// For private key reuse the PrivateKey property of the passed in CertificateResource should be non-nil. -func (c *Client) RenewCertificate(cert CertificateResource, bundle bool) (CertificateResource, error) { - // Input certificate is PEM encoded. Decode it here as we may need the decoded - // cert later on in the renewal process. 
The input may be a bundle or a single certificate. - certificates, err := parsePEMBundle(cert.Certificate) - if err != nil { - return CertificateResource{}, err - } - - x509Cert := certificates[0] - if x509Cert.IsCA { - return CertificateResource{}, fmt.Errorf("[%s] Certificate bundle starts with a CA certificate", cert.Domain) - } - - // This is just meant to be informal for the user. - timeLeft := x509Cert.NotAfter.Sub(time.Now().UTC()) - logf("[INFO][%s] acme: Trying renewal with %d hours remaining", cert.Domain, int(timeLeft.Hours())) - - // The first step of renewal is to check if we get a renewed cert - // directly from the cert URL. - resp, err := httpGet(cert.CertURL) - if err != nil { - return CertificateResource{}, err - } - defer resp.Body.Close() - serverCertBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return CertificateResource{}, err - } - - serverCert, err := x509.ParseCertificate(serverCertBytes) - if err != nil { - return CertificateResource{}, err - } - - // If the server responds with a different certificate we are effectively renewed. - // TODO: Further test if we can actually use the new certificate (Our private key works) - if !x509Cert.Equal(serverCert) { - logf("[INFO][%s] acme: Server responded with renewed certificate", cert.Domain) - issuedCert := pemEncode(derCertificateBytes(serverCertBytes)) - // If bundle is true, we want to return a certificate bundle. - // To do this, we need the issuer certificate. - if bundle { - // The issuer certificate link is always supplied via an "up" link - // in the response headers of a new certificate. - links := parseLinks(resp.Header["Link"]) - issuerCert, err := c.getIssuerCertificate(links["up"]) - if err != nil { - // If we fail to acquire the issuer cert, return the issued certificate - do not fail. - logf("[ERROR][%s] acme: Could not bundle issuer certificate: %v", cert.Domain, err) - } else { - // Success - append the issuer cert to the issued cert. - issuerCert = pemEncode(derCertificateBytes(issuerCert)) - issuedCert = append(issuedCert, issuerCert...) - } - } - - cert.Certificate = issuedCert - return cert, nil - } - - var privKey crypto.PrivateKey - if cert.PrivateKey != nil { - privKey, err = parsePEMPrivateKey(cert.PrivateKey) - if err != nil { - return CertificateResource{}, err - } - } - - var domains []string - var failures map[string]error - // check for SAN certificate - if len(x509Cert.DNSNames) > 1 { - domains = append(domains, x509Cert.Subject.CommonName) - for _, sanDomain := range x509Cert.DNSNames { - if sanDomain == x509Cert.Subject.CommonName { - continue - } - domains = append(domains, sanDomain) - } - } else { - domains = append(domains, x509Cert.Subject.CommonName) - } - - newCert, failures := c.ObtainCertificate(domains, bundle, privKey) - return newCert, failures[cert.Domain] -} - -// Looks through the challenge combinations to find a solvable match. -// Then solves the challenges in series and returns. -func (c *Client) solveChallenges(challenges []authorizationResource) map[string]error { - // loop through the resources, basically through the domains. - failures := make(map[string]error) - for _, authz := range challenges { - // no solvers - no solving - if solvers := c.chooseSolvers(authz.Body, authz.Domain); solvers != nil { - for i, solver := range solvers { - // TODO: do not immediately fail if one domain fails to validate. 
- err := solver.Solve(authz.Body.Challenges[i], authz.Domain) - if err != nil { - failures[authz.Domain] = err - } - } - } else { - failures[authz.Domain] = fmt.Errorf("[%s] acme: Could not determine solvers", authz.Domain) - } - } - - return failures -} - -// Checks all combinations from the server and returns an array of -// solvers which should get executed in series. -func (c *Client) chooseSolvers(auth authorization, domain string) map[int]solver { - for _, combination := range auth.Combinations { - solvers := make(map[int]solver) - for _, idx := range combination { - if solver, ok := c.solvers[auth.Challenges[idx].Type]; ok { - solvers[idx] = solver - } else { - logf("[INFO][%s] acme: Could not find solver for: %s", domain, auth.Challenges[idx].Type) - } - } - - // If we can solve the whole combination, return the solvers - if len(solvers) == len(combination) { - return solvers - } - } - return nil -} - -// Get the challenges needed to proof our identifier to the ACME server. -func (c *Client) getChallenges(domains []string) ([]authorizationResource, map[string]error) { - resc, errc := make(chan authorizationResource), make(chan domainError) - - for _, domain := range domains { - go func(domain string) { - authMsg := authorization{Resource: "new-authz", Identifier: identifier{Type: "dns", Value: domain}} - var authz authorization - hdr, err := postJSON(c.jws, c.user.GetRegistration().NewAuthzURL, authMsg, &authz) - if err != nil { - errc <- domainError{Domain: domain, Error: err} - return - } - - links := parseLinks(hdr["Link"]) - if links["next"] == "" { - logf("[ERROR][%s] acme: Server did not provide next link to proceed", domain) - return - } - - resc <- authorizationResource{Body: authz, NewCertURL: links["next"], AuthURL: hdr.Get("Location"), Domain: domain} - }(domain) - } - - responses := make(map[string]authorizationResource) - failures := make(map[string]error) - for i := 0; i < len(domains); i++ { - select { - case res := <-resc: - responses[res.Domain] = res - case err := <-errc: - failures[err.Domain] = err.Error - } - } - - challenges := make([]authorizationResource, 0, len(responses)) - for _, domain := range domains { - if challenge, ok := responses[domain]; ok { - challenges = append(challenges, challenge) - } - } - - close(resc) - close(errc) - - return challenges, failures -} - -func (c *Client) requestCertificate(authz []authorizationResource, bundle bool, privKey crypto.PrivateKey) (CertificateResource, error) { - if len(authz) == 0 { - return CertificateResource{}, errors.New("Passed no authorizations to requestCertificate!") - } - - commonName := authz[0] - var err error - if privKey == nil { - privKey, err = generatePrivateKey(c.keyType) - if err != nil { - return CertificateResource{}, err - } - } - - var san []string - var authURLs []string - for _, auth := range authz[1:] { - san = append(san, auth.Domain) - authURLs = append(authURLs, auth.AuthURL) - } - - // TODO: should the CSR be customizable? 
- csr, err := generateCsr(privKey, commonName.Domain, san) - if err != nil { - return CertificateResource{}, err - } - - csrString := base64.URLEncoding.EncodeToString(csr) - jsonBytes, err := json.Marshal(csrMessage{Resource: "new-cert", Csr: csrString, Authorizations: authURLs}) - if err != nil { - return CertificateResource{}, err - } - - resp, err := c.jws.post(commonName.NewCertURL, jsonBytes) - if err != nil { - return CertificateResource{}, err - } - - privateKeyPem := pemEncode(privKey) - cerRes := CertificateResource{ - Domain: commonName.Domain, - CertURL: resp.Header.Get("Location"), - PrivateKey: privateKeyPem} - - for { - switch resp.StatusCode { - case 201, 202: - cert, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024)) - resp.Body.Close() - if err != nil { - return CertificateResource{}, err - } - - // The server returns a body with a length of zero if the - // certificate was not ready at the time this request completed. - // Otherwise the body is the certificate. - if len(cert) > 0 { - - cerRes.CertStableURL = resp.Header.Get("Content-Location") - cerRes.AccountRef = c.user.GetRegistration().URI - - issuedCert := pemEncode(derCertificateBytes(cert)) - // If bundle is true, we want to return a certificate bundle. - // To do this, we need the issuer certificate. - if bundle { - // The issuer certificate link is always supplied via an "up" link - // in the response headers of a new certificate. - links := parseLinks(resp.Header["Link"]) - issuerCert, err := c.getIssuerCertificate(links["up"]) - if err != nil { - // If we fail to acquire the issuer cert, return the issued certificate - do not fail. - logf("[WARNING][%s] acme: Could not bundle issuer certificate: %v", commonName.Domain, err) - } else { - // Success - append the issuer cert to the issued cert. - issuerCert = pemEncode(derCertificateBytes(issuerCert)) - issuedCert = append(issuedCert, issuerCert...) - } - } - - cerRes.Certificate = issuedCert - logf("[INFO][%s] Server responded with a certificate.", commonName.Domain) - return cerRes, nil - } - - // The certificate was granted but is not yet issued. - // Check retry-after and loop. - ra := resp.Header.Get("Retry-After") - retryAfter, err := strconv.Atoi(ra) - if err != nil { - return CertificateResource{}, err - } - - logf("[INFO][%s] acme: Server responded with status 202; retrying after %ds", commonName.Domain, retryAfter) - time.Sleep(time.Duration(retryAfter) * time.Second) - - break - default: - return CertificateResource{}, handleHTTPError(resp) - } - - resp, err = httpGet(cerRes.CertURL) - if err != nil { - return CertificateResource{}, err - } - } -} - -// getIssuerCertificate requests the issuer certificate and caches it for -// subsequent requests. 
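requestCertificate above blocks until the CA actually issues the certificate, treating an empty 201/202 body as "granted but not yet issued" and sleeping for whatever Retry-After the server sends. A reduced sketch of that polling shape, using a plain HTTP client rather than the JWS-aware one the deleted code wires in (the URL is illustrative):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"strconv"
	"time"
)

// pollCertificate fetches certURL until the body is non-empty, honoring
// Retry-After between attempts the way requestCertificate does.
func pollCertificate(certURL string) ([]byte, error) {
	for {
		resp, err := http.Get(certURL)
		if err != nil {
			return nil, err
		}
		if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusAccepted {
			resp.Body.Close()
			return nil, fmt.Errorf("unexpected status %d", resp.StatusCode)
		}
		body, err := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			return nil, err
		}
		// An empty body means the certificate was granted but not yet issued.
		if len(body) > 0 {
			return body, nil
		}
		ra, err := strconv.Atoi(resp.Header.Get("Retry-After"))
		if err != nil {
			return nil, err
		}
		time.Sleep(time.Duration(ra) * time.Second)
	}
}

func main() {
	cert, err := pollCertificate("https://ca.example/acme/cert/123") // illustrative URL
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("fetched %d certificate bytes", len(cert))
}
```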
-func (c *Client) getIssuerCertificate(url string) ([]byte, error) { - logf("[INFO] acme: Requesting issuer cert from %s", url) - if c.issuerCert != nil { - return c.issuerCert, nil - } - - resp, err := httpGet(url) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - issuerBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024)) - if err != nil { - return nil, err - } - - _, err = x509.ParseCertificate(issuerBytes) - if err != nil { - return nil, err - } - - c.issuerCert = issuerBytes - return issuerBytes, err -} - -func parseLinks(links []string) map[string]string { - aBrkt := regexp.MustCompile("[<>]") - slver := regexp.MustCompile("(.+) *= *\"(.+)\"") - linkMap := make(map[string]string) - - for _, link := range links { - - link = aBrkt.ReplaceAllString(link, "") - parts := strings.Split(link, ";") - - matches := slver.FindStringSubmatch(parts[1]) - if len(matches) > 0 { - linkMap[matches[2]] = parts[0] - } - } - - return linkMap -} - -// validate makes the ACME server start validating a -// challenge response, only returning once it is done. -func validate(j *jws, domain, uri string, chlng challenge) error { - var challengeResponse challenge - - hdr, err := postJSON(j, uri, chlng, &challengeResponse) - if err != nil { - return err - } - - // After the path is sent, the ACME server will access our server. - // Repeatedly check the server for an updated status on our request. - for { - switch challengeResponse.Status { - case "valid": - logf("[INFO][%s] The server validated our request", domain) - return nil - case "pending": - break - case "invalid": - return handleChallengeError(challengeResponse) - default: - return errors.New("The server returned an unexpected state.") - } - - ra, err := strconv.Atoi(hdr.Get("Retry-After")) - if err != nil { - // The ACME server MUST return a Retry-After. - // If it doesn't, we'll just poll hard. 
- ra = 1 - } - time.Sleep(time.Duration(ra) * time.Second) - - hdr, err = getJSON(uri, &challengeResponse) - if err != nil { - return err - } - } -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client_test.go deleted file mode 100644 index e309554f..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client_test.go +++ /dev/null @@ -1,198 +0,0 @@ -package acme - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "encoding/json" - "net" - "net/http" - "net/http/httptest" - "strings" - "testing" -) - -func TestNewClient(t *testing.T) { - keyBits := 32 // small value keeps test fast - keyType := RSA2048 - key, err := rsa.GenerateKey(rand.Reader, keyBits) - if err != nil { - t.Fatal("Could not generate test key:", err) - } - user := mockUser{ - email: "test@test.com", - regres: new(RegistrationResource), - privatekey: key, - } - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - data, _ := json.Marshal(directory{NewAuthzURL: "http://test", NewCertURL: "http://test", NewRegURL: "http://test", RevokeCertURL: "http://test"}) - w.Write(data) - })) - - client, err := NewClient(ts.URL, user, keyType) - if err != nil { - t.Fatalf("Could not create client: %v", err) - } - - if client.jws == nil { - t.Fatalf("Expected client.jws to not be nil") - } - if expected, actual := key, client.jws.privKey; actual != expected { - t.Errorf("Expected jws.privKey to be %p but was %p", expected, actual) - } - - if client.keyType != keyType { - t.Errorf("Expected keyType to be %s but was %s", keyType, client.keyType) - } - - if expected, actual := 2, len(client.solvers); actual != expected { - t.Fatalf("Expected %d solver(s), got %d", expected, actual) - } -} - -func TestClientOptPort(t *testing.T) { - keyBits := 32 // small value keeps test fast - key, err := rsa.GenerateKey(rand.Reader, keyBits) - if err != nil { - t.Fatal("Could not generate test key:", err) - } - user := mockUser{ - email: "test@test.com", - regres: new(RegistrationResource), - privatekey: key, - } - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - data, _ := json.Marshal(directory{NewAuthzURL: "http://test", NewCertURL: "http://test", NewRegURL: "http://test", RevokeCertURL: "http://test"}) - w.Write(data) - })) - - optPort := "1234" - optHost := "" - client, err := NewClient(ts.URL, user, RSA2048) - if err != nil { - t.Fatalf("Could not create client: %v", err) - } - client.SetHTTPAddress(net.JoinHostPort(optHost, optPort)) - client.SetTLSAddress(net.JoinHostPort(optHost, optPort)) - - httpSolver, ok := client.solvers[HTTP01].(*httpChallenge) - if !ok { - t.Fatal("Expected http-01 solver to be httpChallenge type") - } - if httpSolver.jws != client.jws { - t.Error("Expected http-01 to have same jws as client") - } - if got := httpSolver.provider.(*HTTPProviderServer).port; got != optPort { - t.Errorf("Expected http-01 to have port %s but was %s", optPort, got) - } - if got := httpSolver.provider.(*HTTPProviderServer).iface; got != optHost { - t.Errorf("Expected http-01 to have iface %s but was %s", optHost, got) - } - - httpsSolver, ok := client.solvers[TLSSNI01].(*tlsSNIChallenge) - if !ok { - t.Fatal("Expected tls-sni-01 solver to be httpChallenge type") - } - if httpsSolver.jws != client.jws { - t.Error("Expected 
tls-sni-01 to have same jws as client") - } - if got := httpsSolver.provider.(*TLSProviderServer).port; got != optPort { - t.Errorf("Expected tls-sni-01 to have port %s but was %s", optPort, got) - } - if got := httpsSolver.provider.(*TLSProviderServer).iface; got != optHost { - t.Errorf("Expected tls-sni-01 to have port %s but was %s", optHost, got) - } - - // test setting different host - optHost = "127.0.0.1" - client.SetHTTPAddress(net.JoinHostPort(optHost, optPort)) - client.SetTLSAddress(net.JoinHostPort(optHost, optPort)) - - if got := httpSolver.provider.(*HTTPProviderServer).iface; got != optHost { - t.Errorf("Expected http-01 to have iface %s but was %s", optHost, got) - } - if got := httpsSolver.provider.(*TLSProviderServer).port; got != optPort { - t.Errorf("Expected tls-sni-01 to have port %s but was %s", optPort, got) - } -} - -func TestValidate(t *testing.T) { - var statuses []string - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Minimal stub ACME server for validation. - w.Header().Add("Replay-Nonce", "12345") - w.Header().Add("Retry-After", "0") - switch r.Method { - case "HEAD": - case "POST": - st := statuses[0] - statuses = statuses[1:] - writeJSONResponse(w, &challenge{Type: "http-01", Status: st, URI: "http://example.com/", Token: "token"}) - - case "GET": - st := statuses[0] - statuses = statuses[1:] - writeJSONResponse(w, &challenge{Type: "http-01", Status: st, URI: "http://example.com/", Token: "token"}) - - default: - http.Error(w, r.Method, http.StatusMethodNotAllowed) - } - })) - defer ts.Close() - - privKey, _ := rsa.GenerateKey(rand.Reader, 512) - j := &jws{privKey: privKey, directoryURL: ts.URL} - - tsts := []struct { - name string - statuses []string - want string - }{ - {"POST-unexpected", []string{"weird"}, "unexpected"}, - {"POST-valid", []string{"valid"}, ""}, - {"POST-invalid", []string{"invalid"}, "Error Detail"}, - {"GET-unexpected", []string{"pending", "weird"}, "unexpected"}, - {"GET-valid", []string{"pending", "valid"}, ""}, - {"GET-invalid", []string{"pending", "invalid"}, "Error Detail"}, - } - - for _, tst := range tsts { - statuses = tst.statuses - if err := validate(j, "example.com", ts.URL, challenge{Type: "http-01", Token: "token"}); err == nil && tst.want != "" { - t.Errorf("[%s] validate: got error %v, want something with %q", tst.name, err, tst.want) - } else if err != nil && !strings.Contains(err.Error(), tst.want) { - t.Errorf("[%s] validate: got error %v, want something with %q", tst.name, err, tst.want) - } - } -} - -// writeJSONResponse marshals the body as JSON and writes it to the response. -func writeJSONResponse(w http.ResponseWriter, body interface{}) { - bs, err := json.Marshal(body) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - w.Header().Set("Content-Type", "application/json") - if _, err := w.Write(bs); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } -} - -// stubValidate is like validate, except it does nothing. 
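Registration and issuance above both lean on parseLinks (earlier in this deleted client.go) to pull rel-tagged URLs out of Link headers. A standalone sketch of the same parsing, with a guard added for headers that carry no parameters:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// parseLinks mirrors the deleted helper: it maps each rel value to its URL,
// e.g. `<https://ca.example/next>;rel="next"` becomes links["next"].
func parseLinks(links []string) map[string]string {
	brackets := regexp.MustCompile("[<>]")
	rel := regexp.MustCompile(`(.+) *= *"(.+)"`)
	out := make(map[string]string)
	for _, link := range links {
		link = brackets.ReplaceAllString(link, "")
		parts := strings.Split(link, ";")
		if len(parts) < 2 {
			continue // no rel parameter; the original would panic here
		}
		if m := rel.FindStringSubmatch(parts[1]); len(m) > 0 {
			out[m[2]] = parts[0]
		}
	}
	return out
}

func main() {
	hdr := []string{
		`<https://ca.example/acme/new-cert>;rel="next"`,
		`<https://ca.example/terms>;rel="terms-of-service"`,
	}
	links := parseLinks(hdr)
	fmt.Println(links["next"])             // https://ca.example/acme/new-cert
	fmt.Println(links["terms-of-service"]) // https://ca.example/terms
}
```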
-func stubValidate(j *jws, domain, uri string, chlng challenge) error { - return nil -} - -type mockUser struct { - email string - regres *RegistrationResource - privatekey *rsa.PrivateKey -} - -func (u mockUser) GetEmail() string { return u.email } -func (u mockUser) GetRegistration() *RegistrationResource { return u.regres } -func (u mockUser) GetPrivateKey() crypto.PrivateKey { return u.privatekey } diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto.go deleted file mode 100644 index fc20442f..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto.go +++ /dev/null @@ -1,323 +0,0 @@ -package acme - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/base64" - "encoding/pem" - "errors" - "fmt" - "io" - "io/ioutil" - "math/big" - "net/http" - "strings" - "time" - - "golang.org/x/crypto/ocsp" -) - -// KeyType represents the key algo as well as the key size or curve to use. -type KeyType string -type derCertificateBytes []byte - -// Constants for all key types we support. -const ( - EC256 = KeyType("P256") - EC384 = KeyType("P384") - RSA2048 = KeyType("2048") - RSA4096 = KeyType("4096") - RSA8192 = KeyType("8192") -) - -const ( - // OCSPGood means that the certificate is valid. - OCSPGood = ocsp.Good - // OCSPRevoked means that the certificate has been deliberately revoked. - OCSPRevoked = ocsp.Revoked - // OCSPUnknown means that the OCSP responder doesn't know about the certificate. - OCSPUnknown = ocsp.Unknown - // OCSPServerFailed means that the OCSP responder failed to process the request. - OCSPServerFailed = ocsp.ServerFailed -) - -// GetOCSPForCert takes a PEM encoded cert or cert bundle returning the raw OCSP response, -// the parsed response, and an error, if any. The returned []byte can be passed directly -// into the OCSPStaple property of a tls.Certificate. If the bundle only contains the -// issued certificate, this function will try to get the issuer certificate from the -// IssuingCertificateURL in the certificate. If the []byte and/or ocsp.Response return -// values are nil, the OCSP status may be assumed OCSPUnknown. -func GetOCSPForCert(bundle []byte) ([]byte, *ocsp.Response, error) { - certificates, err := parsePEMBundle(bundle) - if err != nil { - return nil, nil, err - } - - // We expect the certificate slice to be ordered downwards the chain. - // SRV CRT -> CA. We need to pull the leaf and issuer certs out of it, - // which should always be the first two certificates. If there's no - // OCSP server listed in the leaf cert, there's nothing to do. And if - // we have only one certificate so far, we need to get the issuer cert. - issuedCert := certificates[0] - if len(issuedCert.OCSPServer) == 0 { - return nil, nil, errors.New("no OCSP server specified in cert") - } - if len(certificates) == 1 { - // TODO: build fallback. If this fails, check the remaining array entries. 
- if len(issuedCert.IssuingCertificateURL) == 0 { - return nil, nil, errors.New("no issuing certificate URL") - } - - resp, err := httpGet(issuedCert.IssuingCertificateURL[0]) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - issuerBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024)) - if err != nil { - return nil, nil, err - } - - issuerCert, err := x509.ParseCertificate(issuerBytes) - if err != nil { - return nil, nil, err - } - - // Insert it into the slice on position 0 - // We want it ordered right SRV CRT -> CA - certificates = append(certificates, issuerCert) - } - issuerCert := certificates[1] - - // Finally kick off the OCSP request. - ocspReq, err := ocsp.CreateRequest(issuedCert, issuerCert, nil) - if err != nil { - return nil, nil, err - } - - reader := bytes.NewReader(ocspReq) - req, err := httpPost(issuedCert.OCSPServer[0], "application/ocsp-request", reader) - if err != nil { - return nil, nil, err - } - defer req.Body.Close() - - ocspResBytes, err := ioutil.ReadAll(limitReader(req.Body, 1024*1024)) - ocspRes, err := ocsp.ParseResponse(ocspResBytes, issuerCert) - if err != nil { - return nil, nil, err - } - - if ocspRes.Certificate == nil { - err = ocspRes.CheckSignatureFrom(issuerCert) - if err != nil { - return nil, nil, err - } - } - - return ocspResBytes, ocspRes, nil -} - -func getKeyAuthorization(token string, key interface{}) (string, error) { - var publicKey crypto.PublicKey - switch k := key.(type) { - case *ecdsa.PrivateKey: - publicKey = k.Public() - case *rsa.PrivateKey: - publicKey = k.Public() - } - - // Generate the Key Authorization for the challenge - jwk := keyAsJWK(publicKey) - if jwk == nil { - return "", errors.New("Could not generate JWK from key.") - } - thumbBytes, err := jwk.Thumbprint(crypto.SHA256) - if err != nil { - return "", err - } - - // unpad the base64URL - keyThumb := base64.URLEncoding.EncodeToString(thumbBytes) - index := strings.Index(keyThumb, "=") - if index != -1 { - keyThumb = keyThumb[:index] - } - - return token + "." + keyThumb, nil -} - -// parsePEMBundle parses a certificate bundle from top to bottom and returns -// a slice of x509 certificates. This function will error if no certificates are found. 
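getKeyAuthorization above forms the ACME key authorization as token + "." + base64url(SHA-256 JWK thumbprint), stripping the '=' padding by hand. The sketch below uses stand-in thumbprint bytes and shows that base64.RawURLEncoding produces the same unpadded form directly:

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// Stand-in for the JWK thumbprint bytes; the deleted code gets these
	// from jwk.Thumbprint(crypto.SHA256).
	thumb := sha256.Sum256([]byte("illustrative account key material"))

	// The deleted helper pads with URLEncoding and trims '=' manually...
	padded := base64.URLEncoding.EncodeToString(thumb[:])
	manual := strings.TrimRight(padded, "=")

	// ...which is exactly what RawURLEncoding emits in one step.
	raw := base64.RawURLEncoding.EncodeToString(thumb[:])
	fmt.Println(manual == raw) // true

	token := "token" // illustrative challenge token
	fmt.Println(token + "." + raw)
}
```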
-func parsePEMBundle(bundle []byte) ([]*x509.Certificate, error) { - var certificates []*x509.Certificate - var certDERBlock *pem.Block - - for { - certDERBlock, bundle = pem.Decode(bundle) - if certDERBlock == nil { - break - } - - if certDERBlock.Type == "CERTIFICATE" { - cert, err := x509.ParseCertificate(certDERBlock.Bytes) - if err != nil { - return nil, err - } - certificates = append(certificates, cert) - } - } - - if len(certificates) == 0 { - return nil, errors.New("No certificates were found while parsing the bundle.") - } - - return certificates, nil -} - -func parsePEMPrivateKey(key []byte) (crypto.PrivateKey, error) { - keyBlock, _ := pem.Decode(key) - - switch keyBlock.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(keyBlock.Bytes) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(keyBlock.Bytes) - default: - return nil, errors.New("Unknown PEM header value") - } -} - -func generatePrivateKey(keyType KeyType) (crypto.PrivateKey, error) { - - switch keyType { - case EC256: - return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - case EC384: - return ecdsa.GenerateKey(elliptic.P384(), rand.Reader) - case RSA2048: - return rsa.GenerateKey(rand.Reader, 2048) - case RSA4096: - return rsa.GenerateKey(rand.Reader, 4096) - case RSA8192: - return rsa.GenerateKey(rand.Reader, 8192) - } - - return nil, fmt.Errorf("Invalid KeyType: %s", keyType) -} - -func generateCsr(privateKey crypto.PrivateKey, domain string, san []string) ([]byte, error) { - template := x509.CertificateRequest{ - Subject: pkix.Name{ - CommonName: domain, - }, - } - - if len(san) > 0 { - template.DNSNames = san - } - - return x509.CreateCertificateRequest(rand.Reader, &template, privateKey) -} - -func pemEncode(data interface{}) []byte { - var pemBlock *pem.Block - switch key := data.(type) { - case *ecdsa.PrivateKey: - keyBytes, _ := x509.MarshalECPrivateKey(key) - pemBlock = &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes} - case *rsa.PrivateKey: - pemBlock = &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)} - break - case derCertificateBytes: - pemBlock = &pem.Block{Type: "CERTIFICATE", Bytes: []byte(data.(derCertificateBytes))} - } - - return pem.EncodeToMemory(pemBlock) -} - -func pemDecode(data []byte) (*pem.Block, error) { - pemBlock, _ := pem.Decode(data) - if pemBlock == nil { - return nil, fmt.Errorf("Pem decode did not yield a valid block. Is the certificate in the right format?") - } - - return pemBlock, nil -} - -func pemDecodeTox509(pem []byte) (*x509.Certificate, error) { - pemBlock, err := pemDecode(pem) - if pemBlock == nil { - return nil, err - } - - return x509.ParseCertificate(pemBlock.Bytes) -} - -// GetPEMCertExpiration returns the "NotAfter" date of a PEM encoded certificate. -// The certificate has to be PEM encoded. Any other encodings like DER will fail. -func GetPEMCertExpiration(cert []byte) (time.Time, error) { - pemBlock, err := pemDecode(cert) - if pemBlock == nil { - return time.Time{}, err - } - - return getCertExpiration(pemBlock.Bytes) -} - -// getCertExpiration returns the "NotAfter" date of a DER encoded certificate. 
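The PEM helpers above are unexported, but their behavior is easy to reproduce with the standard library alone. This sketch mints a throwaway self-signed certificate, then walks the bundle with the same decode loop parsePEMBundle uses and prints each certificate's NotAfter (the value GetPEMCertExpiration reports):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"log"
	"math/big"
	"time"
)

func main() {
	// Build a throwaway self-signed certificate so the bundle walk below
	// has something real to parse.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	tmpl := x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "example.com"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(90 * 24 * time.Hour),
	}
	der, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &key.PublicKey, key)
	if err != nil {
		log.Fatal(err)
	}
	bundle := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})

	// The same loop parsePEMBundle uses: peel CERTIFICATE blocks off the
	// front of the bundle until none remain.
	for {
		var block *pem.Block
		block, bundle = pem.Decode(bundle)
		if block == nil {
			break
		}
		if block.Type != "CERTIFICATE" {
			continue
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s expires %s\n", cert.Subject.CommonName, cert.NotAfter)
	}
}
```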
-func getCertExpiration(cert []byte) (time.Time, error) { - pCert, err := x509.ParseCertificate(cert) - if err != nil { - return time.Time{}, err - } - - return pCert.NotAfter, nil -} - -func generatePemCert(privKey *rsa.PrivateKey, domain string) ([]byte, error) { - derBytes, err := generateDerCert(privKey, time.Time{}, domain) - if err != nil { - return nil, err - } - - return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes}), nil -} - -func generateDerCert(privKey *rsa.PrivateKey, expiration time.Time, domain string) ([]byte, error) { - serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) - if err != nil { - return nil, err - } - - if expiration.IsZero() { - expiration = time.Now().Add(365) - } - - template := x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{ - CommonName: "ACME Challenge TEMP", - }, - NotBefore: time.Now(), - NotAfter: expiration, - - KeyUsage: x509.KeyUsageKeyEncipherment, - BasicConstraintsValid: true, - DNSNames: []string{domain}, - } - - return x509.CreateCertificate(rand.Reader, &template, &template, &privKey.PublicKey, privKey) -} - -func limitReader(rd io.ReadCloser, numBytes int64) io.ReadCloser { - return http.MaxBytesReader(nil, rd, numBytes) -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto_test.go deleted file mode 100644 index d2fc5088..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package acme - -import ( - "bytes" - "crypto/rand" - "crypto/rsa" - "testing" - "time" -) - -func TestGeneratePrivateKey(t *testing.T) { - key, err := generatePrivateKey(RSA2048) - if err != nil { - t.Error("Error generating private key:", err) - } - if key == nil { - t.Error("Expected key to not be nil, but it was") - } -} - -func TestGenerateCSR(t *testing.T) { - key, err := rsa.GenerateKey(rand.Reader, 512) - if err != nil { - t.Fatal("Error generating private key:", err) - } - - csr, err := generateCsr(key, "fizz.buzz", nil) - if err != nil { - t.Error("Error generating CSR:", err) - } - if csr == nil || len(csr) == 0 { - t.Error("Expected CSR with data, but it was nil or length 0") - } -} - -func TestPEMEncode(t *testing.T) { - buf := bytes.NewBufferString("TestingRSAIsSoMuchFun") - - reader := MockRandReader{b: buf} - key, err := rsa.GenerateKey(reader, 32) - if err != nil { - t.Fatal("Error generating private key:", err) - } - - data := pemEncode(key) - - if data == nil { - t.Fatal("Expected result to not be nil, but it was") - } - if len(data) != 127 { - t.Errorf("Expected PEM encoding to be length 127, but it was %d", len(data)) - } -} - -func TestPEMCertExpiration(t *testing.T) { - privKey, err := generatePrivateKey(RSA2048) - if err != nil { - t.Fatal("Error generating private key:", err) - } - - expiration := time.Now().Add(365) - expiration = expiration.Round(time.Second) - certBytes, err := generateDerCert(privKey.(*rsa.PrivateKey), expiration, "test.com") - if err != nil { - t.Fatal("Error generating cert:", err) - } - - buf := bytes.NewBufferString("TestingRSAIsSoMuchFun") - - // Some random string should return an error. 
- if ctime, err := GetPEMCertExpiration(buf.Bytes()); err == nil { - t.Errorf("Expected getCertExpiration to return an error for garbage string but returned %v", ctime) - } - - // A DER encoded certificate should return an error. - if _, err := GetPEMCertExpiration(certBytes); err == nil { - t.Errorf("Expected getCertExpiration to return an error for DER certificates but returned none.") - } - - // A PEM encoded certificate should work ok. - pemCert := pemEncode(derCertificateBytes(certBytes)) - if ctime, err := GetPEMCertExpiration(pemCert); err != nil || !ctime.Equal(expiration.UTC()) { - t.Errorf("Expected getCertExpiration to return %v but returned %v. Error: %v", expiration, ctime, err) - } -} - -type MockRandReader struct { - b *bytes.Buffer -} - -func (r MockRandReader) Read(p []byte) (int, error) { - return r.b.Read(p) -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/error.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/error.go deleted file mode 100644 index b32561a3..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/error.go +++ /dev/null @@ -1,73 +0,0 @@ -package acme - -import ( - "encoding/json" - "fmt" - "net/http" - "strings" -) - -const ( - tosAgreementError = "Must agree to subscriber agreement before any further actions" -) - -// RemoteError is the base type for all errors specific to the ACME protocol. -type RemoteError struct { - StatusCode int `json:"status,omitempty"` - Type string `json:"type"` - Detail string `json:"detail"` -} - -func (e RemoteError) Error() string { - return fmt.Sprintf("acme: Error %d - %s - %s", e.StatusCode, e.Type, e.Detail) -} - -// TOSError represents the error which is returned if the user needs to -// accept the TOS. -// TODO: include the new TOS url if we can somehow obtain it. 
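One quirk in the deleted crypto.go worth noting: generateDerCert and TestPEMCertExpiration both call time.Now().Add(365). A bare integer converts to time.Duration in nanoseconds, so that adds 365 nanoseconds, not 365 days, leaving the throwaway challenge certificate expired almost as soon as it is minted. Presumably harmless for a self-signed TLS-SNI stand-in, but easy to trip over:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Now()

	// A bare integer Duration is nanoseconds: this is 365ns in the future.
	almostNow := now.Add(365)
	fmt.Println(almostNow.Sub(now)) // 365ns

	// One year out, spelled as an explicit duration.
	oneYear := now.Add(365 * 24 * time.Hour)
	fmt.Println(oneYear.Sub(now)) // 8760h0m0s
}
```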
-type TOSError struct { - RemoteError -} - -type domainError struct { - Domain string - Error error -} - -type challengeError struct { - RemoteError - records []validationRecord -} - -func (c challengeError) Error() string { - - var errStr string - for _, validation := range c.records { - errStr = errStr + fmt.Sprintf("\tValidation for %s:%s\n\tResolved to:\n\t\t%s\n\tUsed: %s\n\n", - validation.Hostname, validation.Port, strings.Join(validation.ResolvedAddresses, "\n\t\t"), validation.UsedAddress) - } - - return fmt.Sprintf("%s\nError Detail:\n%s", c.RemoteError.Error(), errStr) -} - -func handleHTTPError(resp *http.Response) error { - var errorDetail RemoteError - decoder := json.NewDecoder(resp.Body) - err := decoder.Decode(&errorDetail) - if err != nil { - return err - } - - errorDetail.StatusCode = resp.StatusCode - - // Check for errors we handle specifically - if errorDetail.StatusCode == http.StatusForbidden && errorDetail.Detail == tosAgreementError { - return TOSError{errorDetail} - } - - return errorDetail -} - -func handleChallengeError(chlng challenge) error { - return challengeError{chlng.Error, chlng.ValidationRecords} -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http.go deleted file mode 100644 index 410aead6..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http.go +++ /dev/null @@ -1,117 +0,0 @@ -package acme - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "runtime" - "strings" - "time" -) - -// UserAgent (if non-empty) will be tacked onto the User-Agent string in requests. -var UserAgent string - -// defaultClient is an HTTP client with a reasonable timeout value. -var defaultClient = http.Client{Timeout: 10 * time.Second} - -const ( - // defaultGoUserAgent is the Go HTTP package user agent string. Too - // bad it isn't exported. If it changes, we should update it here, too. - defaultGoUserAgent = "Go-http-client/1.1" - - // ourUserAgent is the User-Agent of this underlying library package. - ourUserAgent = "xenolf-acme" -) - -// httpHead performs a HEAD request with a proper User-Agent string. -// The response body (resp.Body) is already closed when this function returns. -func httpHead(url string) (resp *http.Response, err error) { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - - req.Header.Set("User-Agent", userAgent()) - - resp, err = defaultClient.Do(req) - if err != nil { - return resp, err - } - resp.Body.Close() - return resp, err -} - -// httpPost performs a POST request with a proper User-Agent string. -// Callers should close resp.Body when done reading from it. -func httpPost(url string, bodyType string, body io.Reader) (resp *http.Response, err error) { - req, err := http.NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - req.Header.Set("User-Agent", userAgent()) - - return defaultClient.Do(req) -} - -// httpGet performs a GET request with a proper User-Agent string. -// Callers should close resp.Body when done reading from it. 
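handleHTTPError above decodes the CA's JSON problem document into RemoteError and special-cases the terms-of-service response. A trimmed sketch of the same decode against an httptest stub (the error type and detail strings are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"net/http/httptest"
)

// remoteError mirrors the deleted RemoteError shape.
type remoteError struct {
	StatusCode int    `json:"status,omitempty"`
	Type       string `json:"type"`
	Detail     string `json:"detail"`
}

func (e remoteError) Error() string {
	return fmt.Sprintf("acme: Error %d - %s - %s", e.StatusCode, e.Type, e.Detail)
}

func main() {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusForbidden)
		json.NewEncoder(w).Encode(map[string]string{
			"type":   "urn:acme:error:unauthorized",
			"detail": "illustrative error detail",
		})
	}))
	defer ts.Close()

	resp, err := http.Get(ts.URL)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var re remoteError
	if err := json.NewDecoder(resp.Body).Decode(&re); err != nil {
		log.Fatal(err)
	}
	// The deleted handleHTTPError copies the HTTP status in the same way.
	re.StatusCode = resp.StatusCode
	fmt.Println(re.Error()) // acme: Error 403 - urn:acme:error:unauthorized - illustrative error detail
}
```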
-func httpGet(url string) (resp *http.Response, err error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - req.Header.Set("User-Agent", userAgent()) - - return defaultClient.Do(req) -} - -// getJSON performs an HTTP GET request and parses the response body -// as JSON, into the provided respBody object. -func getJSON(uri string, respBody interface{}) (http.Header, error) { - resp, err := httpGet(uri) - if err != nil { - return nil, fmt.Errorf("failed to get %q: %v", uri, err) - } - defer resp.Body.Close() - - if resp.StatusCode >= http.StatusBadRequest { - return resp.Header, handleHTTPError(resp) - } - - return resp.Header, json.NewDecoder(resp.Body).Decode(respBody) -} - -// postJSON performs an HTTP POST request and parses the response body -// as JSON, into the provided respBody object. -func postJSON(j *jws, uri string, reqBody, respBody interface{}) (http.Header, error) { - jsonBytes, err := json.Marshal(reqBody) - if err != nil { - return nil, errors.New("Failed to marshal network message...") - } - - resp, err := j.post(uri, jsonBytes) - if err != nil { - return nil, fmt.Errorf("Failed to post JWS message. -> %v", err) - } - defer resp.Body.Close() - - if resp.StatusCode >= http.StatusBadRequest { - return resp.Header, handleHTTPError(resp) - } - - if respBody == nil { - return resp.Header, nil - } - - return resp.Header, json.NewDecoder(resp.Body).Decode(respBody) -} - -// userAgent builds and returns the User-Agent string to use in requests. -func userAgent() string { - ua := fmt.Sprintf("%s (%s; %s) %s %s", defaultGoUserAgent, runtime.GOOS, runtime.GOARCH, ourUserAgent, UserAgent) - return strings.TrimSpace(ua) -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge.go deleted file mode 100644 index 95cb1fd8..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge.go +++ /dev/null @@ -1,41 +0,0 @@ -package acme - -import ( - "fmt" - "log" -) - -type httpChallenge struct { - jws *jws - validate validateFunc - provider ChallengeProvider -} - -// HTTP01ChallengePath returns the URL path for the `http-01` challenge -func HTTP01ChallengePath(token string) string { - return "/.well-known/acme-challenge/" + token -} - -func (s *httpChallenge) Solve(chlng challenge, domain string) error { - - logf("[INFO][%s] acme: Trying to solve HTTP-01", domain) - - // Generate the Key Authorization for the challenge - keyAuth, err := getKeyAuthorization(chlng.Token, s.jws.privKey) - if err != nil { - return err - } - - err = s.provider.Present(domain, chlng.Token, keyAuth) - if err != nil { - return fmt.Errorf("[%s] error presenting token: %v", domain, err) - } - defer func() { - err := s.provider.CleanUp(domain, chlng.Token, keyAuth) - if err != nil { - log.Printf("[%s] error cleaning up: %v", domain, err) - } - }() - - return s.validate(s.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth}) -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_server.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_server.go deleted file mode 100644 index 42541380..00000000 --- 
a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_server.go +++ /dev/null @@ -1,79 +0,0 @@ -package acme - -import ( - "fmt" - "net" - "net/http" - "strings" -) - -// HTTPProviderServer implements ChallengeProvider for `http-01` challenge -// It may be instantiated without using the NewHTTPProviderServer function if -// you want only to use the default values. -type HTTPProviderServer struct { - iface string - port string - done chan bool - listener net.Listener -} - -// NewHTTPProviderServer creates a new HTTPProviderServer on the selected interface and port. -// Setting iface and / or port to an empty string will make the server fall back to -// the "any" interface and port 80 respectively. -func NewHTTPProviderServer(iface, port string) *HTTPProviderServer { - return &HTTPProviderServer{iface: iface, port: port} -} - -// Present starts a web server and makes the token available at `HTTP01ChallengePath(token)` for web requests. -func (s *HTTPProviderServer) Present(domain, token, keyAuth string) error { - if s.port == "" { - s.port = "80" - } - - var err error - s.listener, err = net.Listen("tcp", net.JoinHostPort(s.iface, s.port)) - if err != nil { - return fmt.Errorf("Could not start HTTP server for challenge -> %v", err) - } - - s.done = make(chan bool) - go s.serve(domain, token, keyAuth) - return nil -} - -// CleanUp closes the HTTP server and removes the token from `HTTP01ChallengePath(token)` -func (s *HTTPProviderServer) CleanUp(domain, token, keyAuth string) error { - if s.listener == nil { - return nil - } - s.listener.Close() - <-s.done - return nil -} - -func (s *HTTPProviderServer) serve(domain, token, keyAuth string) { - path := HTTP01ChallengePath(token) - - // The handler validates the HOST header and request type. - // For validation it then writes the token the server returned with the challenge - mux := http.NewServeMux() - mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) { - if strings.HasPrefix(r.Host, domain) && r.Method == "GET" { - w.Header().Add("Content-Type", "text/plain") - w.Write([]byte(keyAuth)) - logf("[INFO][%s] Served key authentication", domain) - } else { - logf("[INFO] Received request for domain %s with method %s", r.Host, r.Method) - w.Write([]byte("TEST")) - } - }) - - httpServer := &http.Server{ - Handler: mux, - } - // Once httpServer is shut down we don't want any lingering - // connections, so disable KeepAlives. 
- httpServer.SetKeepAlivesEnabled(false) - httpServer.Serve(s.listener) - s.done <- true -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_test.go deleted file mode 100644 index fdd8f4d2..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package acme - -import ( - "crypto/rand" - "crypto/rsa" - "io/ioutil" - "strings" - "testing" -) - -func TestHTTPChallenge(t *testing.T) { - privKey, _ := rsa.GenerateKey(rand.Reader, 512) - j := &jws{privKey: privKey} - clientChallenge := challenge{Type: HTTP01, Token: "http1"} - mockValidate := func(_ *jws, _, _ string, chlng challenge) error { - uri := "http://localhost:23457/.well-known/acme-challenge/" + chlng.Token - resp, err := httpGet(uri) - if err != nil { - return err - } - defer resp.Body.Close() - - if want := "text/plain"; resp.Header.Get("Content-Type") != want { - t.Errorf("Get(%q) Content-Type: got %q, want %q", uri, resp.Header.Get("Content-Type"), want) - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - bodyStr := string(body) - - if bodyStr != chlng.KeyAuthorization { - t.Errorf("Get(%q) Body: got %q, want %q", uri, bodyStr, chlng.KeyAuthorization) - } - - return nil - } - solver := &httpChallenge{jws: j, validate: mockValidate, provider: &HTTPProviderServer{port: "23457"}} - - if err := solver.Solve(clientChallenge, "localhost:23457"); err != nil { - t.Errorf("Solve error: got %v, want nil", err) - } -} - -func TestHTTPChallengeInvalidPort(t *testing.T) { - privKey, _ := rsa.GenerateKey(rand.Reader, 128) - j := &jws{privKey: privKey} - clientChallenge := challenge{Type: HTTP01, Token: "http2"} - solver := &httpChallenge{jws: j, validate: stubValidate, provider: &HTTPProviderServer{port: "123456"}} - - if err := solver.Solve(clientChallenge, "localhost:123456"); err == nil { - t.Errorf("Solve error: got %v, want error", err) - } else if want := "invalid port 123456"; !strings.HasSuffix(err.Error(), want) { - t.Errorf("Solve error: got %q, want suffix %q", err.Error(), want) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_test.go deleted file mode 100644 index 33a48a33..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package acme - -import ( - "net/http" - "net/http/httptest" - "strings" - "testing" -) - -func TestHTTPHeadUserAgent(t *testing.T) { - var ua, method string - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ua = r.Header.Get("User-Agent") - method = r.Method - })) - defer ts.Close() - - _, err := httpHead(ts.URL) - if err != nil { - t.Fatal(err) - } - - if method != "HEAD" { - t.Errorf("Expected method to be HEAD, got %s", method) - } - if !strings.Contains(ua, ourUserAgent) { - t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua) - } -} - -func TestHTTPGetUserAgent(t *testing.T) { - var ua, method string - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ua = 
r.Header.Get("User-Agent") - method = r.Method - })) - defer ts.Close() - - res, err := httpGet(ts.URL) - if err != nil { - t.Fatal(err) - } - res.Body.Close() - - if method != "GET" { - t.Errorf("Expected method to be GET, got %s", method) - } - if !strings.Contains(ua, ourUserAgent) { - t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua) - } -} - -func TestHTTPPostUserAgent(t *testing.T) { - var ua, method string - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ua = r.Header.Get("User-Agent") - method = r.Method - })) - defer ts.Close() - - res, err := httpPost(ts.URL, "text/plain", strings.NewReader("falalalala")) - if err != nil { - t.Fatal(err) - } - res.Body.Close() - - if method != "POST" { - t.Errorf("Expected method to be POST, got %s", method) - } - if !strings.Contains(ua, ourUserAgent) { - t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua) - } -} - -func TestUserAgent(t *testing.T) { - ua := userAgent() - - if !strings.Contains(ua, defaultGoUserAgent) { - t.Errorf("Expected UA to contain %s, got '%s'", defaultGoUserAgent, ua) - } - if !strings.Contains(ua, ourUserAgent) { - t.Errorf("Expected UA to contain %s, got '%s'", ourUserAgent, ua) - } - if strings.HasSuffix(ua, " ") { - t.Errorf("UA should not have trailing spaces; got '%s'", ua) - } - - // customize the UA by appending a value - UserAgent = "MyApp/1.2.3" - ua = userAgent() - if !strings.Contains(ua, defaultGoUserAgent) { - t.Errorf("Expected UA to contain %s, got '%s'", defaultGoUserAgent, ua) - } - if !strings.Contains(ua, ourUserAgent) { - t.Errorf("Expected UA to contain %s, got '%s'", ourUserAgent, ua) - } - if !strings.Contains(ua, UserAgent) { - t.Errorf("Expected custom UA to contain %s, got '%s'", UserAgent, ua) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/jws.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/jws.go deleted file mode 100644 index 8435d0cf..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/jws.go +++ /dev/null @@ -1,107 +0,0 @@ -package acme - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "fmt" - "net/http" - - "gopkg.in/square/go-jose.v1" -) - -type jws struct { - directoryURL string - privKey crypto.PrivateKey - nonces []string -} - -func keyAsJWK(key interface{}) *jose.JsonWebKey { - switch k := key.(type) { - case *ecdsa.PublicKey: - return &jose.JsonWebKey{Key: k, Algorithm: "EC"} - case *rsa.PublicKey: - return &jose.JsonWebKey{Key: k, Algorithm: "RSA"} - - default: - return nil - } -} - -// Posts a JWS signed message to the specified URL -func (j *jws) post(url string, content []byte) (*http.Response, error) { - signedContent, err := j.signContent(content) - if err != nil { - return nil, err - } - - resp, err := httpPost(url, "application/jose+json", bytes.NewBuffer([]byte(signedContent.FullSerialize()))) - if err != nil { - return nil, err - } - - j.getNonceFromResponse(resp) - - return resp, err -} - -func (j *jws) signContent(content []byte) (*jose.JsonWebSignature, error) { - - var alg jose.SignatureAlgorithm - switch k := j.privKey.(type) { - case *rsa.PrivateKey: - alg = jose.RS256 - case *ecdsa.PrivateKey: - if k.Curve == elliptic.P256() { - alg = jose.ES256 - } else if k.Curve == elliptic.P384() { - alg = jose.ES384 - } - } - - signer, err := 
jose.NewSigner(alg, j.privKey) - if err != nil { - return nil, err - } - signer.SetNonceSource(j) - - signed, err := signer.Sign(content) - if err != nil { - return nil, err - } - return signed, nil -} - -func (j *jws) getNonceFromResponse(resp *http.Response) error { - nonce := resp.Header.Get("Replay-Nonce") - if nonce == "" { - return fmt.Errorf("Server did not respond with a proper nonce header.") - } - - j.nonces = append(j.nonces, nonce) - return nil -} - -func (j *jws) getNonce() error { - resp, err := httpHead(j.directoryURL) - if err != nil { - return err - } - - return j.getNonceFromResponse(resp) -} - -func (j *jws) Nonce() (string, error) { - nonce := "" - if len(j.nonces) == 0 { - err := j.getNonce() - if err != nil { - return nonce, err - } - } - - nonce, j.nonces = j.nonces[len(j.nonces)-1], j.nonces[:len(j.nonces)-1] - return nonce, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/messages.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/messages.go deleted file mode 100644 index d1fac920..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/messages.go +++ /dev/null @@ -1,115 +0,0 @@ -package acme - -import ( - "time" - - "gopkg.in/square/go-jose.v1" -) - -type directory struct { - NewAuthzURL string `json:"new-authz"` - NewCertURL string `json:"new-cert"` - NewRegURL string `json:"new-reg"` - RevokeCertURL string `json:"revoke-cert"` -} - -type recoveryKeyMessage struct { - Length int `json:"length,omitempty"` - Client jose.JsonWebKey `json:"client,omitempty"` - Server jose.JsonWebKey `json:"client,omitempty"` -} - -type registrationMessage struct { - Resource string `json:"resource"` - Contact []string `json:"contact"` - // RecoveryKey recoveryKeyMessage `json:"recoveryKey,omitempty"` -} - -// Registration is returned by the ACME server after the registration -// The client implementation should save this registration somewhere. -type Registration struct { - Resource string `json:"resource,omitempty"` - ID int `json:"id"` - Key jose.JsonWebKey `json:"key"` - Contact []string `json:"contact"` - Agreement string `json:"agreement,omitempty"` - Authorizations string `json:"authorizations,omitempty"` - Certificates string `json:"certificates,omitempty"` - // RecoveryKey recoveryKeyMessage `json:"recoveryKey,omitempty"` -} - -// RegistrationResource represents all important information about a registration -// of which the client needs to keep track itself.
-type RegistrationResource struct { - Body Registration `json:"body,omitempty"` - URI string `json:"uri,omitempty"` - NewAuthzURL string `json:"new_authzr_uri,omitempty"` - TosURL string `json:"terms_of_service,omitempty"` -} - -type authorizationResource struct { - Body authorization - Domain string - NewCertURL string - AuthURL string -} - -type authorization struct { - Resource string `json:"resource,omitempty"` - Identifier identifier `json:"identifier"` - Status string `json:"status,omitempty"` - Expires time.Time `json:"expires,omitempty"` - Challenges []challenge `json:"challenges,omitempty"` - Combinations [][]int `json:"combinations,omitempty"` -} - -type identifier struct { - Type string `json:"type"` - Value string `json:"value"` -} - -type validationRecord struct { - URI string `json:"url,omitempty"` - Hostname string `json:"hostname,omitempty"` - Port string `json:"port,omitempty"` - ResolvedAddresses []string `json:"addressesResolved,omitempty"` - UsedAddress string `json:"addressUsed,omitempty"` -} - -type challenge struct { - Resource string `json:"resource,omitempty"` - Type Challenge `json:"type,omitempty"` - Status string `json:"status,omitempty"` - URI string `json:"uri,omitempty"` - Token string `json:"token,omitempty"` - KeyAuthorization string `json:"keyAuthorization,omitempty"` - TLS bool `json:"tls,omitempty"` - Iterations int `json:"n,omitempty"` - Error RemoteError `json:"error,omitempty"` - ValidationRecords []validationRecord `json:"validationRecord,omitempty"` -} - -type csrMessage struct { - Resource string `json:"resource,omitempty"` - Csr string `json:"csr"` - Authorizations []string `json:"authorizations"` -} - -type revokeCertMessage struct { - Resource string `json:"resource"` - Certificate string `json:"certificate"` -} - -// CertificateResource represents a CA issued certificate. -// PrivateKey and Certificate are both already PEM encoded -// and can be directly written to disk. Certificate may -// be a certificate bundle, depending on the options supplied -// to create it. -type CertificateResource struct { - Domain string `json:"domain"` - CertURL string `json:"certUrl"` - CertStableURL string `json:"certStableUrl"` - AccountRef string `json:"accountRef,omitempty"` - PrivateKey []byte `json:"-"` - Certificate []byte `json:"-"` -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/provider.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/provider.go deleted file mode 100644 index d177ff07..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/provider.go +++ /dev/null @@ -1,28 +0,0 @@ -package acme - -import "time" - -// ChallengeProvider enables implementing a custom challenge -// provider. Present presents the solution to a challenge available to -// be solved. CleanUp will be called by the challenge if Present ends -// in a non-error state. -type ChallengeProvider interface { - Present(domain, token, keyAuth string) error - CleanUp(domain, token, keyAuth string) error -} - -// ChallengeProviderTimeout allows for implementing a -// ChallengeProvider where an unusually long timeout is required when -// waiting for an ACME challenge to be satisfied, such as when -// checking for DNS record propagation. If an implementor of a -// ChallengeProvider provides a Timeout method, then the return values -// of the Timeout method will be used when appropriate by the acme -// package.
The interval value is the time between checks. - - // The default values used for timeout and interval are 60 seconds and - // 2 seconds respectively. These are used when no Timeout method is - // defined for the ChallengeProvider. -type ChallengeProviderTimeout interface { - ChallengeProvider - Timeout() (timeout, interval time.Duration) -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go deleted file mode 100644 index f184b17a..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go +++ /dev/null @@ -1,73 +0,0 @@ -package acme - -import ( - "crypto/rsa" - "crypto/sha256" - "crypto/tls" - "encoding/hex" - "fmt" - "log" -) - -type tlsSNIChallenge struct { - jws *jws - validate validateFunc - provider ChallengeProvider -} - -func (t *tlsSNIChallenge) Solve(chlng challenge, domain string) error { - // FIXME: https://github.com/ietf-wg-acme/acme/pull/22 - // Currently we implement this challenge to track boulder, not the current spec! - - logf("[INFO][%s] acme: Trying to solve TLS-SNI-01", domain) - - // Generate the Key Authorization for the challenge - keyAuth, err := getKeyAuthorization(chlng.Token, t.jws.privKey) - if err != nil { - return err - } - - err = t.provider.Present(domain, chlng.Token, keyAuth) - if err != nil { - return fmt.Errorf("[%s] error presenting token: %v", domain, err) - } - defer func() { - err := t.provider.CleanUp(domain, chlng.Token, keyAuth) - if err != nil { - log.Printf("[%s] error cleaning up: %v", domain, err) - } - }() - return t.validate(t.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth}) -} - -// TLSSNI01ChallengeCertDomain returns a certificate and target domain for the `tls-sni-01` challenge -func TLSSNI01ChallengeCertDomain(keyAuth string) (tls.Certificate, string, error) { - // generate a new RSA key for the certificates - tempPrivKey, err := generatePrivateKey(RSA2048) - if err != nil { - return tls.Certificate{}, "", err - } - rsaPrivKey := tempPrivKey.(*rsa.PrivateKey) - rsaPrivPEM := pemEncode(rsaPrivKey) - - zBytes := sha256.Sum256([]byte(keyAuth)) - z := hex.EncodeToString(zBytes[:sha256.Size]) - domain := fmt.Sprintf("%s.%s.acme.invalid", z[:32], z[32:]) - tempCertPEM, err := generatePemCert(rsaPrivKey, domain) - if err != nil { - return tls.Certificate{}, "", err - } - - certificate, err := tls.X509KeyPair(tempCertPEM, rsaPrivPEM) - if err != nil { - return tls.Certificate{}, "", err - } - - return certificate, domain, nil -} - -// TLSSNI01ChallengeCert returns a certificate for the `tls-sni-01` challenge -func TLSSNI01ChallengeCert(keyAuth string) (tls.Certificate, error) { - cert, _, err := TLSSNI01ChallengeCertDomain(keyAuth) - return cert, err -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go deleted file mode 100644 index faaf16f6..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go +++ /dev/null @@ -1,62 +0,0 @@ -package acme - -import ( - "crypto/tls" - "fmt" - "net" - "net/http" -) -
-// TLSProviderServer implements ChallengeProvider for `TLS-SNI-01` challenge -// It may be instantiated without using the NewTLSProviderServer function if -// you want only to use the default values. -type TLSProviderServer struct { - iface string - port string - done chan bool - listener net.Listener -} - -// NewTLSProviderServer creates a new TLSProviderServer on the selected interface and port. -// Setting iface and / or port to an empty string will make the server fall back to -// the "any" interface and port 443 respectively. -func NewTLSProviderServer(iface, port string) *TLSProviderServer { - return &TLSProviderServer{iface: iface, port: port} -} - -// Present makes the keyAuth available as a cert -func (s *TLSProviderServer) Present(domain, token, keyAuth string) error { - if s.port == "" { - s.port = "443" - } - - cert, err := TLSSNI01ChallengeCert(keyAuth) - if err != nil { - return err - } - - tlsConf := new(tls.Config) - tlsConf.Certificates = []tls.Certificate{cert} - - s.listener, err = tls.Listen("tcp", net.JoinHostPort(s.iface, s.port), tlsConf) - if err != nil { - return fmt.Errorf("Could not start HTTPS server for challenge -> %v", err) - } - - s.done = make(chan bool) - go func() { - http.Serve(s.listener, nil) - s.done <- true - }() - return nil -} - -// CleanUp closes the HTTP server. -func (s *TLSProviderServer) CleanUp(domain, token, keyAuth string) error { - if s.listener == nil { - return nil - } - s.listener.Close() - <-s.done - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go deleted file mode 100644 index 3aec7456..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package acme - -import ( - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "crypto/tls" - "encoding/hex" - "fmt" - "strings" - "testing" -) - -func TestTLSSNIChallenge(t *testing.T) { - privKey, _ := rsa.GenerateKey(rand.Reader, 512) - j := &jws{privKey: privKey} - clientChallenge := challenge{Type: TLSSNI01, Token: "tlssni1"} - mockValidate := func(_ *jws, _, _ string, chlng challenge) error { - conn, err := tls.Dial("tcp", "localhost:23457", &tls.Config{ - InsecureSkipVerify: true, - }) - if err != nil { - t.Errorf("Expected to connect to challenge server without an error. 
%s", err.Error()) - } - - // Expect the server to only return one certificate - connState := conn.ConnectionState() - if count := len(connState.PeerCertificates); count != 1 { - t.Errorf("Expected the challenge server to return exactly one certificate but got %d", count) - } - - remoteCert := connState.PeerCertificates[0] - if count := len(remoteCert.DNSNames); count != 1 { - t.Errorf("Expected the challenge certificate to have exactly one DNSNames entry but had %d", count) - } - - zBytes := sha256.Sum256([]byte(chlng.KeyAuthorization)) - z := hex.EncodeToString(zBytes[:sha256.Size]) - domain := fmt.Sprintf("%s.%s.acme.invalid", z[:32], z[32:]) - - if remoteCert.DNSNames[0] != domain { - t.Errorf("Expected the challenge certificate DNSName to match %s but was %s", domain, remoteCert.DNSNames[0]) - } - - return nil - } - solver := &tlsSNIChallenge{jws: j, validate: mockValidate, provider: &TLSProviderServer{port: "23457"}} - - if err := solver.Solve(clientChallenge, "localhost:23457"); err != nil { - t.Errorf("Solve error: got %v, want nil", err) - } -} - -func TestTLSSNIChallengeInvalidPort(t *testing.T) { - privKey, _ := rsa.GenerateKey(rand.Reader, 128) - j := &jws{privKey: privKey} - clientChallenge := challenge{Type: TLSSNI01, Token: "tlssni2"} - solver := &tlsSNIChallenge{jws: j, validate: stubValidate, provider: &TLSProviderServer{port: "123456"}} - - if err := solver.Solve(clientChallenge, "localhost:123456"); err == nil { - t.Errorf("Solve error: got %v, want error", err) - } else if want := "invalid port 123456"; !strings.HasSuffix(err.Error(), want) { - t.Errorf("Solve error: got %q, want suffix %q", err.Error(), want) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils.go deleted file mode 100644 index 2fa0db30..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils.go +++ /dev/null @@ -1,29 +0,0 @@ -package acme - -import ( - "fmt" - "time" -) - -// WaitFor polls the given function 'f', once every 'interval', up to 'timeout'. -func WaitFor(timeout, interval time.Duration, f func() (bool, error)) error { - var lastErr string - timeup := time.After(timeout) - for { - select { - case <-timeup: - return fmt.Errorf("Time limit exceeded. 
Last error: %s", lastErr) - default: - } - - stop, err := f() - if stop { - return nil - } - if err != nil { - lastErr = err.Error() - } - - time.Sleep(interval) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils_test.go deleted file mode 100644 index 158af411..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package acme - -import ( - "testing" - "time" -) - -func TestWaitForTimeout(t *testing.T) { - c := make(chan error) - go func() { - err := WaitFor(3*time.Second, 1*time.Second, func() (bool, error) { - return false, nil - }) - c <- err - }() - - timeout := time.After(4 * time.Second) - select { - case <-timeout: - t.Fatal("timeout exceeded") - case err := <-c: - if err == nil { - t.Errorf("expected timeout error; got %v", err) - } - } -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md deleted file mode 100644 index 97e61dbb..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md +++ /dev/null @@ -1,10 +0,0 @@ -Serious about security -====================== - -Square recognizes the important contributions the security research community -can make. We therefore encourage reporting security issues with the code -contained in this repository. - -If you believe you have discovered a security vulnerability, please follow the -guidelines at . - diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md deleted file mode 100644 index 61b18365..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md +++ /dev/null @@ -1,14 +0,0 @@ -# Contributing - -If you would like to contribute code to go-jose you can do so through GitHub by -forking the repository and sending a pull request. - -When submitting code, please make every effort to follow existing conventions -and style in order to keep the code as readable as possible. Please also make -sure all tests pass by running `go test`, and format your code with `go fmt`. -We also recommend using `golint` and `errcheck`. - -Before your code can be accepted into the project you must also sign the -[Individual Contributor License Agreement][1]. - - [1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1 diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/LICENSE b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/README.md b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/README.md deleted file mode 100644 index fd859da7..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/README.md +++ /dev/null @@ -1,209 +0,0 @@ -# Go JOSE - -[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) [![license](http://img.shields.io/badge/license-apache_2.0-red.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE) [![build](https://travis-ci.org/square/go-jose.svg?branch=master)](https://travis-ci.org/square/go-jose) [![coverage](https://coveralls.io/repos/github/square/go-jose/badge.svg?branch=master)](https://coveralls.io/r/square/go-jose) - -Package jose aims to provide an implementation of the Javascript Object Signing -and Encryption set of standards. For the moment, it mainly focuses on encryption -and signing based on the JSON Web Encryption and JSON Web Signature standards. - -**Disclaimer**: This library contains encryption software that is subject to -the U.S. Export Administration Regulations. You may not export, re-export, -transfer or download this code or any part of it in violation of any United -States law, directive or regulation. In particular this software may not be -exported or re-exported in any form or on any media to Iran, North Sudan, -Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any -US maintained blocked list. - -## Overview - -The implementation follows the -[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) -standard (RFC 7516) and -[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) -standard (RFC 7515). Tables of supported algorithms are shown below. -The library supports both the compact and full serialization formats, and has -optional support for multiple recipients. It also comes with a small -command-line utility -([`jose-util`](https://github.com/square/go-jose/tree/master/jose-util)) -for dealing with JOSE messages in a shell. - -**Note**: We use a forked version of the `encoding/json` package from the Go -standard library which uses case-sensitive matching for member names (instead -of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). -This is to avoid differences in interpretation of messages between go-jose and -libraries in other languages. If you do not like this behavior, you can use the -`std_json` build tag to disable it (though we do not recommend doing so). - -### Versions - -We use [gopkg.in](https://gopkg.in) for versioning. - -[Version 1](https://gopkg.in/square/go-jose.v1) is the current stable version: - - import "gopkg.in/square/go-jose.v1" - -The interface for [go-jose.v1](https://gopkg.in/square/go-jose.v1) will remain -backwards compatible. We're currently sketching out ideas for a new version, to -clean up the interface a bit. If you have ideas or feature requests [please let -us know](https://github.com/square/go-jose/issues/64)! - -### Supported algorithms - -See below for a table of supported algorithms. Algorithm identifiers match -the names in the -[JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518) -standard where possible. The -[Godoc reference](https://godoc.org/github.com/square/go-jose#pkg-constants) -has a list of constants. 
- - Key encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSA-PKCS#1v1.5 | RSA1_5 - RSA-OAEP | RSA-OAEP, RSA-OAEP-256 - AES key wrap | A128KW, A192KW, A256KW - AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW - ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW - ECDH-ES (direct) | ECDH-ES [1] - Direct encryption | dir [1] - -1. Not supported in multi-recipient mode - - Signing / MAC | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 - RSASSA-PSS | PS256, PS384, PS512 - HMAC | HS256, HS384, HS512 - ECDSA | ES256, ES384, ES512 - - Content encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 - AES-GCM | A128GCM, A192GCM, A256GCM - - Compression | Algorithm identifier(s) - :------------------------- | ------------------------------- - DEFLATE (RFC 1951) | DEF - -### Supported key types - -See below for a table of supported key types. These are understood by the -library, and can be passed to corresponding functions such as `NewEncrypter` or -`NewSigner`. Note that if you are creating a new encrypter or signer with a -JsonWebKey, the key id of the JsonWebKey (if present) will be added to any -resulting messages. - - Algorithm(s) | Corresponding types - :------------------------- | ------------------------------- - RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey), *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) - ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey), *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) - AES, HMAC | []byte, *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) - -## Examples - -Encryption/decryption example using RSA: - -```Go -// Generate a public/private key pair to use for this example. The library -// also provides two utility functions (LoadPublicKey and LoadPrivateKey) -// that can be used to load keys from PEM/DER-encoded data. -privateKey, err := rsa.GenerateKey(rand.Reader, 2048) -if err != nil { - panic(err) -} - -// Instantiate an encrypter using RSA-OAEP with AES128-GCM. An error would -// indicate that the selected algorithm(s) are not currently supported. -publicKey := &privateKey.PublicKey -encrypter, err := NewEncrypter(RSA_OAEP, A128GCM, publicKey) -if err != nil { - panic(err) -} - -// Encrypt a sample plaintext. Calling the encrypter returns an encrypted -// JWE object, which can then be serialized for output afterwards. An error -// would indicate a problem in an underlying cryptographic primitive. -var plaintext = []byte("Lorem ipsum dolor sit amet") -object, err := encrypter.Encrypt(plaintext) -if err != nil { - panic(err) -} - -// Serialize the encrypted object using the full serialization format. -// Alternatively you can also use the compact format here by calling -// object.CompactSerialize() instead. -serialized := object.FullSerialize() - -// Parse the serialized, encrypted JWE object. An error would indicate that -// the given input did not represent a valid message. -object, err = ParseEncrypted(serialized) -if err != nil { - panic(err) -} - -// Now we can decrypt and get back our original plaintext.
An error here -// would indicate that the message failed to decrypt, e.g. because the auth -// tag was broken or the message was tampered with. -decrypted, err := object.Decrypt(privateKey) -if err != nil { - panic(err) -} - -fmt.Printf(string(decrypted)) -// output: Lorem ipsum dolor sit amet -``` - -Signing/verification example using RSA: - -```Go -// Generate a public/private key pair to use for this example. The library -// also provides two utility functions (LoadPublicKey and LoadPrivateKey) -// that can be used to load keys from PEM/DER-encoded data. -privateKey, err := rsa.GenerateKey(rand.Reader, 2048) -if err != nil { - panic(err) -} - -// Instantiate a signer using RSASSA-PSS (SHA512) with the given private key. -signer, err := NewSigner(PS512, privateKey) -if err != nil { - panic(err) -} - -// Sign a sample payload. Calling the signer returns a protected JWS object, -// which can then be serialized for output afterwards. An error would -// indicate a problem in an underlying cryptographic primitive. -var payload = []byte("Lorem ipsum dolor sit amet") -object, err := signer.Sign(payload) -if err != nil { - panic(err) -} - -// Serialize the signed object using the full serialization format. -// Alternatively you can also use the compact format here by calling -// object.CompactSerialize() instead. -serialized := object.FullSerialize() - -// Parse the serialized, protected JWS object. An error would indicate that -// the given input did not represent a valid message. -object, err = ParseSigned(serialized) -if err != nil { - panic(err) -} - -// Now we can verify the signature on the payload. An error here would -// indicate that the message failed to verify, e.g. because the signature was -// broken or the message was tampered with. -output, err := object.Verify(&privateKey.PublicKey) -if err != nil { - panic(err) -} - -fmt.Printf(string(output)) -// output: Lorem ipsum dolor sit amet -``` - -More examples can be found in the [Godoc -reference](https://godoc.org/github.com/square/go-jose) for this package. The -[`jose-util`](https://github.com/square/go-jose/tree/master/jose-util) -subdirectory also contains a small command-line utility which might -be useful as an example. diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric.go deleted file mode 100644 index 381156ca..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric.go +++ /dev/null @@ -1,498 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package jose - -import ( - "crypto" - "crypto/aes" - "crypto/ecdsa" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "errors" - "fmt" - "math/big" - - "gopkg.in/square/go-jose.v1/cipher" -) - -// A generic RSA-based encrypter/verifier -type rsaEncrypterVerifier struct { - publicKey *rsa.PublicKey -} - -// A generic RSA-based decrypter/signer -type rsaDecrypterSigner struct { - privateKey *rsa.PrivateKey -} - -// A generic EC-based encrypter/verifier -type ecEncrypterVerifier struct { - publicKey *ecdsa.PublicKey -} - -// A key generator for ECDH-ES -type ecKeyGenerator struct { - size int - algID string - publicKey *ecdsa.PublicKey -} - -// A generic EC-based decrypter/signer -type ecDecrypterSigner struct { - privateKey *ecdsa.PrivateKey -} - -// newRSARecipient creates recipientKeyInfo based on the given key. -func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch keyAlg { - case RSA1_5, RSA_OAEP, RSA_OAEP_256: - default: - return recipientKeyInfo{}, ErrUnsupportedAlgorithm - } - - return recipientKeyInfo{ - keyAlg: keyAlg, - keyEncrypter: &rsaEncrypterVerifier{ - publicKey: publicKey, - }, - }, nil -} - -// newRSASigner creates a recipientSigInfo based on the given key. -func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) { - // Verify that the signature algorithm is supported by this signer - switch sigAlg { - case RS256, RS384, RS512, PS256, PS384, PS512: - default: - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: &JsonWebKey{ - Key: &privateKey.PublicKey, - }, - signer: &rsaDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -// newECDHRecipient creates recipientKeyInfo based on the given key. -func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch keyAlg { - case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - default: - return recipientKeyInfo{}, ErrUnsupportedAlgorithm - } - - return recipientKeyInfo{ - keyAlg: keyAlg, - keyEncrypter: &ecEncrypterVerifier{ - publicKey: publicKey, - }, - }, nil -} - -// newECDSASigner creates a recipientSigInfo based on the given key. -func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) { - // Verify that the signature algorithm is supported by this signer - switch sigAlg { - case ES256, ES384, ES512: - default: - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: &JsonWebKey{ - Key: &privateKey.PublicKey, - }, - signer: &ecDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -// Encrypt the given payload and update the object. -func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { - encryptedKey, err := ctx.encrypt(cek, alg) - if err != nil { - return recipientInfo{}, err - } - - return recipientInfo{ - encryptedKey: encryptedKey, - header: &rawHeader{}, - }, nil -} - -// Encrypt the given payload. Based on the key encryption algorithm, -// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
-func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) { - switch alg { - case RSA1_5: - return rsa.EncryptPKCS1v15(randReader, ctx.publicKey, cek) - case RSA_OAEP: - return rsa.EncryptOAEP(sha1.New(), randReader, ctx.publicKey, cek, []byte{}) - case RSA_OAEP_256: - return rsa.EncryptOAEP(sha256.New(), randReader, ctx.publicKey, cek, []byte{}) - } - - return nil, ErrUnsupportedAlgorithm -} - -// Decrypt the given payload and return the content encryption key. -func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - return ctx.decrypt(recipient.encryptedKey, KeyAlgorithm(headers.Alg), generator) -} - -// Decrypt the given payload. Based on the key encryption algorithm, -// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). -func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) { - // Note: The random reader on decrypt operations is only used for blinding, - // so stubbing is meaningless (hence the direct use of rand.Reader). - switch alg { - case RSA1_5: - defer func() { - // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload - // because of an index out of bounds error, which we want to ignore. - // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover() - // only exists for preventing crashes with unpatched versions. - // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k - // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33 - _ = recover() - }() - - // Perform some input validation. - keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8 - if keyBytes != len(jek) { - // Input size is incorrect, the encrypted payload should always match - // the size of the public modulus (e.g. using a 2048 bit key will - // produce 256 bytes of output). Reject this since it's invalid input. - return nil, ErrCryptoFailure - } - - cek, _, err := generator.genKey() - if err != nil { - return nil, ErrCryptoFailure - } - - // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to - // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing - // the Million Message Attack on Cryptographic Message Syntax". We are - // therefore deliberately ignoring errors here.
- _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek) - - return cek, nil - case RSA_OAEP: - // Use rand.Reader for RSA blinding - return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{}) - case RSA_OAEP_256: - // Use rand.Reader for RSA blinding - return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{}) - } - - return nil, ErrUnsupportedAlgorithm -} - -// Sign the given payload -func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - var hash crypto.Hash - - switch alg { - case RS256, PS256: - hash = crypto.SHA256 - case RS384, PS384: - hash = crypto.SHA384 - case RS512, PS512: - hash = crypto.SHA512 - default: - return Signature{}, ErrUnsupportedAlgorithm - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - var out []byte - var err error - - switch alg { - case RS256, RS384, RS512: - out, err = rsa.SignPKCS1v15(randReader, ctx.privateKey, hash, hashed) - case PS256, PS384, PS512: - out, err = rsa.SignPSS(randReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - }) - } - - if err != nil { - return Signature{}, err - } - - return Signature{ - Signature: out, - protected: &rawHeader{}, - }, nil -} - -// Verify the given payload -func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - var hash crypto.Hash - - switch alg { - case RS256, PS256: - hash = crypto.SHA256 - case RS384, PS384: - hash = crypto.SHA384 - case RS512, PS512: - hash = crypto.SHA512 - default: - return ErrUnsupportedAlgorithm - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - switch alg { - case RS256, RS384, RS512: - return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature) - case PS256, PS384, PS512: - return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil) - } - - return ErrUnsupportedAlgorithm -} - -// Encrypt the given payload and update the object. -func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { - switch alg { - case ECDH_ES: - // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key. 
- return recipientInfo{ - header: &rawHeader{}, - }, nil - case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - default: - return recipientInfo{}, ErrUnsupportedAlgorithm - } - - generator := ecKeyGenerator{ - algID: string(alg), - publicKey: ctx.publicKey, - } - - switch alg { - case ECDH_ES_A128KW: - generator.size = 16 - case ECDH_ES_A192KW: - generator.size = 24 - case ECDH_ES_A256KW: - generator.size = 32 - } - - kek, header, err := generator.genKey() - if err != nil { - return recipientInfo{}, err - } - - block, err := aes.NewCipher(kek) - if err != nil { - return recipientInfo{}, err - } - - jek, err := josecipher.KeyWrap(block, cek) - if err != nil { - return recipientInfo{}, err - } - - return recipientInfo{ - encryptedKey: jek, - header: &header, - }, nil -} - -// Get key size for EC key generator -func (ctx ecKeyGenerator) keySize() int { - return ctx.size -} - -// Get a content encryption key for ECDH-ES -func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { - priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, randReader) - if err != nil { - return nil, rawHeader{}, err - } - - out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) - - headers := rawHeader{ - Epk: &JsonWebKey{ - Key: &priv.PublicKey, - }, - } - - return out, headers, nil -} - -// Decrypt the given payload and return the content encryption key. -func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - if headers.Epk == nil { - return nil, errors.New("square/go-jose: missing epk header") - } - - publicKey, ok := headers.Epk.Key.(*ecdsa.PublicKey) - if publicKey == nil || !ok { - return nil, errors.New("square/go-jose: invalid epk header") - } - - apuData := headers.Apu.bytes() - apvData := headers.Apv.bytes() - - deriveKey := func(algID string, size int) []byte { - return josecipher.DeriveECDHES(algID, apuData, apvData, ctx.privateKey, publicKey, size) - } - - var keySize int - - switch KeyAlgorithm(headers.Alg) { - case ECDH_ES: - // ECDH-ES uses direct key agreement, no key unwrapping necessary. 
- return deriveKey(string(headers.Enc), generator.keySize()), nil - case ECDH_ES_A128KW: - keySize = 16 - case ECDH_ES_A192KW: - keySize = 24 - case ECDH_ES_A256KW: - keySize = 32 - default: - return nil, ErrUnsupportedAlgorithm - } - - key := deriveKey(headers.Alg, keySize) - block, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - return josecipher.KeyUnwrap(block, recipient.encryptedKey) -} - -// Sign the given payload -func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - var expectedBitSize int - var hash crypto.Hash - - switch alg { - case ES256: - expectedBitSize = 256 - hash = crypto.SHA256 - case ES384: - expectedBitSize = 384 - hash = crypto.SHA384 - case ES512: - expectedBitSize = 521 - hash = crypto.SHA512 - } - - curveBits := ctx.privateKey.Curve.Params().BitSize - if expectedBitSize != curveBits { - return Signature{}, fmt.Errorf("square/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - r, s, err := ecdsa.Sign(randReader, ctx.privateKey, hashed) - if err != nil { - return Signature{}, err - } - - keyBytes := curveBits / 8 - if curveBits%8 > 0 { - keyBytes += 1 - } - - // We serialize the outputs (r and s) into big-endian byte arrays and pad - // them with zeros on the left to make sure the sizes work out. Both arrays - // must be keyBytes long, and the output must be 2*keyBytes long. - rBytes := r.Bytes() - rBytesPadded := make([]byte, keyBytes) - copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) - - sBytes := s.Bytes() - sBytesPadded := make([]byte, keyBytes) - copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) - - out := append(rBytesPadded, sBytesPadded...) - - return Signature{ - Signature: out, - protected: &rawHeader{}, - }, nil -} - -// Verify the given payload -func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - var keySize int - var hash crypto.Hash - - switch alg { - case ES256: - keySize = 32 - hash = crypto.SHA256 - case ES384: - keySize = 48 - hash = crypto.SHA384 - case ES512: - keySize = 66 - hash = crypto.SHA512 - } - - if len(signature) != 2*keySize { - return fmt.Errorf("square/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize) - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - r := big.NewInt(0).SetBytes(signature[:keySize]) - s := big.NewInt(0).SetBytes(signature[keySize:]) - - match := ecdsa.Verify(ctx.publicKey, hashed, r, s) - if !match { - return errors.New("square/go-jose: ecdsa signature failed to verify") - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go deleted file mode 100644 index 1c8c8b34..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go +++ /dev/null @@ -1,431 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "crypto/rand" - "crypto/rsa" - "errors" - "io" - "math/big" - "testing" -) - -func TestVectorsRSA(t *testing.T) { - // Sources: - // http://www.emc.com/emc-plus/rsa-labs/standards-initiatives/pkcs-rsa-cryptography-standard.htm - // ftp://ftp.rsa.com/pub/rsalabs/tmp/pkcs1v15crypt-vectors.txt - priv := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - N: fromHexInt(` - a8b3b284af8eb50b387034a860f146c4919f318763cd6c5598c8 - ae4811a1e0abc4c7e0b082d693a5e7fced675cf4668512772c0c - bc64a742c6c630f533c8cc72f62ae833c40bf25842e984bb78bd - bf97c0107d55bdb662f5c4e0fab9845cb5148ef7392dd3aaff93 - ae1e6b667bb3d4247616d4f5ba10d4cfd226de88d39f16fb`), - E: 65537, - }, - D: fromHexInt(` - 53339cfdb79fc8466a655c7316aca85c55fd8f6dd898fdaf1195 - 17ef4f52e8fd8e258df93fee180fa0e4ab29693cd83b152a553d - 4ac4d1812b8b9fa5af0e7f55fe7304df41570926f3311f15c4d6 - 5a732c483116ee3d3d2d0af3549ad9bf7cbfb78ad884f84d5beb - 04724dc7369b31def37d0cf539e9cfcdd3de653729ead5d1`), - Primes: []*big.Int{ - fromHexInt(` - d32737e7267ffe1341b2d5c0d150a81b586fb3132bed2f8d5262 - 864a9cb9f30af38be448598d413a172efb802c21acf1c11c520c - 2f26a471dcad212eac7ca39d`), - fromHexInt(` - cc8853d1d54da630fac004f471f281c7b8982d8224a490edbeb3 - 3d3e3d5cc93c4765703d1dd791642f1f116a0dd852be2419b2af - 72bfe9a030e860b0288b5d77`), - }, - } - - input := fromHexBytes( - "6628194e12073db03ba94cda9ef9532397d50dba79b987004afefe34") - - expectedPKCS := fromHexBytes(` - 50b4c14136bd198c2f3c3ed243fce036e168d56517984a263cd66492b808 - 04f169d210f2b9bdfb48b12f9ea05009c77da257cc600ccefe3a6283789d - 8ea0e607ac58e2690ec4ebc10146e8cbaa5ed4d5cce6fe7b0ff9efc1eabb - 564dbf498285f449ee61dd7b42ee5b5892cb90601f30cda07bf26489310b - cd23b528ceab3c31`) - - expectedOAEP := fromHexBytes(` - 354fe67b4a126d5d35fe36c777791a3f7ba13def484e2d3908aff722fad4 - 68fb21696de95d0be911c2d3174f8afcc201035f7b6d8e69402de5451618 - c21a535fa9d7bfc5b8dd9fc243f8cf927db31322d6e881eaa91a996170e6 - 57a05a266426d98c88003f8477c1227094a0d9fa1e8c4024309ce1ecccb5 - 210035d47ac72e8a`) - - // Mock random reader - randReader = bytes.NewReader(fromHexBytes(` - 017341ae3875d5f87101f8cc4fa9b9bc156bb04628fccdb2f4f11e905bd3 - a155d376f593bd7304210874eba08a5e22bcccb4c9d3882a93a54db022f5 - 03d16338b6b7ce16dc7f4bbf9a96b59772d6606e9747c7649bf9e083db98 - 1884a954ab3c6f18b776ea21069d69776a33e96bad48e1dda0a5ef`)) - defer resetRandReader() - - // RSA-PKCS1v1.5 encrypt - enc := new(rsaEncrypterVerifier) - enc.publicKey = &priv.PublicKey - encryptedPKCS, err := enc.encrypt(input, RSA1_5) - if err != nil { - t.Error("Encryption failed:", err) - return - } - - if bytes.Compare(encryptedPKCS, expectedPKCS) != 0 { - t.Error("Output does not match expected value (PKCS1v1.5)") - } - - // RSA-OAEP encrypt - encryptedOAEP, err := enc.encrypt(input, RSA_OAEP) - if err != nil { - t.Error("Encryption failed:", err) - return - } - - if bytes.Compare(encryptedOAEP, expectedOAEP) != 0 { - t.Error("Output does not match expected value (OAEP)") - } - - // Need fake cipher for PKCS1v1.5 decrypt - resetRandReader() - aes := newAESGCM(len(input)) - - keygen := randomKeyGenerator{ - size: 
aes.keySize(), - } - - // RSA-PKCS1v1.5 decrypt - dec := new(rsaDecrypterSigner) - dec.privateKey = priv - decryptedPKCS, err := dec.decrypt(encryptedPKCS, RSA1_5, keygen) - if err != nil { - t.Error("Decryption failed:", err) - return - } - - if bytes.Compare(input, decryptedPKCS) != 0 { - t.Error("Output does not match expected value (PKCS1v1.5)") - } - - // RSA-OAEP decrypt - decryptedOAEP, err := dec.decrypt(encryptedOAEP, RSA_OAEP, keygen) - if err != nil { - t.Error("decryption failed:", err) - return - } - - if bytes.Compare(input, decryptedOAEP) != 0 { - t.Error("output does not match expected value (OAEP)") - } -} - -func TestInvalidAlgorithmsRSA(t *testing.T) { - _, err := newRSARecipient("XYZ", nil) - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - _, err = newRSASigner("XYZ", nil) - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - enc := new(rsaEncrypterVerifier) - enc.publicKey = &rsaTestKey.PublicKey - _, err = enc.encryptKey([]byte{}, "XYZ") - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - err = enc.verifyPayload([]byte{}, []byte{}, "XYZ") - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - dec := new(rsaDecrypterSigner) - dec.privateKey = rsaTestKey - _, err = dec.decrypt(make([]byte, 256), "XYZ", randomKeyGenerator{size: 16}) - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - _, err = dec.signPayload([]byte{}, "XYZ") - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } -} - -type failingKeyGenerator struct{} - -func (ctx failingKeyGenerator) keySize() int { - return 0 -} - -func (ctx failingKeyGenerator) genKey() ([]byte, rawHeader, error) { - return nil, rawHeader{}, errors.New("failed to generate key") -} - -func TestPKCSKeyGeneratorFailure(t *testing.T) { - dec := new(rsaDecrypterSigner) - dec.privateKey = rsaTestKey - generator := failingKeyGenerator{} - _, err := dec.decrypt(make([]byte, 256), RSA1_5, generator) - if err != ErrCryptoFailure { - t.Error("should return error on invalid algorithm") - } -} - -func TestInvalidAlgorithmsEC(t *testing.T) { - _, err := newECDHRecipient("XYZ", nil) - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - _, err = newECDSASigner("XYZ", nil) - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - enc := new(ecEncrypterVerifier) - enc.publicKey = &ecTestKey256.PublicKey - _, err = enc.encryptKey([]byte{}, "XYZ") - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } -} - -func TestInvalidECKeyGen(t *testing.T) { - gen := ecKeyGenerator{ - size: 16, - algID: "A128GCM", - publicKey: &ecTestKey256.PublicKey, - } - - if gen.keySize() != 16 { - t.Error("ec key generator reported incorrect key size") - } - - _, _, err := gen.genKey() - if err != nil { - t.Error("ec key generator failed to generate key", err) - } -} - -func TestInvalidECDecrypt(t *testing.T) { - dec := ecDecrypterSigner{ - privateKey: ecTestKey256, - } - - generator := randomKeyGenerator{size: 16} - - // Missing epk header - headers := rawHeader{ - Alg: string(ECDH_ES), - } - - _, err := dec.decryptKey(headers, nil, generator) - if err == nil { - t.Error("ec decrypter accepted object with missing epk header") - } - - // Invalid epk header - headers.Epk = 
&JsonWebKey{} - - _, err = dec.decryptKey(headers, nil, generator) - if err == nil { - t.Error("ec decrypter accepted object with invalid epk header") - } -} - -func TestDecryptWithIncorrectSize(t *testing.T) { - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - t.Error(err) - return - } - - dec := new(rsaDecrypterSigner) - dec.privateKey = priv - aes := newAESGCM(16) - - keygen := randomKeyGenerator{ - size: aes.keySize(), - } - - payload := make([]byte, 254) - _, err = dec.decrypt(payload, RSA1_5, keygen) - if err == nil { - t.Error("Invalid payload size should return error") - } - - payload = make([]byte, 257) - _, err = dec.decrypt(payload, RSA1_5, keygen) - if err == nil { - t.Error("Invalid payload size should return error") - } -} - -func TestPKCSDecryptNeverFails(t *testing.T) { - // We don't want RSA-PKCS1 v1.5 decryption to ever fail, in order to prevent - // side-channel timing attacks (Bleichenbacher attack in particular). - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - t.Error(err) - return - } - - dec := new(rsaDecrypterSigner) - dec.privateKey = priv - aes := newAESGCM(16) - - keygen := randomKeyGenerator{ - size: aes.keySize(), - } - - for i := 1; i < 50; i++ { - payload := make([]byte, 256) - _, err := io.ReadFull(rand.Reader, payload) - if err != nil { - t.Error("Unable to get random data:", err) - return - } - _, err = dec.decrypt(payload, RSA1_5, keygen) - if err != nil { - t.Error("PKCS1v1.5 decrypt should never fail:", err) - return - } - } -} - -func BenchmarkPKCSDecryptWithValidPayloads(b *testing.B) { - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - panic(err) - } - - enc := new(rsaEncrypterVerifier) - enc.publicKey = &priv.PublicKey - dec := new(rsaDecrypterSigner) - dec.privateKey = priv - aes := newAESGCM(32) - - b.StopTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - plaintext := make([]byte, 32) - _, err = io.ReadFull(rand.Reader, plaintext) - if err != nil { - panic(err) - } - - ciphertext, err := enc.encrypt(plaintext, RSA1_5) - if err != nil { - panic(err) - } - - keygen := randomKeyGenerator{ - size: aes.keySize(), - } - - b.StartTimer() - _, err = dec.decrypt(ciphertext, RSA1_5, keygen) - b.StopTimer() - if err != nil { - panic(err) - } - } -} - -func BenchmarkPKCSDecryptWithInvalidPayloads(b *testing.B) { - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - panic(err) - } - - enc := new(rsaEncrypterVerifier) - enc.publicKey = &priv.PublicKey - dec := new(rsaDecrypterSigner) - dec.privateKey = priv - aes := newAESGCM(16) - - keygen := randomKeyGenerator{ - size: aes.keySize(), - } - - b.StopTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - plaintext := make([]byte, 16) - _, err = io.ReadFull(rand.Reader, plaintext) - if err != nil { - panic(err) - } - - ciphertext, err := enc.encrypt(plaintext, RSA1_5) - if err != nil { - panic(err) - } - - // Do some simple scrambling - ciphertext[128] ^= 0xFF - - b.StartTimer() - _, err = dec.decrypt(ciphertext, RSA1_5, keygen) - b.StopTimer() - if err != nil { - panic(err) - } - } -} - -func TestInvalidEllipticCurve(t *testing.T) { - signer256 := ecDecrypterSigner{privateKey: ecTestKey256} - signer384 := ecDecrypterSigner{privateKey: ecTestKey384} - signer521 := ecDecrypterSigner{privateKey: ecTestKey521} - - _, err := signer256.signPayload([]byte{}, ES384) - if err == nil { - t.Error("should not generate ES384 signature with P-256 key") - } - _, err = signer256.signPayload([]byte{}, ES512) - if err == nil { - t.Error("should not 
generate ES512 signature with P-256 key") - } - _, err = signer384.signPayload([]byte{}, ES256) - if err == nil { - t.Error("should not generate ES256 signature with P-384 key") - } - _, err = signer384.signPayload([]byte{}, ES512) - if err == nil { - t.Error("should not generate ES512 signature with P-384 key") - } - _, err = signer521.signPayload([]byte{}, ES256) - if err == nil { - t.Error("should not generate ES256 signature with P-521 key") - } - _, err = signer521.signPayload([]byte{}, ES384) - if err == nil { - t.Error("should not generate ES384 signature with P-521 key") - } -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go deleted file mode 100644 index a5c35834..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go +++ /dev/null @@ -1,196 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "bytes" - "crypto/cipher" - "crypto/hmac" - "crypto/sha256" - "crypto/sha512" - "crypto/subtle" - "encoding/binary" - "errors" - "hash" -) - -const ( - nonceBytes = 16 -) - -// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC. -func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) { - keySize := len(key) / 2 - integrityKey := key[:keySize] - encryptionKey := key[keySize:] - - blockCipher, err := newBlockCipher(encryptionKey) - if err != nil { - return nil, err - } - - var hash func() hash.Hash - switch keySize { - case 16: - hash = sha256.New - case 24: - hash = sha512.New384 - case 32: - hash = sha512.New - } - - return &cbcAEAD{ - hash: hash, - blockCipher: blockCipher, - authtagBytes: keySize, - integrityKey: integrityKey, - }, nil -} - -// An AEAD based on CBC+HMAC -type cbcAEAD struct { - hash func() hash.Hash - authtagBytes int - integrityKey []byte - blockCipher cipher.Block -} - -func (ctx *cbcAEAD) NonceSize() int { - return nonceBytes -} - -func (ctx *cbcAEAD) Overhead() int { - // Maximum overhead is block size (for padding) plus auth tag length, where - // the length of the auth tag is equivalent to the key size. - return ctx.blockCipher.BlockSize() + ctx.authtagBytes -} - -// Seal encrypts and authenticates the plaintext. -func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { - // Output buffer -- must take care not to mangle plaintext input. 
- ciphertext := make([]byte, len(plaintext)+ctx.Overhead())[:len(plaintext)] - copy(ciphertext, plaintext) - ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize()) - - cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce) - - cbc.CryptBlocks(ciphertext, ciphertext) - authtag := ctx.computeAuthTag(data, nonce, ciphertext) - - ret, out := resize(dst, len(dst)+len(ciphertext)+len(authtag)) - copy(out, ciphertext) - copy(out[len(ciphertext):], authtag) - - return ret -} - -// Open decrypts and authenticates the ciphertext. -func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { - if len(ciphertext) < ctx.authtagBytes { - return nil, errors.New("square/go-jose: invalid ciphertext (too short)") - } - - offset := len(ciphertext) - ctx.authtagBytes - expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset]) - match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:]) - if match != 1 { - return nil, errors.New("square/go-jose: invalid ciphertext (auth tag mismatch)") - } - - cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) - - // Make copy of ciphertext buffer, don't want to modify in place - buffer := append([]byte{}, []byte(ciphertext[:offset])...) - - if len(buffer)%ctx.blockCipher.BlockSize() > 0 { - return nil, errors.New("square/go-jose: invalid ciphertext (invalid length)") - } - - cbc.CryptBlocks(buffer, buffer) - - // Remove padding - plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize()) - if err != nil { - return nil, err - } - - ret, out := resize(dst, len(dst)+len(plaintext)) - copy(out, plaintext) - - return ret, nil -} - -// Compute an authentication tag -func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte { - buffer := make([]byte, len(aad)+len(nonce)+len(ciphertext)+8) - n := 0 - n += copy(buffer, aad) - n += copy(buffer[n:], nonce) - n += copy(buffer[n:], ciphertext) - binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad)*8)) - - // According to documentation, Write() on hash.Hash never fails. - hmac := hmac.New(ctx.hash, ctx.integrityKey) - _, _ = hmac.Write(buffer) - - return hmac.Sum(nil)[:ctx.authtagBytes] -} - -// resize ensures the given slice has a capacity of at least n bytes. -// If the capacity of the slice is less than n, a new slice is allocated -// and the existing data will be copied.
-func resize(in []byte, n int) (head, tail []byte) { - if cap(in) >= n { - head = in[:n] - } else { - head = make([]byte, n) - copy(head, in) - } - - tail = head[len(in):] - return -} - -// Apply padding -func padBuffer(buffer []byte, blockSize int) []byte { - missing := blockSize - (len(buffer) % blockSize) - ret, out := resize(buffer, len(buffer)+missing) - padding := bytes.Repeat([]byte{byte(missing)}, missing) - copy(out, padding) - return ret -} - -// Remove padding -func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) { - if len(buffer)%blockSize != 0 { - return nil, errors.New("square/go-jose: invalid padding") - } - - last := buffer[len(buffer)-1] - count := int(last) - - if count == 0 || count > blockSize || count > len(buffer) { - return nil, errors.New("square/go-jose: invalid padding") - } - - padding := bytes.Repeat([]byte{last}, count) - if !bytes.HasSuffix(buffer, padding) { - return nil, errors.New("square/go-jose: invalid padding") - } - - return buffer[:len(buffer)-count], nil -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go deleted file mode 100644 index c230271b..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go +++ /dev/null @@ -1,498 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package josecipher - -import ( - "bytes" - "crypto/aes" - "crypto/cipher" - "crypto/rand" - "io" - "strings" - "testing" -) - -func TestInvalidInputs(t *testing.T) { - key := []byte{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - } - - nonce := []byte{ - 92, 80, 104, 49, 133, 25, 161, 215, 173, 101, 219, 211, 136, 91, 210, 145} - - aead, _ := NewCBCHMAC(key, aes.NewCipher) - ciphertext := aead.Seal(nil, nonce, []byte("plaintext"), []byte("aad")) - - // Changed AAD, must fail - _, err := aead.Open(nil, nonce, ciphertext, []byte("INVALID")) - if err == nil { - t.Error("must detect invalid aad") - } - - // Empty ciphertext, must fail - _, err = aead.Open(nil, nonce, []byte{}, []byte("aad")) - if err == nil { - t.Error("must detect invalid/empty ciphertext") - } - - // Corrupt ciphertext, must fail - corrupt := make([]byte, len(ciphertext)) - copy(corrupt, ciphertext) - corrupt[0] ^= 0xFF - - _, err = aead.Open(nil, nonce, corrupt, []byte("aad")) - if err == nil { - t.Error("must detect corrupt ciphertext") - } - - // Corrupt authtag, must fail - copy(corrupt, ciphertext) - corrupt[len(ciphertext)-1] ^= 0xFF - - _, err = aead.Open(nil, nonce, corrupt, []byte("aad")) - if err == nil { - t.Error("must detect corrupt authtag") - } - - // Truncated data, must fail - _, err = aead.Open(nil, nonce, ciphertext[:10], []byte("aad")) - if err == nil { - t.Error("must detect corrupt authtag") - } -} - -func TestVectorsAESCBC128(t *testing.T) { - // Source: http://tools.ietf.org/html/draft-ietf-jose-json-web-encryption-29#appendix-A.2 - plaintext := []byte{ - 76, 105, 118, 101, 32, 108, 111, 110, 103, 32, 97, 110, 100, 32, - 112, 114, 111, 115, 112, 101, 114, 46} - - aad := []byte{ - 101, 121, 74, 104, 98, 71, 99, 105, 79, 105, 74, 83, 85, 48, 69, - 120, 88, 122, 85, 105, 76, 67, 74, 108, 98, 109, 77, 105, 79, 105, - 74, 66, 77, 84, 73, 52, 81, 48, 74, 68, 76, 85, 104, 84, 77, 106, 85, - 50, 73, 110, 48} - - expectedCiphertext := []byte{ - 40, 57, 83, 181, 119, 33, 133, 148, 198, 185, 243, 24, 152, 230, 6, - 75, 129, 223, 127, 19, 210, 82, 183, 230, 168, 33, 215, 104, 143, - 112, 56, 102} - - expectedAuthtag := []byte{ - 246, 17, 244, 190, 4, 95, 98, 3, 231, 0, 115, 157, 242, 203, 100, - 191} - - key := []byte{ - 4, 211, 31, 197, 84, 157, 252, 254, 11, 100, 157, 250, 63, 170, 106, 206, - 107, 124, 212, 45, 111, 107, 9, 219, 200, 177, 0, 240, 143, 156, 44, 207} - - nonce := []byte{ - 3, 22, 60, 12, 43, 67, 104, 105, 108, 108, 105, 99, 111, 116, 104, 101} - - enc, err := NewCBCHMAC(key, aes.NewCipher) - out := enc.Seal(nil, nonce, plaintext, aad) - if err != nil { - t.Error("Unable to encrypt:", err) - return - } - - if bytes.Compare(out[:len(out)-16], expectedCiphertext) != 0 { - t.Error("Ciphertext did not match") - } - if bytes.Compare(out[len(out)-16:], expectedAuthtag) != 0 { - t.Error("Auth tag did not match") - } -} - -func TestVectorsAESCBC256(t *testing.T) { - // Source: https://tools.ietf.org/html/draft-mcgrew-aead-aes-cbc-hmac-sha2-05#section-5.4 - plaintext := []byte{ - 0x41, 0x20, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x20, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x20, - 0x6d, 0x75, 0x73, 0x74, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x71, 0x75, - 0x69, 0x72, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x62, 0x65, 0x20, 0x73, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x69, 0x74, 0x20, 0x6d, 0x75, 0x73, 0x74, 0x20, 0x62, - 0x65, 0x20, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x66, 
0x61, 0x6c, 0x6c, 0x20, 0x69, - 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x20, 0x6f, 0x66, - 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x6e, 0x65, 0x6d, 0x79, 0x20, 0x77, 0x69, 0x74, 0x68, 0x6f, - 0x75, 0x74, 0x20, 0x69, 0x6e, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x6e, 0x69, 0x65, 0x6e, 0x63, 0x65} - - aad := []byte{ - 0x54, 0x68, 0x65, 0x20, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x20, 0x70, 0x72, 0x69, 0x6e, 0x63, - 0x69, 0x70, 0x6c, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x41, 0x75, 0x67, 0x75, 0x73, 0x74, 0x65, 0x20, - 0x4b, 0x65, 0x72, 0x63, 0x6b, 0x68, 0x6f, 0x66, 0x66, 0x73} - - expectedCiphertext := []byte{ - 0x4a, 0xff, 0xaa, 0xad, 0xb7, 0x8c, 0x31, 0xc5, 0xda, 0x4b, 0x1b, 0x59, 0x0d, 0x10, 0xff, 0xbd, - 0x3d, 0xd8, 0xd5, 0xd3, 0x02, 0x42, 0x35, 0x26, 0x91, 0x2d, 0xa0, 0x37, 0xec, 0xbc, 0xc7, 0xbd, - 0x82, 0x2c, 0x30, 0x1d, 0xd6, 0x7c, 0x37, 0x3b, 0xcc, 0xb5, 0x84, 0xad, 0x3e, 0x92, 0x79, 0xc2, - 0xe6, 0xd1, 0x2a, 0x13, 0x74, 0xb7, 0x7f, 0x07, 0x75, 0x53, 0xdf, 0x82, 0x94, 0x10, 0x44, 0x6b, - 0x36, 0xeb, 0xd9, 0x70, 0x66, 0x29, 0x6a, 0xe6, 0x42, 0x7e, 0xa7, 0x5c, 0x2e, 0x08, 0x46, 0xa1, - 0x1a, 0x09, 0xcc, 0xf5, 0x37, 0x0d, 0xc8, 0x0b, 0xfe, 0xcb, 0xad, 0x28, 0xc7, 0x3f, 0x09, 0xb3, - 0xa3, 0xb7, 0x5e, 0x66, 0x2a, 0x25, 0x94, 0x41, 0x0a, 0xe4, 0x96, 0xb2, 0xe2, 0xe6, 0x60, 0x9e, - 0x31, 0xe6, 0xe0, 0x2c, 0xc8, 0x37, 0xf0, 0x53, 0xd2, 0x1f, 0x37, 0xff, 0x4f, 0x51, 0x95, 0x0b, - 0xbe, 0x26, 0x38, 0xd0, 0x9d, 0xd7, 0xa4, 0x93, 0x09, 0x30, 0x80, 0x6d, 0x07, 0x03, 0xb1, 0xf6} - - expectedAuthtag := []byte{ - 0x4d, 0xd3, 0xb4, 0xc0, 0x88, 0xa7, 0xf4, 0x5c, 0x21, 0x68, 0x39, 0x64, 0x5b, 0x20, 0x12, 0xbf, - 0x2e, 0x62, 0x69, 0xa8, 0xc5, 0x6a, 0x81, 0x6d, 0xbc, 0x1b, 0x26, 0x77, 0x61, 0x95, 0x5b, 0xc5} - - key := []byte{ - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, - 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f} - - nonce := []byte{ - 0x1a, 0xf3, 0x8c, 0x2d, 0xc2, 0xb9, 0x6f, 0xfd, 0xd8, 0x66, 0x94, 0x09, 0x23, 0x41, 0xbc, 0x04} - - enc, err := NewCBCHMAC(key, aes.NewCipher) - out := enc.Seal(nil, nonce, plaintext, aad) - if err != nil { - t.Error("Unable to encrypt:", err) - return - } - - if bytes.Compare(out[:len(out)-32], expectedCiphertext) != 0 { - t.Error("Ciphertext did not match, got", out[:len(out)-32], "wanted", expectedCiphertext) - } - if bytes.Compare(out[len(out)-32:], expectedAuthtag) != 0 { - t.Error("Auth tag did not match, got", out[len(out)-32:], "wanted", expectedAuthtag) - } -} - -func TestAESCBCRoundtrip(t *testing.T) { - key128 := []byte{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - - key192 := []byte{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7} - - key256 := []byte{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - - nonce := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - - RunRoundtrip(t, key128, nonce) - RunRoundtrip(t, key192, nonce) - RunRoundtrip(t, key256, nonce) -} - -func 
RunRoundtrip(t *testing.T, key, nonce []byte) { - aead, err := NewCBCHMAC(key, aes.NewCipher) - if err != nil { - panic(err) - } - - if aead.NonceSize() != len(nonce) { - panic("invalid nonce") - } - - // Test pre-existing data in dst buffer - dst := []byte{15, 15, 15, 15} - plaintext := []byte{0, 0, 0, 0} - aad := []byte{4, 3, 2, 1} - - result := aead.Seal(dst, nonce, plaintext, aad) - if bytes.Compare(dst, result[:4]) != 0 { - t.Error("Existing data in dst not preserved") - } - - // Test pre-existing (empty) dst buffer with sufficient capacity - dst = make([]byte, 256)[:0] - result, err = aead.Open(dst, nonce, result[4:], aad) - if err != nil { - panic(err) - } - - if bytes.Compare(result, plaintext) != 0 { - t.Error("Plaintext does not match output") - } -} - -func TestAESCBCOverhead(t *testing.T) { - aead, err := NewCBCHMAC(make([]byte, 32), aes.NewCipher) - if err != nil { - panic(err) - } - - if aead.Overhead() != 32 { - t.Error("CBC-HMAC reports incorrect overhead value") - } -} - -func TestPadding(t *testing.T) { - for i := 0; i < 256; i++ { - slice := make([]byte, i) - padded := padBuffer(slice, 16) - if len(padded)%16 != 0 { - t.Error("failed to pad slice properly", i) - return - } - unpadded, err := unpadBuffer(padded, 16) - if err != nil || len(unpadded) != i { - t.Error("failed to unpad slice properly", i) - return - } - } -} - -func TestInvalidKey(t *testing.T) { - key := make([]byte, 30) - _, err := NewCBCHMAC(key, aes.NewCipher) - if err == nil { - t.Error("should not be able to instantiate CBC-HMAC with invalid key") - } -} - -func TestTruncatedCiphertext(t *testing.T) { - key := make([]byte, 32) - nonce := make([]byte, 16) - data := make([]byte, 32) - - io.ReadFull(rand.Reader, key) - io.ReadFull(rand.Reader, nonce) - - aead, err := NewCBCHMAC(key, aes.NewCipher) - if err != nil { - panic(err) - } - - ctx := aead.(*cbcAEAD) - ct := aead.Seal(nil, nonce, data, nil) - - // Truncated ciphertext, but with correct auth tag - truncated, tail := resize(ct[:len(ct)-ctx.authtagBytes-2], len(ct)-2) - copy(tail, ctx.computeAuthTag(nil, nonce, truncated[:len(truncated)-ctx.authtagBytes])) - - // Open should fail - _, err = aead.Open(nil, nonce, truncated, nil) - if err == nil { - t.Error("open on truncated ciphertext should fail") - } -} - -func TestInvalidPaddingOpen(t *testing.T) { - key := make([]byte, 32) - nonce := make([]byte, 16) - - // Plaintext with invalid padding - plaintext := padBuffer(make([]byte, 28), aes.BlockSize) - plaintext[len(plaintext)-1] = 0xFF - - io.ReadFull(rand.Reader, key) - io.ReadFull(rand.Reader, nonce) - - block, _ := aes.NewCipher(key) - cbc := cipher.NewCBCEncrypter(block, nonce) - buffer := append([]byte{}, plaintext...) 
- cbc.CryptBlocks(buffer, buffer) - - aead, _ := NewCBCHMAC(key, aes.NewCipher) - ctx := aead.(*cbcAEAD) - - // Mutated ciphertext, but with correct auth tag - size := len(buffer) - ciphertext, tail := resize(buffer, size+(len(key)/2)) - copy(tail, ctx.computeAuthTag(nil, nonce, ciphertext[:size])) - - // Open should fail (b/c of invalid padding, even though tag matches) - _, err := aead.Open(nil, nonce, ciphertext, nil) - if err == nil || !strings.Contains(err.Error(), "invalid padding") { - t.Error("no or unexpected error on open with invalid padding:", err) - } -} - -func TestInvalidPadding(t *testing.T) { - for i := 0; i < 256; i++ { - slice := make([]byte, i) - padded := padBuffer(slice, 16) - if len(padded)%16 != 0 { - t.Error("failed to pad slice properly", i) - return - } - - paddingBytes := 16 - (i % 16) - - // Mutate padding for testing - for j := 1; j <= paddingBytes; j++ { - mutated := make([]byte, len(padded)) - copy(mutated, padded) - mutated[len(mutated)-j] ^= 0xFF - - _, err := unpadBuffer(mutated, 16) - if err == nil { - t.Error("unpad on invalid padding should fail", i) - return - } - } - - // Test truncated padding - _, err := unpadBuffer(padded[:len(padded)-1], 16) - if err == nil { - t.Error("unpad on truncated padding should fail", i) - return - } - } -} - -func TestZeroLengthPadding(t *testing.T) { - data := make([]byte, 16) - data, err := unpadBuffer(data, 16) - if err == nil { - t.Error("padding with 0x00 should never be valid") - } -} - -func benchEncryptCBCHMAC(b *testing.B, keySize, chunkSize int) { - key := make([]byte, keySize*2) - nonce := make([]byte, 16) - - io.ReadFull(rand.Reader, key) - io.ReadFull(rand.Reader, nonce) - - chunk := make([]byte, chunkSize) - - aead, err := NewCBCHMAC(key, aes.NewCipher) - if err != nil { - panic(err) - } - - b.SetBytes(int64(chunkSize)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - aead.Seal(nil, nonce, chunk, nil) - } -} - -func benchDecryptCBCHMAC(b *testing.B, keySize, chunkSize int) { - key := make([]byte, keySize*2) - nonce := make([]byte, 16) - - io.ReadFull(rand.Reader, key) - io.ReadFull(rand.Reader, nonce) - - chunk := make([]byte, chunkSize) - - aead, err := NewCBCHMAC(key, aes.NewCipher) - if err != nil { - panic(err) - } - - out := aead.Seal(nil, nonce, chunk, nil) - - b.SetBytes(int64(chunkSize)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - aead.Open(nil, nonce, out, nil) - } -} - -func BenchmarkEncryptAES128_CBCHMAC_1k(b *testing.B) { - benchEncryptCBCHMAC(b, 16, 1024) -} - -func BenchmarkEncryptAES128_CBCHMAC_64k(b *testing.B) { - benchEncryptCBCHMAC(b, 16, 65536) -} - -func BenchmarkEncryptAES128_CBCHMAC_1MB(b *testing.B) { - benchEncryptCBCHMAC(b, 16, 1048576) -} - -func BenchmarkEncryptAES128_CBCHMAC_64MB(b *testing.B) { - benchEncryptCBCHMAC(b, 16, 67108864) -} - -func BenchmarkDecryptAES128_CBCHMAC_1k(b *testing.B) { - benchDecryptCBCHMAC(b, 16, 1024) -} - -func BenchmarkDecryptAES128_CBCHMAC_64k(b *testing.B) { - benchDecryptCBCHMAC(b, 16, 65536) -} - -func BenchmarkDecryptAES128_CBCHMAC_1MB(b *testing.B) { - benchDecryptCBCHMAC(b, 16, 1048576) -} - -func BenchmarkDecryptAES128_CBCHMAC_64MB(b *testing.B) { - benchDecryptCBCHMAC(b, 16, 67108864) -} - -func BenchmarkEncryptAES192_CBCHMAC_64k(b *testing.B) { - benchEncryptCBCHMAC(b, 24, 65536) -} - -func BenchmarkEncryptAES192_CBCHMAC_1MB(b *testing.B) { - benchEncryptCBCHMAC(b, 24, 1048576) -} - -func BenchmarkEncryptAES192_CBCHMAC_64MB(b *testing.B) { - benchEncryptCBCHMAC(b, 24, 67108864) -} - -func BenchmarkDecryptAES192_CBCHMAC_1k(b *testing.B) 
{ - benchDecryptCBCHMAC(b, 24, 1024) -} - -func BenchmarkDecryptAES192_CBCHMAC_64k(b *testing.B) { - benchDecryptCBCHMAC(b, 24, 65536) -} - -func BenchmarkDecryptAES192_CBCHMAC_1MB(b *testing.B) { - benchDecryptCBCHMAC(b, 24, 1048576) -} - -func BenchmarkDecryptAES192_CBCHMAC_64MB(b *testing.B) { - benchDecryptCBCHMAC(b, 24, 67108864) -} - -func BenchmarkEncryptAES256_CBCHMAC_64k(b *testing.B) { - benchEncryptCBCHMAC(b, 32, 65536) -} - -func BenchmarkEncryptAES256_CBCHMAC_1MB(b *testing.B) { - benchEncryptCBCHMAC(b, 32, 1048576) -} - -func BenchmarkEncryptAES256_CBCHMAC_64MB(b *testing.B) { - benchEncryptCBCHMAC(b, 32, 67108864) -} - -func BenchmarkDecryptAES256_CBCHMAC_1k(b *testing.B) { - benchDecryptCBCHMAC(b, 32, 1032) -} - -func BenchmarkDecryptAES256_CBCHMAC_64k(b *testing.B) { - benchDecryptCBCHMAC(b, 32, 65536) -} - -func BenchmarkDecryptAES256_CBCHMAC_1MB(b *testing.B) { - benchDecryptCBCHMAC(b, 32, 1048576) -} - -func BenchmarkDecryptAES256_CBCHMAC_64MB(b *testing.B) { - benchDecryptCBCHMAC(b, 32, 67108864) -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go deleted file mode 100644 index cbb5f7b8..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go +++ /dev/null @@ -1,75 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto" - "encoding/binary" - "hash" - "io" -) - -type concatKDF struct { - z, info []byte - i uint32 - cache []byte - hasher hash.Hash -} - -// NewConcatKDF builds a KDF reader based on the given inputs. 
-func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader { - buffer := make([]byte, len(algID)+len(ptyUInfo)+len(ptyVInfo)+len(supPubInfo)+len(supPrivInfo)) - n := 0 - n += copy(buffer, algID) - n += copy(buffer[n:], ptyUInfo) - n += copy(buffer[n:], ptyVInfo) - n += copy(buffer[n:], supPubInfo) - copy(buffer[n:], supPrivInfo) - - hasher := hash.New() - - return &concatKDF{ - z: z, - info: buffer, - hasher: hasher, - cache: []byte{}, - i: 1, - } -} - -func (ctx *concatKDF) Read(out []byte) (int, error) { - copied := copy(out, ctx.cache) - ctx.cache = ctx.cache[copied:] - - for copied < len(out) { - ctx.hasher.Reset() - - // Write on a hash.Hash never fails - _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i) - _, _ = ctx.hasher.Write(ctx.z) - _, _ = ctx.hasher.Write(ctx.info) - - hash := ctx.hasher.Sum(nil) - chunkCopied := copy(out[copied:], hash) - copied += chunkCopied - ctx.cache = hash[chunkCopied:] - - ctx.i++ - } - - return copied, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go deleted file mode 100644 index 48219b3e..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go +++ /dev/null @@ -1,150 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "bytes" - "crypto" - "testing" -) - -// Taken from: https://tools.ietf.org/id/draft-ietf-jose-json-web-algorithms-38.txt -func TestVectorConcatKDF(t *testing.T) { - z := []byte{ - 158, 86, 217, 29, 129, 113, 53, 211, 114, 131, 66, 131, 191, 132, - 38, 156, 251, 49, 110, 163, 218, 128, 106, 72, 246, 218, 167, 121, - 140, 254, 144, 196} - - algID := []byte{0, 0, 0, 7, 65, 49, 50, 56, 71, 67, 77} - - ptyUInfo := []byte{0, 0, 0, 5, 65, 108, 105, 99, 101} - ptyVInfo := []byte{0, 0, 0, 3, 66, 111, 98} - - supPubInfo := []byte{0, 0, 0, 128} - supPrivInfo := []byte{} - - expected := []byte{ - 86, 170, 141, 234, 248, 35, 109, 32, 92, 34, 40, 205, 113, 167, 16, 26} - - ckdf := NewConcatKDF(crypto.SHA256, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo) - - out0 := make([]byte, 9) - out1 := make([]byte, 7) - - read0, err := ckdf.Read(out0) - if err != nil { - t.Error("error when reading from concat kdf reader", err) - return - } - - read1, err := ckdf.Read(out1) - if err != nil { - t.Error("error when reading from concat kdf reader", err) - return - } - - if read0+read1 != len(out0)+len(out1) { - t.Error("did not receive enough bytes from concat kdf reader") - return - } - - out := []byte{} - out = append(out, out0...) - out = append(out, out1...) 
- - if bytes.Compare(out, expected) != 0 { - t.Error("did not receive expected output from concat kdf reader") - return - } -} - -func TestCache(t *testing.T) { - z := []byte{ - 158, 86, 217, 29, 129, 113, 53, 211, 114, 131, 66, 131, 191, 132, - 38, 156, 251, 49, 110, 163, 218, 128, 106, 72, 246, 218, 167, 121, - 140, 254, 144, 196} - - algID := []byte{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4} - - ptyUInfo := []byte{1, 2, 3, 4} - ptyVInfo := []byte{4, 3, 2, 1} - - supPubInfo := []byte{} - supPrivInfo := []byte{} - - outputs := [][]byte{} - - // Read the same amount of data in different chunk sizes - chunkSizes := []int{1, 2, 4, 8, 16, 32, 64, 128, 256, 512} - - for _, c := range chunkSizes { - out := make([]byte, 1024) - reader := NewConcatKDF(crypto.SHA256, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo) - - for i := 0; i < 1024; i += c { - _, _ = reader.Read(out[i : i+c]) - } - - outputs = append(outputs, out) - } - - for i := range outputs { - if bytes.Compare(outputs[i], outputs[(i+1)%len(outputs)]) != 0 { - t.Error("not all outputs from KDF matched") - } - } -} - -func benchmarkKDF(b *testing.B, total int) { - z := []byte{ - 158, 86, 217, 29, 129, 113, 53, 211, 114, 131, 66, 131, 191, 132, - 38, 156, 251, 49, 110, 163, 218, 128, 106, 72, 246, 218, 167, 121, - 140, 254, 144, 196} - - algID := []byte{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4} - - ptyUInfo := []byte{1, 2, 3, 4} - ptyVInfo := []byte{4, 3, 2, 1} - - supPubInfo := []byte{} - supPrivInfo := []byte{} - - out := make([]byte, total) - reader := NewConcatKDF(crypto.SHA256, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo) - - b.ResetTimer() - b.SetBytes(int64(total)) - for i := 0; i < b.N; i++ { - _, _ = reader.Read(out) - } -} - -func BenchmarkConcatKDF_1k(b *testing.B) { - benchmarkKDF(b, 1024) -} - -func BenchmarkConcatKDF_64k(b *testing.B) { - benchmarkKDF(b, 65536) -} - -func BenchmarkConcatKDF_1MB(b *testing.B) { - benchmarkKDF(b, 1048576) -} - -func BenchmarkConcatKDF_64MB(b *testing.B) { - benchmarkKDF(b, 67108864) -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go deleted file mode 100644 index c6a5a821..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go +++ /dev/null @@ -1,51 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto" - "crypto/ecdsa" - "encoding/binary" -) - -// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA. 
-func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte { - // algId, partyUInfo, partyVInfo inputs must be prefixed with the length - algID := lengthPrefixed([]byte(alg)) - ptyUInfo := lengthPrefixed(apuData) - ptyVInfo := lengthPrefixed(apvData) - - // suppPubInfo is the encoded length of the output size in bits - supPubInfo := make([]byte, 4) - binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8) - - z, _ := priv.PublicKey.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes()) - reader := NewConcatKDF(crypto.SHA256, z.Bytes(), algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{}) - - key := make([]byte, size) - - // Read on the KDF will never fail - _, _ = reader.Read(key) - return key -} - -func lengthPrefixed(data []byte) []byte { - out := make([]byte, len(data)+4) - binary.BigEndian.PutUint32(out, uint32(len(data))) - copy(out[4:], data) - return out -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go deleted file mode 100644 index f92abb17..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go +++ /dev/null @@ -1,98 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "encoding/base64" - "math/big" - "testing" -) - -// Example keys from JWA, Appendix C -var aliceKey = &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: elliptic.P256(), - X: fromBase64Int("gI0GAILBdu7T53akrFmMyGcsF3n5dO7MmwNBHKW5SV0="), - Y: fromBase64Int("SLW_xSffzlPWrHEVI30DHM_4egVwt3NQqeUD7nMFpps="), - }, - D: fromBase64Int("0_NxaRPUMQoAJt50Gz8YiTr8gRTwyEaCumd-MToTmIo="), -} - -var bobKey = &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: elliptic.P256(), - X: fromBase64Int("weNJy2HscCSM6AEDTDg04biOvhFhyyWvOHQfeF_PxMQ="), - Y: fromBase64Int("e8lnCO-AlStT-NJVX-crhB7QRYhiix03illJOVAOyck="), - }, - D: fromBase64Int("VEmDZpDXXK8p8N0Cndsxs924q6nS1RXFASRl6BfUqdw="), -} - -// Build big int from base64-encoded string. Strips whitespace (for testing). 
-func fromBase64Int(data string) *big.Int { - val, err := base64.URLEncoding.DecodeString(data) - if err != nil { - panic("Invalid test data") - } - return new(big.Int).SetBytes(val) -} - -func TestVectorECDHES(t *testing.T) { - apuData := []byte("Alice") - apvData := []byte("Bob") - - expected := []byte{ - 86, 170, 141, 234, 248, 35, 109, 32, 92, 34, 40, 205, 113, 167, 16, 26} - - output := DeriveECDHES("A128GCM", apuData, apvData, bobKey, &aliceKey.PublicKey, 16) - - if bytes.Compare(output, expected) != 0 { - t.Error("output did not match what we expect, got", output, "wanted", expected) - } -} - -func BenchmarkECDHES_128(b *testing.B) { - apuData := []byte("APU") - apvData := []byte("APV") - - b.ResetTimer() - for i := 0; i < b.N; i++ { - DeriveECDHES("ID", apuData, apvData, bobKey, &aliceKey.PublicKey, 16) - } -} - -func BenchmarkECDHES_192(b *testing.B) { - apuData := []byte("APU") - apvData := []byte("APV") - - b.ResetTimer() - for i := 0; i < b.N; i++ { - DeriveECDHES("ID", apuData, apvData, bobKey, &aliceKey.PublicKey, 24) - } -} - -func BenchmarkECDHES_256(b *testing.B) { - apuData := []byte("APU") - apvData := []byte("APV") - - b.ResetTimer() - for i := 0; i < b.N; i++ { - DeriveECDHES("ID", apuData, apvData, bobKey, &aliceKey.PublicKey, 32) - } -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go deleted file mode 100644 index 1d36d501..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go +++ /dev/null @@ -1,109 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto/cipher" - "crypto/subtle" - "encoding/binary" - "errors" -) - -var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6} - -// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher. -func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { - if len(cek)%8 != 0 { - return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks") - } - - n := len(cek) / 8 - r := make([][]byte, n) - - for i := range r { - r[i] = make([]byte, 8) - copy(r[i], cek[i*8:]) - } - - buffer := make([]byte, 16) - tBytes := make([]byte, 8) - copy(buffer, defaultIV) - - for t := 0; t < 6*n; t++ { - copy(buffer[8:], r[t%n]) - - block.Encrypt(buffer, buffer) - - binary.BigEndian.PutUint64(tBytes, uint64(t+1)) - - for i := 0; i < 8; i++ { - buffer[i] = buffer[i] ^ tBytes[i] - } - copy(r[t%n], buffer[8:]) - } - - out := make([]byte, (n+1)*8) - copy(out, buffer[:8]) - for i := range r { - copy(out[(i+1)*8:], r[i]) - } - - return out, nil -} - -// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher. 
-func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { - if len(ciphertext)%8 != 0 { - return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks") - } - - n := (len(ciphertext) / 8) - 1 - r := make([][]byte, n) - - for i := range r { - r[i] = make([]byte, 8) - copy(r[i], ciphertext[(i+1)*8:]) - } - - buffer := make([]byte, 16) - tBytes := make([]byte, 8) - copy(buffer[:8], ciphertext[:8]) - - for t := 6*n - 1; t >= 0; t-- { - binary.BigEndian.PutUint64(tBytes, uint64(t+1)) - - for i := 0; i < 8; i++ { - buffer[i] = buffer[i] ^ tBytes[i] - } - copy(buffer[8:], r[t%n]) - - block.Decrypt(buffer, buffer) - - copy(r[t%n], buffer[8:]) - } - - if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { - return nil, errors.New("square/go-jose: failed to unwrap key") - } - - out := make([]byte, n*8) - for i := range r { - copy(out[i*8:], r[i]) - } - - return out, nil -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go deleted file mode 100644 index ceecf812..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go +++ /dev/null @@ -1,133 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package josecipher - -import ( - "bytes" - "crypto/aes" - "encoding/hex" - "testing" -) - -func TestAesKeyWrap(t *testing.T) { - // Test vectors from: http://csrc.nist.gov/groups/ST/toolkit/documents/kms/key-wrap.pdf - kek0, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F") - cek0, _ := hex.DecodeString("00112233445566778899AABBCCDDEEFF") - - expected0, _ := hex.DecodeString("1FA68B0A8112B447AEF34BD8FB5A7B829D3E862371D2CFE5") - - kek1, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F1011121314151617") - cek1, _ := hex.DecodeString("00112233445566778899AABBCCDDEEFF") - - expected1, _ := hex.DecodeString("96778B25AE6CA435F92B5B97C050AED2468AB8A17AD84E5D") - - kek2, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F") - cek2, _ := hex.DecodeString("00112233445566778899AABBCCDDEEFF0001020304050607") - - expected2, _ := hex.DecodeString("A8F9BC1612C68B3FF6E6F4FBE30E71E4769C8B80A32CB8958CD5D17D6B254DA1") - - block0, _ := aes.NewCipher(kek0) - block1, _ := aes.NewCipher(kek1) - block2, _ := aes.NewCipher(kek2) - - out0, _ := KeyWrap(block0, cek0) - out1, _ := KeyWrap(block1, cek1) - out2, _ := KeyWrap(block2, cek2) - - if bytes.Compare(out0, expected0) != 0 { - t.Error("output 0 not as expected, got", out0, "wanted", expected0) - } - - if bytes.Compare(out1, expected1) != 0 { - t.Error("output 1 not as expected, got", out1, "wanted", expected1) - } - - if bytes.Compare(out2, expected2) != 0 { - t.Error("output 2 not as expected, got", out2, "wanted", expected2) - } - - unwrap0, _ := KeyUnwrap(block0, out0) - unwrap1, _ := KeyUnwrap(block1, out1) - unwrap2, _ := KeyUnwrap(block2, out2) - - if bytes.Compare(unwrap0, cek0) != 0 { - t.Error("key unwrap did not return original input, got", unwrap0, "wanted", cek0) - } - - if bytes.Compare(unwrap1, cek1) != 0 { - t.Error("key unwrap did not return original input, got", unwrap1, "wanted", cek1) - } - - if bytes.Compare(unwrap2, cek2) != 0 { - t.Error("key unwrap did not return original input, got", unwrap2, "wanted", cek2) - } -} - -func TestAesKeyWrapInvalid(t *testing.T) { - kek, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F") - - // Invalid unwrap input (bit flipped) - input0, _ := hex.DecodeString("1EA68C1A8112B447AEF34BD8FB5A7B828D3E862371D2CFE5") - - block, _ := aes.NewCipher(kek) - - _, err := KeyUnwrap(block, input0) - if err == nil { - t.Error("key unwrap failed to detect invalid input") - } - - // Invalid unwrap input (truncated) - input1, _ := hex.DecodeString("1EA68C1A8112B447AEF34BD8FB5A7B828D3E862371D2CF") - - _, err = KeyUnwrap(block, input1) - if err == nil { - t.Error("key unwrap failed to detect truncated input") - } - - // Invalid wrap input (not multiple of 8) - input2, _ := hex.DecodeString("0123456789ABCD") - - _, err = KeyWrap(block, input2) - if err == nil { - t.Error("key wrap accepted invalid input") - } - -} - -func BenchmarkAesKeyWrap(b *testing.B) { - kek, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F") - key, _ := hex.DecodeString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF") - - block, _ := aes.NewCipher(kek) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - KeyWrap(block, key) - } -} - -func BenchmarkAesKeyUnwrap(b *testing.B) { - kek, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F") - input, _ := hex.DecodeString("1FA68B0A8112B447AEF34BD8FB5A7B829D3E862371D2CFE5") - - block, _ := aes.NewCipher(kek) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - KeyUnwrap(block, input) - } -} diff --git 
a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter.go deleted file mode 100644 index f61af2c0..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter.go +++ /dev/null @@ -1,349 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "crypto/ecdsa" - "crypto/rsa" - "fmt" - "reflect" -) - -// Encrypter represents an encrypter which produces an encrypted JWE object. -type Encrypter interface { - Encrypt(plaintext []byte) (*JsonWebEncryption, error) - EncryptWithAuthData(plaintext []byte, aad []byte) (*JsonWebEncryption, error) - SetCompression(alg CompressionAlgorithm) -} - -// MultiEncrypter represents an encrypter which supports multiple recipients. -type MultiEncrypter interface { - Encrypt(plaintext []byte) (*JsonWebEncryption, error) - EncryptWithAuthData(plaintext []byte, aad []byte) (*JsonWebEncryption, error) - SetCompression(alg CompressionAlgorithm) - AddRecipient(alg KeyAlgorithm, encryptionKey interface{}) error -} - -// A generic content cipher -type contentCipher interface { - keySize() int - encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error) - decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error) -} - -// A key generator (for generating/getting a CEK) -type keyGenerator interface { - keySize() int - genKey() ([]byte, rawHeader, error) -} - -// A generic key encrypter -type keyEncrypter interface { - encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key -} - -// A generic key decrypter -type keyDecrypter interface { - decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key -} - -// A generic encrypter based on the given key encrypter and content cipher. -type genericEncrypter struct { - contentAlg ContentEncryption - compressionAlg CompressionAlgorithm - cipher contentCipher - recipients []recipientKeyInfo - keyGenerator keyGenerator -} - -type recipientKeyInfo struct { - keyID string - keyAlg KeyAlgorithm - keyEncrypter keyEncrypter -} - -// SetCompression sets a compression algorithm to be applied before encryption. 
-func (ctx *genericEncrypter) SetCompression(compressionAlg CompressionAlgorithm) { - ctx.compressionAlg = compressionAlg -} - -// NewEncrypter creates an appropriate encrypter based on the key type -func NewEncrypter(alg KeyAlgorithm, enc ContentEncryption, encryptionKey interface{}) (Encrypter, error) { - encrypter := &genericEncrypter{ - contentAlg: enc, - compressionAlg: NONE, - recipients: []recipientKeyInfo{}, - cipher: getContentCipher(enc), - } - - if encrypter.cipher == nil { - return nil, ErrUnsupportedAlgorithm - } - - var keyID string - var rawKey interface{} - switch encryptionKey := encryptionKey.(type) { - case *JsonWebKey: - keyID = encryptionKey.KeyID - rawKey = encryptionKey.Key - default: - rawKey = encryptionKey - } - - switch alg { - case DIRECT: - // Direct encryption mode must be treated differently - if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { - return nil, ErrUnsupportedKeyType - } - encrypter.keyGenerator = staticKeyGenerator{ - key: rawKey.([]byte), - } - recipient, _ := newSymmetricRecipient(alg, rawKey.([]byte)) - if keyID != "" { - recipient.keyID = keyID - } - encrypter.recipients = []recipientKeyInfo{recipient} - return encrypter, nil - case ECDH_ES: - // ECDH-ES (w/o key wrapping) is similar to DIRECT mode - typeOf := reflect.TypeOf(rawKey) - if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { - return nil, ErrUnsupportedKeyType - } - encrypter.keyGenerator = ecKeyGenerator{ - size: encrypter.cipher.keySize(), - algID: string(enc), - publicKey: rawKey.(*ecdsa.PublicKey), - } - recipient, _ := newECDHRecipient(alg, rawKey.(*ecdsa.PublicKey)) - if keyID != "" { - recipient.keyID = keyID - } - encrypter.recipients = []recipientKeyInfo{recipient} - return encrypter, nil - default: - // Can just add a standard recipient - encrypter.keyGenerator = randomKeyGenerator{ - size: encrypter.cipher.keySize(), - } - err := encrypter.AddRecipient(alg, encryptionKey) - return encrypter, err - } -} - -// NewMultiEncrypter creates a multi-encrypter based on the given parameters -func NewMultiEncrypter(enc ContentEncryption) (MultiEncrypter, error) { - cipher := getContentCipher(enc) - - if cipher == nil { - return nil, ErrUnsupportedAlgorithm - } - - encrypter := &genericEncrypter{ - contentAlg: enc, - compressionAlg: NONE, - recipients: []recipientKeyInfo{}, - cipher: cipher, - keyGenerator: randomKeyGenerator{ - size: cipher.keySize(), - }, - } - - return encrypter, nil -} - -func (ctx *genericEncrypter) AddRecipient(alg KeyAlgorithm, encryptionKey interface{}) (err error) { - var recipient recipientKeyInfo - - switch alg { - case DIRECT, ECDH_ES: - return fmt.Errorf("square/go-jose: key algorithm '%s' not supported in multi-recipient mode", alg) - } - - recipient, err = makeJWERecipient(alg, encryptionKey) - - if err == nil { - ctx.recipients = append(ctx.recipients, recipient) - } - return err -} - -func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { - switch encryptionKey := encryptionKey.(type) { - case *rsa.PublicKey: - return newRSARecipient(alg, encryptionKey) - case *ecdsa.PublicKey: - return newECDHRecipient(alg, encryptionKey) - case []byte: - return newSymmetricRecipient(alg, encryptionKey) - case *JsonWebKey: - recipient, err := makeJWERecipient(alg, encryptionKey.Key) - if err == nil && encryptionKey.KeyID != "" { - recipient.keyID = encryptionKey.KeyID - } - return recipient, err - default: - return recipientKeyInfo{}, ErrUnsupportedKeyType - } -} - -// newDecrypter creates an appropriate decrypter based on 
the key type -func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { - switch decryptionKey := decryptionKey.(type) { - case *rsa.PrivateKey: - return &rsaDecrypterSigner{ - privateKey: decryptionKey, - }, nil - case *ecdsa.PrivateKey: - return &ecDecrypterSigner{ - privateKey: decryptionKey, - }, nil - case []byte: - return &symmetricKeyCipher{ - key: decryptionKey, - }, nil - case *JsonWebKey: - return newDecrypter(decryptionKey.Key) - default: - return nil, ErrUnsupportedKeyType - } -} - -// Implementation of encrypt method producing a JWE object. -func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JsonWebEncryption, error) { - return ctx.EncryptWithAuthData(plaintext, nil) -} - -// Implementation of encrypt method producing a JWE object. -func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JsonWebEncryption, error) { - obj := &JsonWebEncryption{} - obj.aad = aad - - obj.protected = &rawHeader{ - Enc: ctx.contentAlg, - } - obj.recipients = make([]recipientInfo, len(ctx.recipients)) - - if len(ctx.recipients) == 0 { - return nil, fmt.Errorf("square/go-jose: no recipients to encrypt to") - } - - cek, headers, err := ctx.keyGenerator.genKey() - if err != nil { - return nil, err - } - - obj.protected.merge(&headers) - - for i, info := range ctx.recipients { - recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) - if err != nil { - return nil, err - } - - recipient.header.Alg = string(info.keyAlg) - if info.keyID != "" { - recipient.header.Kid = info.keyID - } - obj.recipients[i] = recipient - } - - if len(ctx.recipients) == 1 { - // Move per-recipient headers into main protected header if there's - // only a single recipient. - obj.protected.merge(obj.recipients[0].header) - obj.recipients[0].header = nil - } - - if ctx.compressionAlg != NONE { - plaintext, err = compress(ctx.compressionAlg, plaintext) - if err != nil { - return nil, err - } - - obj.protected.Zip = ctx.compressionAlg - } - - authData := obj.computeAuthData() - parts, err := ctx.cipher.encrypt(cek, authData, plaintext) - if err != nil { - return nil, err - } - - obj.iv = parts.iv - obj.ciphertext = parts.ciphertext - obj.tag = parts.tag - - return obj, nil -} - -// Decrypt and validate the object and return the plaintext. -func (obj JsonWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { - headers := obj.mergedHeaders(nil) - - if len(headers.Crit) > 0 { - return nil, fmt.Errorf("square/go-jose: unsupported crit header") - } - - decrypter, err := newDecrypter(decryptionKey) - if err != nil { - return nil, err - } - - cipher := getContentCipher(headers.Enc) - if cipher == nil { - return nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(headers.Enc)) - } - - generator := randomKeyGenerator{ - size: cipher.keySize(), - } - - parts := &aeadParts{ - iv: obj.iv, - ciphertext: obj.ciphertext, - tag: obj.tag, - } - - authData := obj.computeAuthData() - - var plaintext []byte - for _, recipient := range obj.recipients { - recipientHeaders := obj.mergedHeaders(&recipient) - - cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) - if err == nil { - // Found a valid CEK -- let's try to decrypt. - plaintext, err = cipher.decrypt(cek, authData, parts) - if err == nil { - break - } - } - } - - if plaintext == nil { - return nil, ErrCryptoFailure - } - - // The "zip" header parameter may only be present in the protected header.
- if obj.protected.Zip != "" { - plaintext, err = decompress(obj.protected.Zip, plaintext) - } - - return plaintext, err -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter_test.go deleted file mode 100644 index 86b8fc0a..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter_test.go +++ /dev/null @@ -1,784 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "fmt" - "io" - "testing" -) - -// We generate only a single RSA and EC key for testing, speeds up tests. -var rsaTestKey, _ = rsa.GenerateKey(rand.Reader, 2048) - -var ecTestKey256, _ = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) -var ecTestKey384, _ = ecdsa.GenerateKey(elliptic.P384(), rand.Reader) -var ecTestKey521, _ = ecdsa.GenerateKey(elliptic.P521(), rand.Reader) - -func RoundtripJWE(keyAlg KeyAlgorithm, encAlg ContentEncryption, compressionAlg CompressionAlgorithm, serializer func(*JsonWebEncryption) (string, error), corrupter func(*JsonWebEncryption) bool, aad []byte, encryptionKey interface{}, decryptionKey interface{}) error { - enc, err := NewEncrypter(keyAlg, encAlg, encryptionKey) - if err != nil { - return fmt.Errorf("error on new encrypter: %s", err) - } - - enc.SetCompression(compressionAlg) - - input := []byte("Lorem ipsum dolor sit amet") - obj, err := enc.EncryptWithAuthData(input, aad) - if err != nil { - return fmt.Errorf("error in encrypt: %s", err) - } - - msg, err := serializer(obj) - if err != nil { - return fmt.Errorf("error in serializer: %s", err) - } - - parsed, err := ParseEncrypted(msg) - if err != nil { - return fmt.Errorf("error in parse: %s, on msg '%s'", err, msg) - } - - // (Maybe) mangle object - skip := corrupter(parsed) - if skip { - return fmt.Errorf("corrupter indicated message should be skipped") - } - - if bytes.Compare(parsed.GetAuthData(), aad) != 0 { - return fmt.Errorf("auth data in parsed object does not match") - } - - output, err := parsed.Decrypt(decryptionKey) - if err != nil { - return fmt.Errorf("error on decrypt: %s", err) - } - - if bytes.Compare(input, output) != 0 { - return fmt.Errorf("Decrypted output does not match input, got '%s' but wanted '%s'", output, input) - } - - return nil -} - -func TestRoundtripsJWE(t *testing.T) { - // Test matrix - keyAlgs := []KeyAlgorithm{ - DIRECT, ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW, A128KW, A192KW, A256KW, - RSA1_5, RSA_OAEP, RSA_OAEP_256, A128GCMKW, A192GCMKW, A256GCMKW} - encAlgs := []ContentEncryption{A128GCM, A192GCM, A256GCM, A128CBC_HS256, A192CBC_HS384, A256CBC_HS512} - zipAlgs := []CompressionAlgorithm{NONE, DEFLATE} - - serializers := []func(*JsonWebEncryption) (string, error){ - func(obj 
*JsonWebEncryption) (string, error) { return obj.CompactSerialize() }, - func(obj *JsonWebEncryption) (string, error) { return obj.FullSerialize(), nil }, - } - - corrupter := func(obj *JsonWebEncryption) bool { return false } - - // Note: can't use AAD with compact serialization - aads := [][]byte{ - nil, - []byte("Ut enim ad minim veniam"), - } - - // Test all different configurations - for _, alg := range keyAlgs { - for _, enc := range encAlgs { - for _, key := range generateTestKeys(alg, enc) { - for _, zip := range zipAlgs { - for i, serializer := range serializers { - err := RoundtripJWE(alg, enc, zip, serializer, corrupter, aads[i], key.enc, key.dec) - if err != nil { - t.Error(err, alg, enc, zip, i) - } - } - } - } - } - } -} - -func TestRoundtripsJWECorrupted(t *testing.T) { - // Test matrix - keyAlgs := []KeyAlgorithm{DIRECT, ECDH_ES, ECDH_ES_A128KW, A128KW, RSA1_5, RSA_OAEP, RSA_OAEP_256, A128GCMKW} - encAlgs := []ContentEncryption{A128GCM, A192GCM, A256GCM, A128CBC_HS256, A192CBC_HS384, A256CBC_HS512} - zipAlgs := []CompressionAlgorithm{NONE, DEFLATE} - - serializers := []func(*JsonWebEncryption) (string, error){ - func(obj *JsonWebEncryption) (string, error) { return obj.CompactSerialize() }, - func(obj *JsonWebEncryption) (string, error) { return obj.FullSerialize(), nil }, - } - - bitflip := func(slice []byte) bool { - if len(slice) > 0 { - slice[0] ^= 0xFF - return false - } - return true - } - - corrupters := []func(*JsonWebEncryption) bool{ - func(obj *JsonWebEncryption) bool { - // Set invalid ciphertext - return bitflip(obj.ciphertext) - }, - func(obj *JsonWebEncryption) bool { - // Set invalid auth tag - return bitflip(obj.tag) - }, - func(obj *JsonWebEncryption) bool { - // Set invalid AAD - return bitflip(obj.aad) - }, - func(obj *JsonWebEncryption) bool { - // Mess with encrypted key - return bitflip(obj.recipients[0].encryptedKey) - }, - func(obj *JsonWebEncryption) bool { - // Mess with GCM-KW auth tag - return bitflip(obj.protected.Tag.bytes()) - }, - } - - // Note: can't use AAD with compact serialization - aads := [][]byte{ - nil, - []byte("Ut enim ad minim veniam"), - } - - // Test all different configurations - for _, alg := range keyAlgs { - for _, enc := range encAlgs { - for _, key := range generateTestKeys(alg, enc) { - for _, zip := range zipAlgs { - for i, serializer := range serializers { - for j, corrupter := range corrupters { - err := RoundtripJWE(alg, enc, zip, serializer, corrupter, aads[i], key.enc, key.dec) - if err == nil { - t.Error("failed to detect corrupt data", err, alg, enc, zip, i, j) - } - } - } - } - } - } - } -} - -func TestEncrypterWithJWKAndKeyID(t *testing.T) { - enc, err := NewEncrypter(A128KW, A128GCM, &JsonWebKey{ - KeyID: "test-id", - Key: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - }) - if err != nil { - t.Error(err) - } - - ciphertext, _ := enc.Encrypt([]byte("Lorem ipsum dolor sit amet")) - - serialized1, _ := ciphertext.CompactSerialize() - serialized2 := ciphertext.FullSerialize() - - parsed1, _ := ParseEncrypted(serialized1) - parsed2, _ := ParseEncrypted(serialized2) - - if parsed1.Header.KeyID != "test-id" { - t.Errorf("expected message to have key id from JWK, but found '%s' instead", parsed1.Header.KeyID) - } - if parsed2.Header.KeyID != "test-id" { - t.Errorf("expected message to have key id from JWK, but found '%s' instead", parsed2.Header.KeyID) - } -} - -func TestEncrypterWithBrokenRand(t *testing.T) { - keyAlgs := []KeyAlgorithm{ECDH_ES_A128KW, A128KW, RSA1_5, RSA_OAEP, RSA_OAEP_256, 
A128GCMKW} - encAlgs := []ContentEncryption{A128GCM, A192GCM, A256GCM, A128CBC_HS256, A192CBC_HS384, A256CBC_HS512} - - serializer := func(obj *JsonWebEncryption) (string, error) { return obj.CompactSerialize() } - corrupter := func(obj *JsonWebEncryption) bool { return false } - - // Break rand reader - readers := []func() io.Reader{ - // Totally broken - func() io.Reader { return bytes.NewReader([]byte{}) }, - // Not enough bytes - func() io.Reader { return io.LimitReader(rand.Reader, 20) }, - } - - defer resetRandReader() - - for _, alg := range keyAlgs { - for _, enc := range encAlgs { - for _, key := range generateTestKeys(alg, enc) { - for i, getReader := range readers { - randReader = getReader() - err := RoundtripJWE(alg, enc, NONE, serializer, corrupter, nil, key.enc, key.dec) - if err == nil { - t.Error("encrypter should fail if rand is broken", i) - } - } - } - } - } -} - -func TestNewEncrypterErrors(t *testing.T) { - _, err := NewEncrypter("XYZ", "XYZ", nil) - if err == nil { - t.Error("was able to instantiate encrypter with invalid cipher") - } - - _, err = NewMultiEncrypter("XYZ") - if err == nil { - t.Error("was able to instantiate multi-encrypter with invalid cipher") - } - - _, err = NewEncrypter(DIRECT, A128GCM, nil) - if err == nil { - t.Error("was able to instantiate encrypter with invalid direct key") - } - - _, err = NewEncrypter(ECDH_ES, A128GCM, nil) - if err == nil { - t.Error("was able to instantiate encrypter with invalid EC key") - } -} - -func TestMultiRecipientJWE(t *testing.T) { - enc, err := NewMultiEncrypter(A128GCM) - if err != nil { - panic(err) - } - - err = enc.AddRecipient(RSA_OAEP, &rsaTestKey.PublicKey) - if err != nil { - t.Error("error when adding RSA recipient", err) - } - - sharedKey := []byte{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - } - - err = enc.AddRecipient(A256GCMKW, sharedKey) - if err != nil { - t.Error("error when adding AES recipient: ", err) - return - } - - input := []byte("Lorem ipsum dolor sit amet") - obj, err := enc.Encrypt(input) - if err != nil { - t.Error("error in encrypt: ", err) - return - } - - msg := obj.FullSerialize() - - parsed, err := ParseEncrypted(msg) - if err != nil { - t.Error("error in parse: ", err) - return - } - - output, err := parsed.Decrypt(rsaTestKey) - if err != nil { - t.Error("error on decrypt with RSA: ", err) - return - } - - if bytes.Compare(input, output) != 0 { - t.Error("Decrypted output does not match input: ", output, input) - return - } - - output, err = parsed.Decrypt(sharedKey) - if err != nil { - t.Error("error on decrypt with AES: ", err) - return - } - - if bytes.Compare(input, output) != 0 { - t.Error("Decrypted output does not match input", output, input) - return - } -} - -func TestMultiRecipientErrors(t *testing.T) { - enc, err := NewMultiEncrypter(A128GCM) - if err != nil { - panic(err) - } - - input := []byte("Lorem ipsum dolor sit amet") - _, err = enc.Encrypt(input) - if err == nil { - t.Error("should fail when encrypting to zero recipients") - } - - err = enc.AddRecipient(DIRECT, nil) - if err == nil { - t.Error("should reject DIRECT mode when encrypting to multiple recipients") - } - - err = enc.AddRecipient(ECDH_ES, nil) - if err == nil { - t.Error("should reject ECDH_ES mode when encrypting to multiple recipients") - } - - err = enc.AddRecipient(RSA1_5, nil) - if err == nil { - t.Error("should reject invalid recipient key") - } -} - -type testKey struct { - enc, dec interface{} -} - -func 
symmetricTestKey(size int) []testKey { - key, _, _ := randomKeyGenerator{size: size}.genKey() - - return []testKey{ - testKey{ - enc: key, - dec: key, - }, - testKey{ - enc: &JsonWebKey{KeyID: "test", Key: key}, - dec: &JsonWebKey{KeyID: "test", Key: key}, - }, - } -} - -func generateTestKeys(keyAlg KeyAlgorithm, encAlg ContentEncryption) []testKey { - switch keyAlg { - case DIRECT: - return symmetricTestKey(getContentCipher(encAlg).keySize()) - case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - return []testKey{ - testKey{ - dec: ecTestKey256, - enc: &ecTestKey256.PublicKey, - }, - testKey{ - dec: ecTestKey384, - enc: &ecTestKey384.PublicKey, - }, - testKey{ - dec: ecTestKey521, - enc: &ecTestKey521.PublicKey, - }, - testKey{ - dec: &JsonWebKey{KeyID: "test", Key: ecTestKey256}, - enc: &JsonWebKey{KeyID: "test", Key: &ecTestKey256.PublicKey}, - }, - } - case A128GCMKW, A128KW: - return symmetricTestKey(16) - case A192GCMKW, A192KW: - return symmetricTestKey(24) - case A256GCMKW, A256KW: - return symmetricTestKey(32) - case RSA1_5, RSA_OAEP, RSA_OAEP_256: - return []testKey{testKey{ - dec: rsaTestKey, - enc: &rsaTestKey.PublicKey, - }} - } - - panic("Must update test case") -} - -func RunRoundtripsJWE(b *testing.B, alg KeyAlgorithm, enc ContentEncryption, zip CompressionAlgorithm, priv, pub interface{}) { - serializer := func(obj *JsonWebEncryption) (string, error) { - return obj.CompactSerialize() - } - - corrupter := func(obj *JsonWebEncryption) bool { return false } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := RoundtripJWE(alg, enc, zip, serializer, corrupter, nil, pub, priv) - if err != nil { - b.Error(err) - } - } -} - -var ( - chunks = map[string][]byte{ - "1B": make([]byte, 1), - "64B": make([]byte, 64), - "1KB": make([]byte, 1024), - "64KB": make([]byte, 65536), - "1MB": make([]byte, 1048576), - "64MB": make([]byte, 67108864), - } - - symKey, _, _ = randomKeyGenerator{size: 32}.genKey() - - encrypters = map[string]Encrypter{ - "OAEPAndGCM": mustEncrypter(RSA_OAEP, A128GCM, &rsaTestKey.PublicKey), - "PKCSAndGCM": mustEncrypter(RSA1_5, A128GCM, &rsaTestKey.PublicKey), - "OAEPAndCBC": mustEncrypter(RSA_OAEP, A128CBC_HS256, &rsaTestKey.PublicKey), - "PKCSAndCBC": mustEncrypter(RSA1_5, A128CBC_HS256, &rsaTestKey.PublicKey), - "DirectGCM128": mustEncrypter(DIRECT, A128GCM, symKey), - "DirectCBC128": mustEncrypter(DIRECT, A128CBC_HS256, symKey), - "DirectGCM256": mustEncrypter(DIRECT, A256GCM, symKey), - "DirectCBC256": mustEncrypter(DIRECT, A256CBC_HS512, symKey), - "AESKWAndGCM128": mustEncrypter(A128KW, A128GCM, symKey), - "AESKWAndCBC256": mustEncrypter(A256KW, A256GCM, symKey), - "ECDHOnP256AndGCM128": mustEncrypter(ECDH_ES, A128GCM, &ecTestKey256.PublicKey), - "ECDHOnP384AndGCM128": mustEncrypter(ECDH_ES, A128GCM, &ecTestKey384.PublicKey), - "ECDHOnP521AndGCM128": mustEncrypter(ECDH_ES, A128GCM, &ecTestKey521.PublicKey), - } -) - -func BenchmarkEncrypt1BWithOAEPAndGCM(b *testing.B) { benchEncrypt("1B", "OAEPAndGCM", b) } -func BenchmarkEncrypt64BWithOAEPAndGCM(b *testing.B) { benchEncrypt("64B", "OAEPAndGCM", b) } -func BenchmarkEncrypt1KBWithOAEPAndGCM(b *testing.B) { benchEncrypt("1KB", "OAEPAndGCM", b) } -func BenchmarkEncrypt64KBWithOAEPAndGCM(b *testing.B) { benchEncrypt("64KB", "OAEPAndGCM", b) } -func BenchmarkEncrypt1MBWithOAEPAndGCM(b *testing.B) { benchEncrypt("1MB", "OAEPAndGCM", b) } -func BenchmarkEncrypt64MBWithOAEPAndGCM(b *testing.B) { benchEncrypt("64MB", "OAEPAndGCM", b) } - -func BenchmarkEncrypt1BWithPKCSAndGCM(b *testing.B) { 
benchEncrypt("1B", "PKCSAndGCM", b) } -func BenchmarkEncrypt64BWithPKCSAndGCM(b *testing.B) { benchEncrypt("64B", "PKCSAndGCM", b) } -func BenchmarkEncrypt1KBWithPKCSAndGCM(b *testing.B) { benchEncrypt("1KB", "PKCSAndGCM", b) } -func BenchmarkEncrypt64KBWithPKCSAndGCM(b *testing.B) { benchEncrypt("64KB", "PKCSAndGCM", b) } -func BenchmarkEncrypt1MBWithPKCSAndGCM(b *testing.B) { benchEncrypt("1MB", "PKCSAndGCM", b) } -func BenchmarkEncrypt64MBWithPKCSAndGCM(b *testing.B) { benchEncrypt("64MB", "PKCSAndGCM", b) } - -func BenchmarkEncrypt1BWithOAEPAndCBC(b *testing.B) { benchEncrypt("1B", "OAEPAndCBC", b) } -func BenchmarkEncrypt64BWithOAEPAndCBC(b *testing.B) { benchEncrypt("64B", "OAEPAndCBC", b) } -func BenchmarkEncrypt1KBWithOAEPAndCBC(b *testing.B) { benchEncrypt("1KB", "OAEPAndCBC", b) } -func BenchmarkEncrypt64KBWithOAEPAndCBC(b *testing.B) { benchEncrypt("64KB", "OAEPAndCBC", b) } -func BenchmarkEncrypt1MBWithOAEPAndCBC(b *testing.B) { benchEncrypt("1MB", "OAEPAndCBC", b) } -func BenchmarkEncrypt64MBWithOAEPAndCBC(b *testing.B) { benchEncrypt("64MB", "OAEPAndCBC", b) } - -func BenchmarkEncrypt1BWithPKCSAndCBC(b *testing.B) { benchEncrypt("1B", "PKCSAndCBC", b) } -func BenchmarkEncrypt64BWithPKCSAndCBC(b *testing.B) { benchEncrypt("64B", "PKCSAndCBC", b) } -func BenchmarkEncrypt1KBWithPKCSAndCBC(b *testing.B) { benchEncrypt("1KB", "PKCSAndCBC", b) } -func BenchmarkEncrypt64KBWithPKCSAndCBC(b *testing.B) { benchEncrypt("64KB", "PKCSAndCBC", b) } -func BenchmarkEncrypt1MBWithPKCSAndCBC(b *testing.B) { benchEncrypt("1MB", "PKCSAndCBC", b) } -func BenchmarkEncrypt64MBWithPKCSAndCBC(b *testing.B) { benchEncrypt("64MB", "PKCSAndCBC", b) } - -func BenchmarkEncrypt1BWithDirectGCM128(b *testing.B) { benchEncrypt("1B", "DirectGCM128", b) } -func BenchmarkEncrypt64BWithDirectGCM128(b *testing.B) { benchEncrypt("64B", "DirectGCM128", b) } -func BenchmarkEncrypt1KBWithDirectGCM128(b *testing.B) { benchEncrypt("1KB", "DirectGCM128", b) } -func BenchmarkEncrypt64KBWithDirectGCM128(b *testing.B) { benchEncrypt("64KB", "DirectGCM128", b) } -func BenchmarkEncrypt1MBWithDirectGCM128(b *testing.B) { benchEncrypt("1MB", "DirectGCM128", b) } -func BenchmarkEncrypt64MBWithDirectGCM128(b *testing.B) { benchEncrypt("64MB", "DirectGCM128", b) } - -func BenchmarkEncrypt1BWithDirectCBC128(b *testing.B) { benchEncrypt("1B", "DirectCBC128", b) } -func BenchmarkEncrypt64BWithDirectCBC128(b *testing.B) { benchEncrypt("64B", "DirectCBC128", b) } -func BenchmarkEncrypt1KBWithDirectCBC128(b *testing.B) { benchEncrypt("1KB", "DirectCBC128", b) } -func BenchmarkEncrypt64KBWithDirectCBC128(b *testing.B) { benchEncrypt("64KB", "DirectCBC128", b) } -func BenchmarkEncrypt1MBWithDirectCBC128(b *testing.B) { benchEncrypt("1MB", "DirectCBC128", b) } -func BenchmarkEncrypt64MBWithDirectCBC128(b *testing.B) { benchEncrypt("64MB", "DirectCBC128", b) } - -func BenchmarkEncrypt1BWithDirectGCM256(b *testing.B) { benchEncrypt("1B", "DirectGCM256", b) } -func BenchmarkEncrypt64BWithDirectGCM256(b *testing.B) { benchEncrypt("64B", "DirectGCM256", b) } -func BenchmarkEncrypt1KBWithDirectGCM256(b *testing.B) { benchEncrypt("1KB", "DirectGCM256", b) } -func BenchmarkEncrypt64KBWithDirectGCM256(b *testing.B) { benchEncrypt("64KB", "DirectGCM256", b) } -func BenchmarkEncrypt1MBWithDirectGCM256(b *testing.B) { benchEncrypt("1MB", "DirectGCM256", b) } -func BenchmarkEncrypt64MBWithDirectGCM256(b *testing.B) { benchEncrypt("64MB", "DirectGCM256", b) } - -func BenchmarkEncrypt1BWithDirectCBC256(b *testing.B) { benchEncrypt("1B", "DirectCBC256", 
b) } -func BenchmarkEncrypt64BWithDirectCBC256(b *testing.B) { benchEncrypt("64B", "DirectCBC256", b) } -func BenchmarkEncrypt1KBWithDirectCBC256(b *testing.B) { benchEncrypt("1KB", "DirectCBC256", b) } -func BenchmarkEncrypt64KBWithDirectCBC256(b *testing.B) { benchEncrypt("64KB", "DirectCBC256", b) } -func BenchmarkEncrypt1MBWithDirectCBC256(b *testing.B) { benchEncrypt("1MB", "DirectCBC256", b) } -func BenchmarkEncrypt64MBWithDirectCBC256(b *testing.B) { benchEncrypt("64MB", "DirectCBC256", b) } - -func BenchmarkEncrypt1BWithAESKWAndGCM128(b *testing.B) { benchEncrypt("1B", "AESKWAndGCM128", b) } -func BenchmarkEncrypt64BWithAESKWAndGCM128(b *testing.B) { benchEncrypt("64B", "AESKWAndGCM128", b) } -func BenchmarkEncrypt1KBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("1KB", "AESKWAndGCM128", b) } -func BenchmarkEncrypt64KBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("64KB", "AESKWAndGCM128", b) } -func BenchmarkEncrypt1MBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("1MB", "AESKWAndGCM128", b) } -func BenchmarkEncrypt64MBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("64MB", "AESKWAndGCM128", b) } - -func BenchmarkEncrypt1BWithAESKWAndCBC256(b *testing.B) { benchEncrypt("1B", "AESKWAndCBC256", b) } -func BenchmarkEncrypt64BWithAESKWAndCBC256(b *testing.B) { benchEncrypt("64B", "AESKWAndCBC256", b) } -func BenchmarkEncrypt1KBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("1KB", "AESKWAndCBC256", b) } -func BenchmarkEncrypt64KBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("64KB", "AESKWAndCBC256", b) } -func BenchmarkEncrypt1MBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("1MB", "AESKWAndCBC256", b) } -func BenchmarkEncrypt64MBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("64MB", "AESKWAndCBC256", b) } - -func BenchmarkEncrypt1BWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("1B", "ECDHOnP256AndGCM128", b) -} -func BenchmarkEncrypt64BWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("64B", "ECDHOnP256AndGCM128", b) -} -func BenchmarkEncrypt1KBWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("1KB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkEncrypt64KBWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("64KB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkEncrypt1MBWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("1MB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkEncrypt64MBWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("64MB", "ECDHOnP256AndGCM128", b) -} - -func BenchmarkEncrypt1BWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("1B", "ECDHOnP384AndGCM128", b) -} -func BenchmarkEncrypt64BWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("64B", "ECDHOnP384AndGCM128", b) -} -func BenchmarkEncrypt1KBWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("1KB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkEncrypt64KBWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("64KB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkEncrypt1MBWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("1MB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkEncrypt64MBWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("64MB", "ECDHOnP384AndGCM128", b) -} - -func BenchmarkEncrypt1BWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("1B", "ECDHOnP521AndGCM128", b) -} -func BenchmarkEncrypt64BWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("64B", "ECDHOnP521AndGCM128", b) -} -func BenchmarkEncrypt1KBWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("1KB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkEncrypt64KBWithECDHOnP521AndGCM128(b 
*testing.B) { - benchEncrypt("64KB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkEncrypt1MBWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("1MB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkEncrypt64MBWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("64MB", "ECDHOnP521AndGCM128", b) -} - -func benchEncrypt(chunkKey, primKey string, b *testing.B) { - data, ok := chunks[chunkKey] - if !ok { - b.Fatalf("unknown chunk size %s", chunkKey) - } - - enc, ok := encrypters[primKey] - if !ok { - b.Fatalf("unknown encrypter %s", primKey) - } - - b.SetBytes(int64(len(data))) - for i := 0; i < b.N; i++ { - enc.Encrypt(data) - } -} - -var ( - decryptionKeys = map[string]interface{}{ - "OAEPAndGCM": rsaTestKey, - "PKCSAndGCM": rsaTestKey, - "OAEPAndCBC": rsaTestKey, - "PKCSAndCBC": rsaTestKey, - - "DirectGCM128": symKey, - "DirectCBC128": symKey, - "DirectGCM256": symKey, - "DirectCBC256": symKey, - - "AESKWAndGCM128": symKey, - "AESKWAndCBC256": symKey, - - "ECDHOnP256AndGCM128": ecTestKey256, - "ECDHOnP384AndGCM128": ecTestKey384, - "ECDHOnP521AndGCM128": ecTestKey521, - } -) - -func BenchmarkDecrypt1BWithOAEPAndGCM(b *testing.B) { benchDecrypt("1B", "OAEPAndGCM", b) } -func BenchmarkDecrypt64BWithOAEPAndGCM(b *testing.B) { benchDecrypt("64B", "OAEPAndGCM", b) } -func BenchmarkDecrypt1KBWithOAEPAndGCM(b *testing.B) { benchDecrypt("1KB", "OAEPAndGCM", b) } -func BenchmarkDecrypt64KBWithOAEPAndGCM(b *testing.B) { benchDecrypt("64KB", "OAEPAndGCM", b) } -func BenchmarkDecrypt1MBWithOAEPAndGCM(b *testing.B) { benchDecrypt("1MB", "OAEPAndGCM", b) } -func BenchmarkDecrypt64MBWithOAEPAndGCM(b *testing.B) { benchDecrypt("64MB", "OAEPAndGCM", b) } - -func BenchmarkDecrypt1BWithPKCSAndGCM(b *testing.B) { benchDecrypt("1B", "PKCSAndGCM", b) } -func BenchmarkDecrypt64BWithPKCSAndGCM(b *testing.B) { benchDecrypt("64B", "PKCSAndGCM", b) } -func BenchmarkDecrypt1KBWithPKCSAndGCM(b *testing.B) { benchDecrypt("1KB", "PKCSAndGCM", b) } -func BenchmarkDecrypt64KBWithPKCSAndGCM(b *testing.B) { benchDecrypt("64KB", "PKCSAndGCM", b) } -func BenchmarkDecrypt1MBWithPKCSAndGCM(b *testing.B) { benchDecrypt("1MB", "PKCSAndGCM", b) } -func BenchmarkDecrypt64MBWithPKCSAndGCM(b *testing.B) { benchDecrypt("64MB", "PKCSAndGCM", b) } - -func BenchmarkDecrypt1BWithOAEPAndCBC(b *testing.B) { benchDecrypt("1B", "OAEPAndCBC", b) } -func BenchmarkDecrypt64BWithOAEPAndCBC(b *testing.B) { benchDecrypt("64B", "OAEPAndCBC", b) } -func BenchmarkDecrypt1KBWithOAEPAndCBC(b *testing.B) { benchDecrypt("1KB", "OAEPAndCBC", b) } -func BenchmarkDecrypt64KBWithOAEPAndCBC(b *testing.B) { benchDecrypt("64KB", "OAEPAndCBC", b) } -func BenchmarkDecrypt1MBWithOAEPAndCBC(b *testing.B) { benchDecrypt("1MB", "OAEPAndCBC", b) } -func BenchmarkDecrypt64MBWithOAEPAndCBC(b *testing.B) { benchDecrypt("64MB", "OAEPAndCBC", b) } - -func BenchmarkDecrypt1BWithPKCSAndCBC(b *testing.B) { benchDecrypt("1B", "PKCSAndCBC", b) } -func BenchmarkDecrypt64BWithPKCSAndCBC(b *testing.B) { benchDecrypt("64B", "PKCSAndCBC", b) } -func BenchmarkDecrypt1KBWithPKCSAndCBC(b *testing.B) { benchDecrypt("1KB", "PKCSAndCBC", b) } -func BenchmarkDecrypt64KBWithPKCSAndCBC(b *testing.B) { benchDecrypt("64KB", "PKCSAndCBC", b) } -func BenchmarkDecrypt1MBWithPKCSAndCBC(b *testing.B) { benchDecrypt("1MB", "PKCSAndCBC", b) } -func BenchmarkDecrypt64MBWithPKCSAndCBC(b *testing.B) { benchDecrypt("64MB", "PKCSAndCBC", b) } - -func BenchmarkDecrypt1BWithDirectGCM128(b *testing.B) { benchDecrypt("1B", "DirectGCM128", b) } -func BenchmarkDecrypt64BWithDirectGCM128(b *testing.B) { 
benchDecrypt("64B", "DirectGCM128", b) } -func BenchmarkDecrypt1KBWithDirectGCM128(b *testing.B) { benchDecrypt("1KB", "DirectGCM128", b) } -func BenchmarkDecrypt64KBWithDirectGCM128(b *testing.B) { benchDecrypt("64KB", "DirectGCM128", b) } -func BenchmarkDecrypt1MBWithDirectGCM128(b *testing.B) { benchDecrypt("1MB", "DirectGCM128", b) } -func BenchmarkDecrypt64MBWithDirectGCM128(b *testing.B) { benchDecrypt("64MB", "DirectGCM128", b) } - -func BenchmarkDecrypt1BWithDirectCBC128(b *testing.B) { benchDecrypt("1B", "DirectCBC128", b) } -func BenchmarkDecrypt64BWithDirectCBC128(b *testing.B) { benchDecrypt("64B", "DirectCBC128", b) } -func BenchmarkDecrypt1KBWithDirectCBC128(b *testing.B) { benchDecrypt("1KB", "DirectCBC128", b) } -func BenchmarkDecrypt64KBWithDirectCBC128(b *testing.B) { benchDecrypt("64KB", "DirectCBC128", b) } -func BenchmarkDecrypt1MBWithDirectCBC128(b *testing.B) { benchDecrypt("1MB", "DirectCBC128", b) } -func BenchmarkDecrypt64MBWithDirectCBC128(b *testing.B) { benchDecrypt("64MB", "DirectCBC128", b) } - -func BenchmarkDecrypt1BWithDirectGCM256(b *testing.B) { benchDecrypt("1B", "DirectGCM256", b) } -func BenchmarkDecrypt64BWithDirectGCM256(b *testing.B) { benchDecrypt("64B", "DirectGCM256", b) } -func BenchmarkDecrypt1KBWithDirectGCM256(b *testing.B) { benchDecrypt("1KB", "DirectGCM256", b) } -func BenchmarkDecrypt64KBWithDirectGCM256(b *testing.B) { benchDecrypt("64KB", "DirectGCM256", b) } -func BenchmarkDecrypt1MBWithDirectGCM256(b *testing.B) { benchDecrypt("1MB", "DirectGCM256", b) } -func BenchmarkDecrypt64MBWithDirectGCM256(b *testing.B) { benchDecrypt("64MB", "DirectGCM256", b) } - -func BenchmarkDecrypt1BWithDirectCBC256(b *testing.B) { benchDecrypt("1B", "DirectCBC256", b) } -func BenchmarkDecrypt64BWithDirectCBC256(b *testing.B) { benchDecrypt("64B", "DirectCBC256", b) } -func BenchmarkDecrypt1KBWithDirectCBC256(b *testing.B) { benchDecrypt("1KB", "DirectCBC256", b) } -func BenchmarkDecrypt64KBWithDirectCBC256(b *testing.B) { benchDecrypt("64KB", "DirectCBC256", b) } -func BenchmarkDecrypt1MBWithDirectCBC256(b *testing.B) { benchDecrypt("1MB", "DirectCBC256", b) } -func BenchmarkDecrypt64MBWithDirectCBC256(b *testing.B) { benchDecrypt("64MB", "DirectCBC256", b) } - -func BenchmarkDecrypt1BWithAESKWAndGCM128(b *testing.B) { benchDecrypt("1B", "AESKWAndGCM128", b) } -func BenchmarkDecrypt64BWithAESKWAndGCM128(b *testing.B) { benchDecrypt("64B", "AESKWAndGCM128", b) } -func BenchmarkDecrypt1KBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("1KB", "AESKWAndGCM128", b) } -func BenchmarkDecrypt64KBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("64KB", "AESKWAndGCM128", b) } -func BenchmarkDecrypt1MBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("1MB", "AESKWAndGCM128", b) } -func BenchmarkDecrypt64MBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("64MB", "AESKWAndGCM128", b) } - -func BenchmarkDecrypt1BWithAESKWAndCBC256(b *testing.B) { benchDecrypt("1B", "AESKWAndCBC256", b) } -func BenchmarkDecrypt64BWithAESKWAndCBC256(b *testing.B) { benchDecrypt("64B", "AESKWAndCBC256", b) } -func BenchmarkDecrypt1KBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("1KB", "AESKWAndCBC256", b) } -func BenchmarkDecrypt64KBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("64KB", "AESKWAndCBC256", b) } -func BenchmarkDecrypt1MBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("1MB", "AESKWAndCBC256", b) } -func BenchmarkDecrypt64MBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("64MB", "AESKWAndCBC256", b) } - -func BenchmarkDecrypt1BWithECDHOnP256AndGCM128(b *testing.B) { - 
benchDecrypt("1B", "ECDHOnP256AndGCM128", b) -} -func BenchmarkDecrypt64BWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("64B", "ECDHOnP256AndGCM128", b) -} -func BenchmarkDecrypt1KBWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("1KB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkDecrypt64KBWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("64KB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkDecrypt1MBWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("1MB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkDecrypt64MBWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("64MB", "ECDHOnP256AndGCM128", b) -} - -func BenchmarkDecrypt1BWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("1B", "ECDHOnP384AndGCM128", b) -} -func BenchmarkDecrypt64BWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("64B", "ECDHOnP384AndGCM128", b) -} -func BenchmarkDecrypt1KBWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("1KB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkDecrypt64KBWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("64KB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkDecrypt1MBWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("1MB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkDecrypt64MBWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("64MB", "ECDHOnP384AndGCM128", b) -} - -func BenchmarkDecrypt1BWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("1B", "ECDHOnP521AndGCM128", b) -} -func BenchmarkDecrypt64BWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("64B", "ECDHOnP521AndGCM128", b) -} -func BenchmarkDecrypt1KBWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("1KB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkDecrypt64KBWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("64KB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkDecrypt1MBWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("1MB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkDecrypt64MBWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("64MB", "ECDHOnP521AndGCM128", b) -} - -func benchDecrypt(chunkKey, primKey string, b *testing.B) { - chunk, ok := chunks[chunkKey] - if !ok { - b.Fatalf("unknown chunk size %s", chunkKey) - } - - enc, ok := encrypters[primKey] - if !ok { - b.Fatalf("unknown encrypter %s", primKey) - } - - dec, ok := decryptionKeys[primKey] - if !ok { - b.Fatalf("unknown decryption key %s", primKey) - } - - data, err := enc.Encrypt(chunk) - if err != nil { - b.Fatal(err) - } - - b.SetBytes(int64(len(chunk))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - data.Decrypt(dec) - } -} - -func mustEncrypter(keyAlg KeyAlgorithm, encAlg ContentEncryption, encryptionKey interface{}) Encrypter { - enc, err := NewEncrypter(keyAlg, encAlg, encryptionKey) - if err != nil { - panic(err) - } - return enc -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc.go deleted file mode 100644 index b4cd1e98..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - -Package jose aims to provide an implementation of the Javascript Object Signing -and Encryption set of standards. For the moment, it mainly focuses on -encryption and signing based on the JSON Web Encryption and JSON Web Signature -standards. The library supports both the compact and full serialization -formats, and has optional support for multiple recipients. - -*/ -package jose // import "gopkg.in/square/go-jose.v1" diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc_test.go deleted file mode 100644 index 50468295..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc_test.go +++ /dev/null @@ -1,226 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "crypto/ecdsa" - "crypto/rand" - "crypto/rsa" - "fmt" -) - -// Dummy encrypter for use in examples -var encrypter, _ = NewEncrypter(DIRECT, A128GCM, []byte{}) - -func Example_jWE() { - // Generate a public/private key pair to use for this example. The library - // also provides two utility functions (LoadPublicKey and LoadPrivateKey) - // that can be used to load keys from PEM/DER-encoded data. - privateKey, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - panic(err) - } - - // Instantiate an encrypter using RSA-OAEP with AES128-GCM. An error would - // indicate that the selected algorithm(s) are not currently supported. - publicKey := &privateKey.PublicKey - encrypter, err := NewEncrypter(RSA_OAEP, A128GCM, publicKey) - if err != nil { - panic(err) - } - - // Encrypt a sample plaintext. Calling the encrypter returns an encrypted - // JWE object, which can then be serialized for output afterwards. An error - // would indicate a problem in an underlying cryptographic primitive. - var plaintext = []byte("Lorem ipsum dolor sit amet") - object, err := encrypter.Encrypt(plaintext) - if err != nil { - panic(err) - } - - // Serialize the encrypted object using the full serialization format. - // Alternatively you can also use the compact format here by calling - // object.CompactSerialize() instead. - serialized := object.FullSerialize() - - // Parse the serialized, encrypted JWE object. An error would indicate that - // the given input did not represent a valid message. 
- object, err = ParseEncrypted(serialized) - if err != nil { - panic(err) - } - - // Now we can decrypt and get back our original plaintext. An error here - // would indicate the the message failed to decrypt, e.g. because the auth - // tag was broken or the message was tampered with. - decrypted, err := object.Decrypt(privateKey) - if err != nil { - panic(err) - } - - fmt.Printf(string(decrypted)) - // output: Lorem ipsum dolor sit amet -} - -func Example_jWS() { - // Generate a public/private key pair to use for this example. The library - // also provides two utility functions (LoadPublicKey and LoadPrivateKey) - // that can be used to load keys from PEM/DER-encoded data. - privateKey, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - panic(err) - } - - // Instantiate a signer using RSASSA-PSS (SHA512) with the given private key. - signer, err := NewSigner(PS512, privateKey) - if err != nil { - panic(err) - } - - // Sign a sample payload. Calling the signer returns a protected JWS object, - // which can then be serialized for output afterwards. An error would - // indicate a problem in an underlying cryptographic primitive. - var payload = []byte("Lorem ipsum dolor sit amet") - object, err := signer.Sign(payload) - if err != nil { - panic(err) - } - - // Serialize the encrypted object using the full serialization format. - // Alternatively you can also use the compact format here by calling - // object.CompactSerialize() instead. - serialized := object.FullSerialize() - - // Parse the serialized, protected JWS object. An error would indicate that - // the given input did not represent a valid message. - object, err = ParseSigned(serialized) - if err != nil { - panic(err) - } - - // Now we can verify the signature on the payload. An error here would - // indicate the the message failed to verify, e.g. because the signature was - // broken or the message was tampered with. - output, err := object.Verify(&privateKey.PublicKey) - if err != nil { - panic(err) - } - - fmt.Printf(string(output)) - // output: Lorem ipsum dolor sit amet -} - -func ExampleNewEncrypter_publicKey() { - var publicKey *rsa.PublicKey - - // Instantiate an encrypter using RSA-OAEP with AES128-GCM. - NewEncrypter(RSA_OAEP, A128GCM, publicKey) - - // Instantiate an encrypter using RSA-PKCS1v1.5 with AES128-CBC+HMAC. - NewEncrypter(RSA1_5, A128CBC_HS256, publicKey) -} - -func ExampleNewEncrypter_symmetric() { - var sharedKey []byte - - // Instantiate an encrypter using AES128-GCM with AES-GCM key wrap. - NewEncrypter(A128GCMKW, A128GCM, sharedKey) - - // Instantiate an encrypter using AES256-GCM directly, w/o key wrapping. - NewEncrypter(DIRECT, A256GCM, sharedKey) -} - -func ExampleNewSigner_publicKey() { - var rsaPrivateKey *rsa.PrivateKey - var ecdsaPrivateKey *ecdsa.PrivateKey - - // Instantiate a signer using RSA-PKCS#1v1.5 with SHA-256. - NewSigner(RS256, rsaPrivateKey) - - // Instantiate a signer using ECDSA with SHA-384. - NewSigner(ES384, ecdsaPrivateKey) -} - -func ExampleNewSigner_symmetric() { - var sharedKey []byte - - // Instantiate an signer using HMAC-SHA256. - NewSigner(HS256, sharedKey) - - // Instantiate an signer using HMAC-SHA512. - NewSigner(HS512, sharedKey) -} - -func ExampleNewMultiEncrypter() { - var publicKey *rsa.PublicKey - var sharedKey []byte - - // Instantiate an encrypter using AES-GCM. 
- encrypter, err := NewMultiEncrypter(A128GCM) - if err != nil { - panic(err) - } - - // Add a recipient using a shared key with AES-GCM key wap - err = encrypter.AddRecipient(A128GCMKW, sharedKey) - if err != nil { - panic(err) - } - - // Add a recipient using an RSA public key with RSA-OAEP - err = encrypter.AddRecipient(RSA_OAEP, publicKey) - if err != nil { - panic(err) - } -} - -func ExampleNewMultiSigner() { - var privateKey *rsa.PrivateKey - var sharedKey []byte - - // Instantiate a signer for multiple recipients. - signer := NewMultiSigner() - - // Add a recipient using a shared key with HMAC-SHA256 - err := signer.AddRecipient(HS256, sharedKey) - if err != nil { - panic(err) - } - - // Add a recipient using an RSA private key with RSASSA-PSS with SHA384 - err = signer.AddRecipient(PS384, privateKey) - if err != nil { - panic(err) - } -} - -func ExampleEncrypter_encrypt() { - // Encrypt a plaintext in order to get an encrypted JWE object. - var plaintext = []byte("This is a secret message") - - encrypter.Encrypt(plaintext) -} - -func ExampleEncrypter_encryptWithAuthData() { - // Encrypt a plaintext in order to get an encrypted JWE object. Also attach - // some additional authenticated data (AAD) to the object. Note that objects - // with attached AAD can only be represented using full serialization. - var plaintext = []byte("This is a secret message") - var aad = []byte("This is authenticated, but public data") - - encrypter.EncryptWithAuthData(plaintext, aad) -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding.go deleted file mode 100644 index 3e2ac0ae..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding.go +++ /dev/null @@ -1,191 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "compress/flate" - "encoding/base64" - "encoding/binary" - "io" - "math/big" - "regexp" - "strings" -) - -var stripWhitespaceRegex = regexp.MustCompile("\\s") - -// Url-safe base64 encode that strips padding -func base64URLEncode(data []byte) string { - var result = base64.URLEncoding.EncodeToString(data) - return strings.TrimRight(result, "=") -} - -// Url-safe base64 decoder that adds padding -func base64URLDecode(data string) ([]byte, error) { - var missing = (4 - len(data)%4) % 4 - data += strings.Repeat("=", missing) - return base64.URLEncoding.DecodeString(data) -} - -// Helper function to serialize known-good objects. -// Precondition: value is not a nil pointer. -func mustSerializeJSON(value interface{}) []byte { - out, err := MarshalJSON(value) - if err != nil { - panic(err) - } - // We never want to serialize the top-level value "null," since it's not a - // valid JOSE message. 
But if a caller passes in a nil pointer to this method, - // MarshalJSON will happily serialize it as the top-level value "null". If - // that value is then embedded in another operation, for instance by being - // base64-encoded and fed as input to a signing algorithm - // (https://github.com/square/go-jose/issues/22), the result will be - // incorrect. Because this method is intended for known-good objects, and a nil - // pointer is not a known-good object, we are free to panic in this case. - // Note: It's not possible to directly check whether the data pointed at by an - // interface is a nil pointer, so we do this hacky workaround. - // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I - if string(out) == "null" { - panic("Tried to serialize a nil pointer.") - } - return out -} - -// Strip all newlines and whitespace -func stripWhitespace(data string) string { - return stripWhitespaceRegex.ReplaceAllString(data, "") -} - -// Perform compression based on algorithm -func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { - switch algorithm { - case DEFLATE: - return deflate(input) - default: - return nil, ErrUnsupportedAlgorithm - } -} - -// Perform decompression based on algorithm -func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { - switch algorithm { - case DEFLATE: - return inflate(input) - default: - return nil, ErrUnsupportedAlgorithm - } -} - -// Compress with DEFLATE -func deflate(input []byte) ([]byte, error) { - output := new(bytes.Buffer) - - // Writing to byte buffer, err is always nil - writer, _ := flate.NewWriter(output, 1) - _, _ = io.Copy(writer, bytes.NewBuffer(input)) - - err := writer.Close() - return output.Bytes(), err -} - -// Decompress with DEFLATE -func inflate(input []byte) ([]byte, error) { - output := new(bytes.Buffer) - reader := flate.NewReader(bytes.NewBuffer(input)) - - _, err := io.Copy(output, reader) - if err != nil { - return nil, err - } - - err = reader.Close() - return output.Bytes(), err -} - -// byteBuffer represents a slice of bytes that can be serialized to url-safe base64. -type byteBuffer struct { - data []byte -} - -func newBuffer(data []byte) *byteBuffer { - if data == nil { - return nil - } - return &byteBuffer{ - data: data, - } -} - -func newFixedSizeBuffer(data []byte, length int) *byteBuffer { - if len(data) > length { - panic("square/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") - } - pad := make([]byte, length-len(data)) - return newBuffer(append(pad, data...)) -} - -func newBufferFromInt(num uint64) *byteBuffer { - data := make([]byte, 8) - binary.BigEndian.PutUint64(data, num) - return newBuffer(bytes.TrimLeft(data, "\x00")) -} - -func (b *byteBuffer) MarshalJSON() ([]byte, error) { - return MarshalJSON(b.base64()) -} - -func (b *byteBuffer) UnmarshalJSON(data []byte) error { - var encoded string - err := UnmarshalJSON(data, &encoded) - if err != nil { - return err - } - - if encoded == "" { - return nil - } - - decoded, err := base64URLDecode(encoded) - if err != nil { - return err - } - - *b = *newBuffer(decoded) - - return nil -} - -func (b *byteBuffer) base64() string { - return base64URLEncode(b.data) -} - -func (b *byteBuffer) bytes() []byte { - // Handling nil here allows us to transparently handle nil slices when serializing. 
- if b == nil { - return nil - } - return b.data -} - -func (b byteBuffer) bigInt() *big.Int { - return new(big.Int).SetBytes(b.data) -} - -func (b byteBuffer) toInt() int { - return int(b.bigInt().Int64()) -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding_test.go deleted file mode 100644 index e2f8d979..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding_test.go +++ /dev/null @@ -1,173 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "strings" - "testing" -) - -func TestBase64URLEncode(t *testing.T) { - // Test arrays with various sizes - if base64URLEncode([]byte{}) != "" { - t.Error("failed to encode empty array") - } - - if base64URLEncode([]byte{0}) != "AA" { - t.Error("failed to encode [0x00]") - } - - if base64URLEncode([]byte{0, 1}) != "AAE" { - t.Error("failed to encode [0x00, 0x01]") - } - - if base64URLEncode([]byte{0, 1, 2}) != "AAEC" { - t.Error("failed to encode [0x00, 0x01, 0x02]") - } - - if base64URLEncode([]byte{0, 1, 2, 3}) != "AAECAw" { - t.Error("failed to encode [0x00, 0x01, 0x02, 0x03]") - } -} - -func TestBase64URLDecode(t *testing.T) { - // Test arrays with various sizes - val, err := base64URLDecode("") - if err != nil || !bytes.Equal(val, []byte{}) { - t.Error("failed to decode empty array") - } - - val, err = base64URLDecode("AA") - if err != nil || !bytes.Equal(val, []byte{0}) { - t.Error("failed to decode [0x00]") - } - - val, err = base64URLDecode("AAE") - if err != nil || !bytes.Equal(val, []byte{0, 1}) { - t.Error("failed to decode [0x00, 0x01]") - } - - val, err = base64URLDecode("AAEC") - if err != nil || !bytes.Equal(val, []byte{0, 1, 2}) { - t.Error("failed to decode [0x00, 0x01, 0x02]") - } - - val, err = base64URLDecode("AAECAw") - if err != nil || !bytes.Equal(val, []byte{0, 1, 2, 3}) { - t.Error("failed to decode [0x00, 0x01, 0x02, 0x03]") - } -} - -func TestDeflateRoundtrip(t *testing.T) { - original := []byte("Lorem ipsum dolor sit amet") - - compressed, err := deflate(original) - if err != nil { - panic(err) - } - - output, err := inflate(compressed) - if err != nil { - panic(err) - } - - if bytes.Compare(output, original) != 0 { - t.Error("Input and output do not match") - } -} - -func TestInvalidCompression(t *testing.T) { - _, err := compress("XYZ", []byte{}) - if err == nil { - t.Error("should not accept invalid algorithm") - } - - _, err = decompress("XYZ", []byte{}) - if err == nil { - t.Error("should not accept invalid algorithm") - } - - _, err = decompress(DEFLATE, []byte{1, 2, 3, 4}) - if err == nil { - t.Error("should not accept invalid data") - } -} - -func TestByteBufferTrim(t *testing.T) { - buf := newBufferFromInt(1) - if !bytes.Equal(buf.data, []byte{1}) { - t.Error("Byte buffer 
for integer '1' should contain [0x01]") - } - - buf = newBufferFromInt(65537) - if !bytes.Equal(buf.data, []byte{1, 0, 1}) { - t.Error("Byte buffer for integer '65537' should contain [0x01, 0x00, 0x01]") - } -} - -func TestFixedSizeBuffer(t *testing.T) { - data0 := []byte{} - data1 := []byte{1} - data2 := []byte{1, 2} - data3 := []byte{1, 2, 3} - data4 := []byte{1, 2, 3, 4} - - buf0 := newFixedSizeBuffer(data0, 4) - buf1 := newFixedSizeBuffer(data1, 4) - buf2 := newFixedSizeBuffer(data2, 4) - buf3 := newFixedSizeBuffer(data3, 4) - buf4 := newFixedSizeBuffer(data4, 4) - - if !bytes.Equal(buf0.data, []byte{0, 0, 0, 0}) { - t.Error("Invalid padded buffer for buf0") - } - if !bytes.Equal(buf1.data, []byte{0, 0, 0, 1}) { - t.Error("Invalid padded buffer for buf1") - } - if !bytes.Equal(buf2.data, []byte{0, 0, 1, 2}) { - t.Error("Invalid padded buffer for buf2") - } - if !bytes.Equal(buf3.data, []byte{0, 1, 2, 3}) { - t.Error("Invalid padded buffer for buf3") - } - if !bytes.Equal(buf4.data, []byte{1, 2, 3, 4}) { - t.Error("Invalid padded buffer for buf4") - } -} - -func TestSerializeJSONRejectsNil(t *testing.T) { - defer func() { - r := recover() - if r == nil || !strings.Contains(r.(string), "nil pointer") { - t.Error("serialize function should not accept nil pointer") - } - }() - - mustSerializeJSON(nil) -} - -func TestFixedSizeBufferTooLarge(t *testing.T) { - defer func() { - r := recover() - if r == nil { - t.Error("should not be able to create fixed size buffer with oversized data") - } - }() - - newFixedSizeBuffer(make([]byte, 2), 1) -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/LICENSE b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/LICENSE deleted file mode 100644 index 74487567..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/README.md b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/README.md deleted file mode 100644 index 86de5e55..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Safe JSON - -This repository contains a fork of the `encoding/json` package from Go 1.6. - -The following changes were made: - -* Object deserialization uses case-sensitive member name matching instead of - [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html). - This is to avoid differences in the interpretation of JOSE messages between - go-jose and libraries written in other languages. -* When deserializing a JSON object, we check for duplicate keys and reject the - input whenever we detect a duplicate. Rather than trying to work with malformed - data, we prefer to reject it right away. diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go deleted file mode 100644 index ed89d115..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Large data benchmark. -// The JSON data is a summary of agl's changes in the -// go, webkit, and chromium open source projects. -// We benchmark converting between the JSON form -// and in-memory data structures. 
- -package json - -import ( - "bytes" - "compress/gzip" - "io/ioutil" - "os" - "strings" - "testing" -) - -type codeResponse struct { - Tree *codeNode `json:"tree"` - Username string `json:"username"` -} - -type codeNode struct { - Name string `json:"name"` - Kids []*codeNode `json:"kids"` - CLWeight float64 `json:"cl_weight"` - Touches int `json:"touches"` - MinT int64 `json:"min_t"` - MaxT int64 `json:"max_t"` - MeanT int64 `json:"mean_t"` -} - -var codeJSON []byte -var codeStruct codeResponse - -func codeInit() { - f, err := os.Open("testdata/code.json.gz") - if err != nil { - panic(err) - } - defer f.Close() - gz, err := gzip.NewReader(f) - if err != nil { - panic(err) - } - data, err := ioutil.ReadAll(gz) - if err != nil { - panic(err) - } - - codeJSON = data - - if err := Unmarshal(codeJSON, &codeStruct); err != nil { - panic("unmarshal code.json: " + err.Error()) - } - - if data, err = Marshal(&codeStruct); err != nil { - panic("marshal code.json: " + err.Error()) - } - - if !bytes.Equal(data, codeJSON) { - println("different lengths", len(data), len(codeJSON)) - for i := 0; i < len(data) && i < len(codeJSON); i++ { - if data[i] != codeJSON[i] { - println("re-marshal: changed at byte", i) - println("orig: ", string(codeJSON[i-10:i+10])) - println("new: ", string(data[i-10:i+10])) - break - } - } - panic("re-marshal code.json: different result") - } -} - -func BenchmarkCodeEncoder(b *testing.B) { - if codeJSON == nil { - b.StopTimer() - codeInit() - b.StartTimer() - } - enc := NewEncoder(ioutil.Discard) - for i := 0; i < b.N; i++ { - if err := enc.Encode(&codeStruct); err != nil { - b.Fatal("Encode:", err) - } - } - b.SetBytes(int64(len(codeJSON))) -} - -func BenchmarkCodeMarshal(b *testing.B) { - if codeJSON == nil { - b.StopTimer() - codeInit() - b.StartTimer() - } - for i := 0; i < b.N; i++ { - if _, err := Marshal(&codeStruct); err != nil { - b.Fatal("Marshal:", err) - } - } - b.SetBytes(int64(len(codeJSON))) -} - -func BenchmarkCodeDecoder(b *testing.B) { - if codeJSON == nil { - b.StopTimer() - codeInit() - b.StartTimer() - } - var buf bytes.Buffer - dec := NewDecoder(&buf) - var r codeResponse - for i := 0; i < b.N; i++ { - buf.Write(codeJSON) - // hide EOF - buf.WriteByte('\n') - buf.WriteByte('\n') - buf.WriteByte('\n') - if err := dec.Decode(&r); err != nil { - b.Fatal("Decode:", err) - } - } - b.SetBytes(int64(len(codeJSON))) -} - -func BenchmarkDecoderStream(b *testing.B) { - b.StopTimer() - var buf bytes.Buffer - dec := NewDecoder(&buf) - buf.WriteString(`"` + strings.Repeat("x", 1000000) + `"` + "\n\n\n") - var x interface{} - if err := dec.Decode(&x); err != nil { - b.Fatal("Decode:", err) - } - ones := strings.Repeat(" 1\n", 300000) + "\n\n\n" - b.StartTimer() - for i := 0; i < b.N; i++ { - if i%300000 == 0 { - buf.WriteString(ones) - } - x = nil - if err := dec.Decode(&x); err != nil || x != 1.0 { - b.Fatalf("Decode: %v after %d", err, i) - } - } -} - -func BenchmarkCodeUnmarshal(b *testing.B) { - if codeJSON == nil { - b.StopTimer() - codeInit() - b.StartTimer() - } - for i := 0; i < b.N; i++ { - var r codeResponse - if err := Unmarshal(codeJSON, &r); err != nil { - b.Fatal("Unmmarshal:", err) - } - } - b.SetBytes(int64(len(codeJSON))) -} - -func BenchmarkCodeUnmarshalReuse(b *testing.B) { - if codeJSON == nil { - b.StopTimer() - codeInit() - b.StartTimer() - } - var r codeResponse - for i := 0; i < b.N; i++ { - if err := Unmarshal(codeJSON, &r); err != nil { - b.Fatal("Unmmarshal:", err) - } - } -} - -func BenchmarkUnmarshalString(b *testing.B) { - data := 
[]byte(`"hello, world"`) - var s string - - for i := 0; i < b.N; i++ { - if err := Unmarshal(data, &s); err != nil { - b.Fatal("Unmarshal:", err) - } - } -} - -func BenchmarkUnmarshalFloat64(b *testing.B) { - var f float64 - data := []byte(`3.14`) - - for i := 0; i < b.N; i++ { - if err := Unmarshal(data, &f); err != nil { - b.Fatal("Unmarshal:", err) - } - } -} - -func BenchmarkUnmarshalInt64(b *testing.B) { - var x int64 - data := []byte(`3`) - - for i := 0; i < b.N; i++ { - if err := Unmarshal(data, &x); err != nil { - b.Fatal("Unmarshal:", err) - } - } -} - -func BenchmarkIssue10335(b *testing.B) { - b.ReportAllocs() - var s struct{} - j := []byte(`{"a":{ }}`) - for n := 0; n < b.N; n++ { - if err := Unmarshal(j, &s); err != nil { - b.Fatal(err) - } - } -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode.go deleted file mode 100644 index 37457e5a..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode.go +++ /dev/null @@ -1,1183 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Represents JSON data structure using native Go types: booleans, floats, -// strings, arrays, and maps. - -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "errors" - "fmt" - "reflect" - "runtime" - "strconv" - "unicode" - "unicode/utf16" - "unicode/utf8" -) - -// Unmarshal parses the JSON-encoded data and stores the result -// in the value pointed to by v. -// -// Unmarshal uses the inverse of the encodings that -// Marshal uses, allocating maps, slices, and pointers as necessary, -// with the following additional rules: -// -// To unmarshal JSON into a pointer, Unmarshal first handles the case of -// the JSON being the JSON literal null. In that case, Unmarshal sets -// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into -// the value pointed at by the pointer. If the pointer is nil, Unmarshal -// allocates a new value for it to point to. -// -// To unmarshal JSON into a struct, Unmarshal matches incoming object -// keys to the keys used by Marshal (either the struct field name or its tag), -// preferring an exact match but also accepting a case-insensitive match. -// Unmarshal will only set exported fields of the struct. -// -// To unmarshal JSON into an interface value, -// Unmarshal stores one of these in the interface value: -// -// bool, for JSON booleans -// float64, for JSON numbers -// string, for JSON strings -// []interface{}, for JSON arrays -// map[string]interface{}, for JSON objects -// nil for JSON null -// -// To unmarshal a JSON array into a slice, Unmarshal resets the slice length -// to zero and then appends each element to the slice. -// As a special case, to unmarshal an empty JSON array into a slice, -// Unmarshal replaces the slice with a new empty slice. -// -// To unmarshal a JSON array into a Go array, Unmarshal decodes -// JSON array elements into corresponding Go array elements. -// If the Go array is smaller than the JSON array, -// the additional JSON array elements are discarded. -// If the JSON array is smaller than the Go array, -// the additional Go array elements are set to zero values. 
-// -// To unmarshal a JSON object into a string-keyed map, Unmarshal first -// establishes a map to use, If the map is nil, Unmarshal allocates a new map. -// Otherwise Unmarshal reuses the existing map, keeping existing entries. -// Unmarshal then stores key-value pairs from the JSON object into the map. -// -// If a JSON value is not appropriate for a given target type, -// or if a JSON number overflows the target type, Unmarshal -// skips that field and completes the unmarshaling as best it can. -// If no more serious errors are encountered, Unmarshal returns -// an UnmarshalTypeError describing the earliest such error. -// -// The JSON null value unmarshals into an interface, map, pointer, or slice -// by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect -// on the value and produces no error. -// -// When unmarshaling quoted strings, invalid UTF-8 or -// invalid UTF-16 surrogate pairs are not treated as an error. -// Instead, they are replaced by the Unicode replacement -// character U+FFFD. -// -func Unmarshal(data []byte, v interface{}) error { - // Check for well-formedness. - // Avoids filling out half a data structure - // before discovering a JSON syntax error. - var d decodeState - err := checkValid(data, &d.scan) - if err != nil { - return err - } - - d.init(data) - return d.unmarshal(v) -} - -// Unmarshaler is the interface implemented by objects -// that can unmarshal a JSON description of themselves. -// The input can be assumed to be a valid encoding of -// a JSON value. UnmarshalJSON must copy the JSON data -// if it wishes to retain the data after returning. -type Unmarshaler interface { - UnmarshalJSON([]byte) error -} - -// An UnmarshalTypeError describes a JSON value that was -// not appropriate for a value of a specific Go type. -type UnmarshalTypeError struct { - Value string // description of JSON value - "bool", "array", "number -5" - Type reflect.Type // type of Go value it could not be assigned to - Offset int64 // error occurred after reading Offset bytes -} - -func (e *UnmarshalTypeError) Error() string { - return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() -} - -// An UnmarshalFieldError describes a JSON object key that -// led to an unexported (and therefore unwritable) struct field. -// (No longer used; kept for compatibility.) -type UnmarshalFieldError struct { - Key string - Type reflect.Type - Field reflect.StructField -} - -func (e *UnmarshalFieldError) Error() string { - return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() -} - -// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. -// (The argument to Unmarshal must be a non-nil pointer.) 
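The Unmarshal contract spelled out in the doc comment above is dense, and three of its rules surprise people most often: numbers decode into interface{} as float64, a JSON null nils out the target pointer rather than erroring, and object keys bind to struct fields case-insensitively. A minimal sketch of those rules using the standard library encoding/json, which this vendored copy tracks (the Pet type is illustrative only):

package main

import (
	"encoding/json"
	"fmt"
)

type Pet struct {
	Name string
}

func main() {
	// JSON numbers decode into interface{} as float64.
	var v interface{}
	if err := json.Unmarshal([]byte(`{"n": 2}`), &v); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", v.(map[string]interface{})["n"]) // float64

	// A JSON null sets the pointer itself to nil.
	p := &Pet{Name: "rex"}
	if err := json.Unmarshal([]byte(`null`), &p); err != nil {
		panic(err)
	}
	fmt.Println(p == nil) // true

	// Object keys match struct fields case-insensitively.
	var pet Pet
	if err := json.Unmarshal([]byte(`{"name": "fido"}`), &pet); err != nil {
		panic(err)
	}
	fmt.Println(pet.Name) // fido
}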
-type InvalidUnmarshalError struct { - Type reflect.Type -} - -func (e *InvalidUnmarshalError) Error() string { - if e.Type == nil { - return "json: Unmarshal(nil)" - } - - if e.Type.Kind() != reflect.Ptr { - return "json: Unmarshal(non-pointer " + e.Type.String() + ")" - } - return "json: Unmarshal(nil " + e.Type.String() + ")" -} - -func (d *decodeState) unmarshal(v interface{}) (err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = r.(error) - } - }() - - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - return &InvalidUnmarshalError{reflect.TypeOf(v)} - } - - d.scan.reset() - // We decode rv not rv.Elem because the Unmarshaler interface - // test must be applied at the top level of the value. - d.value(rv) - return d.savedError -} - -// A Number represents a JSON number literal. -type Number string - -// String returns the literal text of the number. -func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -// isValidNumber reports whether s is a valid JSON number literal. -func isValidNumber(s string) bool { - // This function implements the JSON numbers grammar. - // See https://tools.ietf.org/html/rfc7159#section-6 - // and http://json.org/number.gif - - if s == "" { - return false - } - - // Optional - - if s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - - // Digits - switch { - default: - return false - - case s[0] == '0': - s = s[1:] - - case '1' <= s[0] && s[0] <= '9': - s = s[1:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // . followed by 1 or more digits. - if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { - s = s[2:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // e or E followed by an optional - or + and - // 1 or more digits. - if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { - s = s[1:] - if s[0] == '+' || s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // Make sure we are at the end. - return s == "" -} - -// decodeState represents the state while decoding a JSON value. -type decodeState struct { - data []byte - off int // read offset in data - scan scanner - nextscan scanner // for calls to nextValue - savedError error - useNumber bool -} - -// errPhase is used for errors that should not happen unless -// there is a bug in the JSON decoder or something is editing -// the data slice while the decoder executes. -var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") - -func (d *decodeState) init(data []byte) *decodeState { - d.data = data - d.off = 0 - d.savedError = nil - return d -} - -// error aborts the decoding by panicking with err. -func (d *decodeState) error(err error) { - panic(err) -} - -// saveError saves the first err it is called with, -// for reporting at the end of the unmarshal. -func (d *decodeState) saveError(err error) { - if d.savedError == nil { - d.savedError = err - } -} - -// next cuts off and returns the next full JSON value in d.data[d.off:]. -// The next value is known to be an object or array, not a literal. 
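The Number type defined above is the decoder's escape hatch from float64: it keeps the literal digits and converts on demand through Int64 or Float64, and the useNumber flag on decodeState (surfaced publicly as Decoder.UseNumber) is what routes numbers into it. A sketch of why that matters, again with the stdlib package this copy mirrors:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	data := []byte(`{"id": 9007199254740993}`) // one past float64's exact integer range

	var lossy map[string]interface{}
	if err := json.Unmarshal(data, &lossy); err != nil {
		panic(err)
	}
	fmt.Println(lossy["id"]) // 9.007199254740992e+15, precision already lost

	dec := json.NewDecoder(bytes.NewReader(data))
	dec.UseNumber() // numbers now arrive as json.Number
	var exact map[string]interface{}
	if err := dec.Decode(&exact); err != nil {
		panic(err)
	}
	i, _ := exact["id"].(json.Number).Int64()
	fmt.Println(i) // 9007199254740993, exact
}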
-func (d *decodeState) next() []byte { - c := d.data[d.off] - item, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // Our scanner has seen the opening brace/bracket - // and thinks we're still in the middle of the object. - // invent a closing brace/bracket to get it out. - if c == '{' { - d.scan.step(&d.scan, '}') - } else { - d.scan.step(&d.scan, ']') - } - - return item -} - -// scanWhile processes bytes in d.data[d.off:] until it -// receives a scan code not equal to op. -// It updates d.off and returns the new scan code. -func (d *decodeState) scanWhile(op int) int { - var newOp int - for { - if d.off >= len(d.data) { - newOp = d.scan.eof() - d.off = len(d.data) + 1 // mark processed EOF with len+1 - } else { - c := d.data[d.off] - d.off++ - newOp = d.scan.step(&d.scan, c) - } - if newOp != op { - break - } - } - return newOp -} - -// value decodes a JSON value from d.data[d.off:] into the value. -// it updates d.off to point past the decoded value. -func (d *decodeState) value(v reflect.Value) { - if !v.IsValid() { - _, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // d.scan thinks we're still at the beginning of the item. - // Feed in an empty string - the shortest, simplest value - - // so that it knows we got to the end of the value. - if d.scan.redo { - // rewind. - d.scan.redo = false - d.scan.step = stateBeginValue - } - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - - n := len(d.scan.parseState) - if n > 0 && d.scan.parseState[n-1] == parseObjectKey { - // d.scan thinks we just read an object key; finish the object - d.scan.step(&d.scan, ':') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '}') - } - - return - } - - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(v) - - case scanBeginObject: - d.object(v) - - case scanBeginLiteral: - d.literal(v) - } -} - -type unquotedValue struct{} - -// valueQuoted is like value but decodes a -// quoted string literal or literal null into an interface value. -// If it finds anything other than a quoted string literal or null, -// valueQuoted returns unquotedValue{}. -func (d *decodeState) valueQuoted() interface{} { - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(reflect.Value{}) - - case scanBeginObject: - d.object(reflect.Value{}) - - case scanBeginLiteral: - switch v := d.literalInterface().(type) { - case nil, string: - return v - } - } - return unquotedValue{} -} - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. 
- if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// array consumes an array from d.data[d.off-1:], decoding into the value v. -// the first byte of the array ('[') has been read already. -func (d *decodeState) array(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - } - - v = pv - - // Check type of target. - switch v.Kind() { - case reflect.Interface: - if v.NumMethod() == 0 { - // Decoding into nil interface? Switch to non-reflect code. - v.Set(reflect.ValueOf(d.arrayInterface())) - return - } - // Otherwise it's invalid. - fallthrough - default: - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - case reflect.Array: - case reflect.Slice: - break - } - - i := 0 - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - // Get element of array, growing if necessary. - if v.Kind() == reflect.Slice { - // Grow slice if necessary - if i >= v.Cap() { - newcap := v.Cap() + v.Cap()/2 - if newcap < 4 { - newcap = 4 - } - newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) - reflect.Copy(newv, v) - v.Set(newv) - } - if i >= v.Len() { - v.SetLen(i + 1) - } - } - - if i < v.Len() { - // Decode into element. - d.value(v.Index(i)) - } else { - // Ran out of fixed array: skip. - d.value(reflect.Value{}) - } - i++ - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - - if i < v.Len() { - if v.Kind() == reflect.Array { - // Array. Zero the rest. - z := reflect.Zero(v.Type().Elem()) - for ; i < v.Len(); i++ { - v.Index(i).Set(z) - } - } else { - v.SetLen(i) - } - } - if i == 0 && v.Kind() == reflect.Slice { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - } -} - -var nullLiteral = []byte("null") - -// object consumes an object from d.data[d.off-1:], decoding into the value v. -// the first byte ('{') of the object has been read already. -func (d *decodeState) object(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - v = pv - - // Decoding into nil interface? Switch to non-reflect code. 
- if v.Kind() == reflect.Interface && v.NumMethod() == 0 { - v.Set(reflect.ValueOf(d.objectInterface())) - return - } - - // Check type of target: struct or map[string]T - switch v.Kind() { - case reflect.Map: - // map must have string kind - t := v.Type() - if t.Key().Kind() != reflect.String { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - if v.IsNil() { - v.Set(reflect.MakeMap(t)) - } - case reflect.Struct: - - default: - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - - var mapElem reflect.Value - keys := map[string]bool{} - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Check for duplicate keys. - _, ok = keys[key] - if !ok { - keys[key] = true - } else { - d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) - } - - // Figure out field corresponding to key. - var subv reflect.Value - destring := false // whether the value is wrapped in a string to be decoded first - - if v.Kind() == reflect.Map { - elemType := v.Type().Elem() - if !mapElem.IsValid() { - mapElem = reflect.New(elemType).Elem() - } else { - mapElem.Set(reflect.Zero(elemType)) - } - subv = mapElem - } else { - var f *field - fields := cachedTypeFields(v.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, []byte(key)) { - f = ff - break - } - } - if f != nil { - subv = v - destring = f.quoted - for _, i := range f.index { - if subv.Kind() == reflect.Ptr { - if subv.IsNil() { - subv.Set(reflect.New(subv.Type().Elem())) - } - subv = subv.Elem() - } - subv = subv.Field(i) - } - } - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - if destring { - switch qv := d.valueQuoted().(type) { - case nil: - d.literalStore(nullLiteral, subv, false) - case string: - d.literalStore([]byte(qv), subv, true) - default: - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) - } - } else { - d.value(subv) - } - - // Write value back to map; - // if using struct, subv points into struct already. - if v.Kind() == reflect.Map { - kv := reflect.ValueOf(key).Convert(v.Type().Key()) - v.SetMapIndex(kv, subv) - } - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } -} - -// literal consumes a literal from d.data[d.off-1:], decoding into the value v. -// The first byte of the literal has been read already -// (that's how the caller knows it's a literal). -func (d *decodeState) literal(v reflect.Value) { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - - d.literalStore(d.data[start:d.off], v, false) -} - -// convertNumber converts the number literal s to a float64 or a Number -// depending on the setting of d.useNumber. 
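Note the keys map threaded through the object loop above: unlike upstream encoding/json, this fork rejects duplicate object keys outright, presumably so a JOSE payload cannot smuggle two conflicting values past a validator. A sketch of the upstream behavior being guarded against (stdlib shown; the vendored copy would instead fail with the duplicate-key error):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	dup := []byte(`{"alg": "none", "alg": "RS256"}`)

	var m map[string]string
	err := json.Unmarshal(dup, &m) // stdlib: no error, the last key wins
	fmt.Println(m["alg"], err)     // RS256 <nil>

	// The fork above would instead return:
	//   json: duplicate key 'alg' in object
}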
-func (d *decodeState) convertNumber(s string) (interface{}, error) { - if d.useNumber { - return Number(s), nil - } - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} - } - return f, nil -} - -var numberType = reflect.TypeOf(Number("")) - -// literalStore decodes a literal stored in item into v. -// -// fromQuoted indicates whether this literal came from unwrapping a -// string from the ",string" struct tag option. this is used only to -// produce more helpful error messages. -func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { - // Check for unmarshaler. - if len(item) == 0 { - //Empty string given - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - return - } - wantptr := item[0] == 'n' // null - u, ut, pv := d.indirect(v, wantptr) - if u != nil { - err := u.UnmarshalJSON(item) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - if item[0] != '"' { - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - return - } - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - err := ut.UnmarshalText(s) - if err != nil { - d.error(err) - } - return - } - - v = pv - - switch c := item[0]; c { - case 'n': // null - switch v.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - v.Set(reflect.Zero(v.Type())) - // otherwise, ignore null for primitives/string - } - case 't', 'f': // true, false - value := c == 't' - switch v.Kind() { - default: - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - case reflect.Bool: - v.SetBool(value) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(value)) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - } - - case '"': // string - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - switch v.Kind() { - default: - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - case reflect.Slice: - if v.Type().Elem().Kind() != reflect.Uint8 { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - break - } - b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) - n, err := base64.StdEncoding.Decode(b, s) - if err != nil { - d.saveError(err) - break - } - v.SetBytes(b[:n]) - case reflect.String: - v.SetString(string(s)) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(string(s))) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - } - - default: // number - if c != '-' && (c < '0' || c > '9') { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - s := string(item) - switch v.Kind() { - default: - if v.Kind() == reflect.String && v.Type() == 
numberType { - v.SetString(s) - if !isValidNumber(s) { - d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) - } - break - } - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - } - case reflect.Interface: - n, err := d.convertNumber(s) - if err != nil { - d.saveError(err) - break - } - if v.NumMethod() != 0 { - d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - break - } - v.Set(reflect.ValueOf(n)) - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, err := strconv.ParseInt(s, 10, 64) - if err != nil || v.OverflowInt(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetInt(n) - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, err := strconv.ParseUint(s, 10, 64) - if err != nil || v.OverflowUint(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetUint(n) - - case reflect.Float32, reflect.Float64: - n, err := strconv.ParseFloat(s, v.Type().Bits()) - if err != nil || v.OverflowFloat(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetFloat(n) - } - } -} - -// The xxxInterface routines build up a value to be stored -// in an empty interface. They are not strictly necessary, -// but they avoid the weight of reflection in this common case. - -// valueInterface is like value but returns interface{} -func (d *decodeState) valueInterface() interface{} { - switch d.scanWhile(scanSkipSpace) { - default: - d.error(errPhase) - panic("unreachable") - case scanBeginArray: - return d.arrayInterface() - case scanBeginObject: - return d.objectInterface() - case scanBeginLiteral: - return d.literalInterface() - } -} - -// arrayInterface is like array but returns []interface{}. -func (d *decodeState) arrayInterface() []interface{} { - var v = make([]interface{}, 0) - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - v = append(v, d.valueInterface()) - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - return v -} - -// objectInterface is like object but returns map[string]interface{}. -func (d *decodeState) objectInterface() map[string]interface{} { - m := make(map[string]interface{}) - keys := map[string]bool{} - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read string key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Check for duplicate keys. - _, ok = keys[key] - if !ok { - keys[key] = true - } else { - d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. 
- m[key] = d.valueInterface() - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } - return m -} - -// literalInterface is like literal but returns an interface value. -func (d *decodeState) literalInterface() interface{} { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - item := d.data[start:d.off] - - switch c := item[0]; c { - case 'n': // null - return nil - - case 't', 'f': // true, false - return c == 't' - - case '"': // string - s, ok := unquote(item) - if !ok { - d.error(errPhase) - } - return s - - default: // number - if c != '-' && (c < '0' || c > '9') { - d.error(errPhase) - } - n, err := d.convertNumber(string(item)) - if err != nil { - d.saveError(err) - } - return n - } -} - -// getu4 decodes \uXXXX from the beginning of s, returning the hex value, -// or it returns -1. -func getu4(s []byte) rune { - if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1 - } - r, err := strconv.ParseUint(string(s[2:6]), 16, 64) - if err != nil { - return -1 - } - return rune(r) -} - -// unquote converts a quoted JSON string literal s into an actual string t. -// The rules are different than for Go, so cannot use strconv.Unquote. -func unquote(s []byte) (t string, ok bool) { - s, ok = unquoteBytes(s) - t = string(s) - return -} - -func unquoteBytes(s []byte) (t []byte, ok bool) { - if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { - return - } - s = s[1 : len(s)-1] - - // Check for unusual characters. If there are none, - // then no unquoting is needed, so return a slice of the - // original bytes. - r := 0 - for r < len(s) { - c := s[r] - if c == '\\' || c == '"' || c < ' ' { - break - } - if c < utf8.RuneSelf { - r++ - continue - } - rr, size := utf8.DecodeRune(s[r:]) - if rr == utf8.RuneError && size == 1 { - break - } - r += size - } - if r == len(s) { - return s, true - } - - b := make([]byte, len(s)+2*utf8.UTFMax) - w := copy(b, s[0:r]) - for r < len(s) { - // Out of room? Can only happen if s is full of - // malformed UTF-8 and we're replacing each - // byte with RuneError. - if w >= len(b)-2*utf8.UTFMax { - nb := make([]byte, (len(b)+utf8.UTFMax)*2) - copy(nb, b[0:w]) - b = nb - } - switch c := s[r]; { - case c == '\\': - r++ - if r >= len(s) { - return - } - switch s[r] { - default: - return - case '"', '\\', '/', '\'': - b[w] = s[r] - r++ - w++ - case 'b': - b[w] = '\b' - r++ - w++ - case 'f': - b[w] = '\f' - r++ - w++ - case 'n': - b[w] = '\n' - r++ - w++ - case 'r': - b[w] = '\r' - r++ - w++ - case 't': - b[w] = '\t' - r++ - w++ - case 'u': - r-- - rr := getu4(s[r:]) - if rr < 0 { - return - } - r += 6 - if utf16.IsSurrogate(rr) { - rr1 := getu4(s[r:]) - if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { - // A valid pair; consume. - r += 6 - w += utf8.EncodeRune(b[w:], dec) - break - } - // Invalid surrogate; fall back to replacement rune. - rr = unicode.ReplacementChar - } - w += utf8.EncodeRune(b[w:], rr) - } - - // Quote, control characters are invalid. - case c == '"', c < ' ': - return - - // ASCII - case c < utf8.RuneSelf: - b[w] = c - r++ - w++ - - // Coerce to well-formed UTF-8. 
- default: - rr, size := utf8.DecodeRune(s[r:]) - r += size - w += utf8.EncodeRune(b[w:], rr) - } - } - return b[0:w], true -} diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go deleted file mode 100644 index 7577b21a..00000000 --- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go +++ /dev/null @@ -1,1474 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package json - -import ( - "bytes" - "encoding" - "fmt" - "image" - "net" - "reflect" - "strings" - "testing" - "time" -) - -type T struct { - X string - Y int - Z int `json:"-"` -} - -type U struct { - Alphabet string `json:"alpha"` -} - -type V struct { - F1 interface{} - F2 int32 - F3 Number -} - -// ifaceNumAsFloat64/ifaceNumAsNumber are used to test unmarshaling with and -// without UseNumber -var ifaceNumAsFloat64 = map[string]interface{}{ - "k1": float64(1), - "k2": "s", - "k3": []interface{}{float64(1), float64(2.0), float64(3e-3)}, - "k4": map[string]interface{}{"kk1": "s", "kk2": float64(2)}, -} - -var ifaceNumAsNumber = map[string]interface{}{ - "k1": Number("1"), - "k2": "s", - "k3": []interface{}{Number("1"), Number("2.0"), Number("3e-3")}, - "k4": map[string]interface{}{"kk1": "s", "kk2": Number("2")}, -} - -type tx struct { - x int -} - -// A type that can unmarshal itself. - -type unmarshaler struct { - T bool -} - -func (u *unmarshaler) UnmarshalJSON(b []byte) error { - *u = unmarshaler{true} // All we need to see that UnmarshalJSON is called. - return nil -} - -type ustruct struct { - M unmarshaler -} - -type unmarshalerText struct { - T bool -} - -// needed for re-marshaling tests -func (u *unmarshalerText) MarshalText() ([]byte, error) { - return []byte(""), nil -} - -func (u *unmarshalerText) UnmarshalText(b []byte) error { - *u = unmarshalerText{true} // All we need to see that UnmarshalText is called. - return nil -} - -var _ encoding.TextUnmarshaler = (*unmarshalerText)(nil) - -type ustructText struct { - M unmarshalerText -} - -var ( - um0, um1 unmarshaler // target2 of unmarshaling - ump = &um1 - umtrue = unmarshaler{true} - umslice = []unmarshaler{{true}} - umslicep = new([]unmarshaler) - umstruct = ustruct{unmarshaler{true}} - - um0T, um1T unmarshalerText // target2 of unmarshaling - umpT = &um1T - umtrueT = unmarshalerText{true} - umsliceT = []unmarshalerText{{true}} - umslicepT = new([]unmarshalerText) - umstructT = ustructText{unmarshalerText{true}} -) - -// Test data structures for anonymous fields. 
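Before the anonymous-field fixtures, it is worth spelling out what the unmarshaler and unmarshalerText types above exercise: a type implementing UnmarshalJSON (or UnmarshalText) takes over its own decoding, which is the hook the indirect walk in decode.go probes for. A small sketch with a hypothetical Celsius type:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Celsius decodes from a JSON string such as "21.5C".
type Celsius float64

func (c *Celsius) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	var v float64
	if _, err := fmt.Sscanf(strings.TrimSuffix(s, "C"), "%f", &v); err != nil {
		return err
	}
	*c = Celsius(v)
	return nil
}

func main() {
	var temp Celsius
	if err := json.Unmarshal([]byte(`"21.5C"`), &temp); err != nil {
		panic(err)
	}
	fmt.Println(temp) // 21.5
}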
- -type Point struct { - Z int -} - -type Top struct { - Level0 int - Embed0 - *Embed0a - *Embed0b `json:"e,omitempty"` // treated as named - Embed0c `json:"-"` // ignored - Loop - Embed0p // has Point with X, Y, used - Embed0q // has Point with Z, used - embed // contains exported field -} - -type Embed0 struct { - Level1a int // overridden by Embed0a's Level1a with json tag - Level1b int // used because Embed0a's Level1b is renamed - Level1c int // used because Embed0a's Level1c is ignored - Level1d int // annihilated by Embed0a's Level1d - Level1e int `json:"x"` // annihilated by Embed0a.Level1e -} - -type Embed0a struct { - Level1a int `json:"Level1a,omitempty"` - Level1b int `json:"LEVEL1B,omitempty"` - Level1c int `json:"-"` - Level1d int // annihilated by Embed0's Level1d - Level1f int `json:"x"` // annihilated by Embed0's Level1e -} - -type Embed0b Embed0 - -type Embed0c Embed0 - -type Embed0p struct { - image.Point -} - -type Embed0q struct { - Point -} - -type embed struct { - Q int -} - -type Loop struct { - Loop1 int `json:",omitempty"` - Loop2 int `json:",omitempty"` - *Loop -} - -// From reflect test: -// The X in S6 and S7 annihilate, but they also block the X in S8.S9. -type S5 struct { - S6 - S7 - S8 -} - -type S6 struct { - X int -} - -type S7 S6 - -type S8 struct { - S9 -} - -type S9 struct { - X int - Y int -} - -// From reflect test: -// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9. -type S10 struct { - S11 - S12 - S13 -} - -type S11 struct { - S6 -} - -type S12 struct { - S6 -} - -type S13 struct { - S8 -} - -type unmarshalTest struct { - in string - ptr interface{} - out interface{} - err error - useNumber bool -} - -type XYZ struct { - X interface{} - Y interface{} - Z interface{} -} - -func sliceAddr(x []int) *[]int { return &x } -func mapAddr(x map[string]int) *map[string]int { return &x } - -var unmarshalTests = []unmarshalTest{ - // basic types - {in: `true`, ptr: new(bool), out: true}, - {in: `1`, ptr: new(int), out: 1}, - {in: `1.2`, ptr: new(float64), out: 1.2}, - {in: `-5`, ptr: new(int16), out: int16(-5)}, - {in: `2`, ptr: new(Number), out: Number("2"), useNumber: true}, - {in: `2`, ptr: new(Number), out: Number("2")}, - {in: `2`, ptr: new(interface{}), out: float64(2.0)}, - {in: `2`, ptr: new(interface{}), out: Number("2"), useNumber: true}, - {in: `"a\u1234"`, ptr: new(string), out: "a\u1234"}, - {in: `"http:\/\/"`, ptr: new(string), out: "http://"}, - {in: `"g-clef: \uD834\uDD1E"`, ptr: new(string), out: "g-clef: \U0001D11E"}, - {in: `"invalid: \uD834x\uDD1E"`, ptr: new(string), out: "invalid: \uFFFDx\uFFFD"}, - {in: "null", ptr: new(interface{}), out: nil}, - {in: `{"X": [1,2,3], "Y": 4}`, ptr: new(T), out: T{Y: 4}, err: &UnmarshalTypeError{"array", reflect.TypeOf(""), 7}}, - {in: `{"x": 1}`, ptr: new(tx), out: tx{}}, - {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: float64(1), F2: int32(2), F3: Number("3")}}, - {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: Number("1"), F2: int32(2), F3: Number("3")}, useNumber: true}, - {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsFloat64}, - {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsNumber, useNumber: true}, - - // raw values with whitespace - {in: "\n true ", ptr: new(bool), out: true}, - {in: "\t 1 ", ptr: new(int), out: 1}, - {in: "\r 1.2 ", ptr: new(float64), out: 1.2}, - {in: "\t -5 \n", ptr: new(int16), out: int16(-5)}, - {in: "\t 
\"a\\u1234\" \n", ptr: new(string), out: "a\u1234"}, - - // Z has a "-" tag. - {in: `{"Y": 1, "Z": 2}`, ptr: new(T), out: T{Y: 1}}, - - {in: `{"alpha": "abc", "alphabet": "xyz"}`, ptr: new(U), out: U{Alphabet: "abc"}}, - {in: `{"alpha": "abc"}`, ptr: new(U), out: U{Alphabet: "abc"}}, - {in: `{"alphabet": "xyz"}`, ptr: new(U), out: U{}}, - - // syntax errors - {in: `{"X": "foo", "Y"}`, err: &SyntaxError{"invalid character '}' after object key", 17}}, - {in: `[1, 2, 3+]`, err: &SyntaxError{"invalid character '+' after array element", 9}}, - {in: `{"X":12x}`, err: &SyntaxError{"invalid character 'x' after object key:value pair", 8}, useNumber: true}, - - // raw value errors - {in: "\x01 42", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, - {in: " 42 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 5}}, - {in: "\x01 true", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, - {in: " false \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 8}}, - {in: "\x01 1.2", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, - {in: " 3.4 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 6}}, - {in: "\x01 \"string\"", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, - {in: " \"string\" \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 11}}, - - // array tests - {in: `[1, 2, 3]`, ptr: new([3]int), out: [3]int{1, 2, 3}}, - {in: `[1, 2, 3]`, ptr: new([1]int), out: [1]int{1}}, - {in: `[1, 2, 3]`, ptr: new([5]int), out: [5]int{1, 2, 3, 0, 0}}, - - // empty array to interface test - {in: `[]`, ptr: new([]interface{}), out: []interface{}{}}, - {in: `null`, ptr: new([]interface{}), out: []interface{}(nil)}, - {in: `{"T":[]}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": []interface{}{}}}, - {in: `{"T":null}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": interface{}(nil)}}, - - // composite tests - {in: allValueIndent, ptr: new(All), out: allValue}, - {in: allValueCompact, ptr: new(All), out: allValue}, - {in: allValueIndent, ptr: new(*All), out: &allValue}, - {in: allValueCompact, ptr: new(*All), out: &allValue}, - {in: pallValueIndent, ptr: new(All), out: pallValue}, - {in: pallValueCompact, ptr: new(All), out: pallValue}, - {in: pallValueIndent, ptr: new(*All), out: &pallValue}, - {in: pallValueCompact, ptr: new(*All), out: &pallValue}, - - // unmarshal interface test - {in: `{"T":false}`, ptr: &um0, out: umtrue}, // use "false" so test will fail if custom unmarshaler is not called - {in: `{"T":false}`, ptr: &ump, out: &umtrue}, - {in: `[{"T":false}]`, ptr: &umslice, out: umslice}, - {in: `[{"T":false}]`, ptr: &umslicep, out: &umslice}, - {in: `{"M":{"T":false}}`, ptr: &umstruct, out: umstruct}, - - // UnmarshalText interface test - {in: `"X"`, ptr: &um0T, out: umtrueT}, // use "false" so test will fail if custom unmarshaler is not called - {in: `"X"`, ptr: &umpT, out: &umtrueT}, - {in: `["X"]`, ptr: &umsliceT, out: umsliceT}, - {in: `["X"]`, ptr: &umslicepT, out: &umsliceT}, - {in: `{"M":"X"}`, ptr: &umstructT, out: umstructT}, - - // Overwriting of data. - // This is different from package xml, but it's what we've always done. - // Now documented and tested. 
- {in: `[2]`, ptr: sliceAddr([]int{1}), out: []int{2}}, - {in: `{"key": 2}`, ptr: mapAddr(map[string]int{"old": 0, "key": 1}), out: map[string]int{"key": 2}}, - - { - in: `{ - "Level0": 1, - "Level1b": 2, - "Level1c": 3, - "x": 4, - "Level1a": 5, - "LEVEL1B": 6, - "e": { - "Level1a": 8, - "Level1b": 9, - "Level1c": 10, - "Level1d": 11, - "x": 12 - }, - "Loop1": 13, - "Loop2": 14, - "X": 15, - "Y": 16, - "Z": 17, - "Q": 18 - }`, - ptr: new(Top), - out: Top{ - Level0: 1, - Embed0: Embed0{ - Level1b: 2, - Level1c: 3, - }, - Embed0a: &Embed0a{ - Level1a: 5, - Level1b: 6, - }, - Embed0b: &Embed0b{ - Level1a: 8, - Level1b: 9, - Level1c: 10, - Level1d: 11, - Level1e: 12, - }, - Loop: Loop{ - Loop1: 13, - Loop2: 14, - }, - Embed0p: Embed0p{ - Point: image.Point{X: 15, Y: 16}, - }, - Embed0q: Embed0q{ - Point: Point{Z: 17}, - }, - embed: embed{ - Q: 18, - }, - }, - }, - { - in: `{"X": 1,"Y":2}`, - ptr: new(S5), - out: S5{S8: S8{S9: S9{Y: 2}}}, - }, - { - in: `{"X": 1,"Y":2}`, - ptr: new(S10), - out: S10{S13: S13{S8: S8{S9: S9{Y: 2}}}}, - }, - - // invalid UTF-8 is coerced to valid UTF-8. - { - in: "\"hello\xffworld\"", - ptr: new(string), - out: "hello\ufffdworld", - }, - { - in: "\"hello\xc2\xc2world\"", - ptr: new(string), - out: "hello\ufffd\ufffdworld", - }, - { - in: "\"hello\xc2\xffworld\"", - ptr: new(string), - out: "hello\ufffd\ufffdworld", - }, - { - in: "\"hello\\ud800world\"", - ptr: new(string), - out: "hello\ufffdworld", - }, - { - in: "\"hello\\ud800\\ud800world\"", - ptr: new(string), - out: "hello\ufffd\ufffdworld", - }, - { - in: "\"hello\\ud800\\ud800world\"", - ptr: new(string), - out: "hello\ufffd\ufffdworld", - }, - { - in: "\"hello\xed\xa0\x80\xed\xb0\x80world\"", - ptr: new(string), - out: "hello\ufffd\ufffd\ufffd\ufffd\ufffd\ufffdworld", - }, - - // issue 8305 - { - in: `{"2009-11-10T23:00:00Z": "hello world"}`, - ptr: &map[time.Time]string{}, - err: &UnmarshalTypeError{"object", reflect.TypeOf(map[time.Time]string{}), 1}, - }, -} - -func TestMarshal(t *testing.T) { - b, err := Marshal(allValue) - if err != nil { - t.Fatalf("Marshal allValue: %v", err) - } - if string(b) != allValueCompact { - t.Errorf("Marshal allValueCompact") - diff(t, b, []byte(allValueCompact)) - return - } - - b, err = Marshal(pallValue) - if err != nil { - t.Fatalf("Marshal pallValue: %v", err) - } - if string(b) != pallValueCompact { - t.Errorf("Marshal pallValueCompact") - diff(t, b, []byte(pallValueCompact)) - return - } -} - -var badUTF8 = []struct { - in, out string -}{ - {"hello\xffworld", `"hello\ufffdworld"`}, - {"", `""`}, - {"\xff", `"\ufffd"`}, - {"\xff\xff", `"\ufffd\ufffd"`}, - {"a\xffb", `"a\ufffdb"`}, - {"\xe6\x97\xa5\xe6\x9c\xac\xff\xaa\x9e", `"日本\ufffd\ufffd\ufffd"`}, -} - -func TestMarshalBadUTF8(t *testing.T) { - for _, tt := range badUTF8 { - b, err := Marshal(tt.in) - if string(b) != tt.out || err != nil { - t.Errorf("Marshal(%q) = %#q, %v, want %#q, nil", tt.in, b, err, tt.out) - } - } -} - -func TestMarshalNumberZeroVal(t *testing.T) { - var n Number - out, err := Marshal(n) - if err != nil { - t.Fatal(err) - } - outStr := string(out) - if outStr != "0" { - t.Fatalf("Invalid zero val for Number: %q", outStr) - } -} - -func TestMarshalEmbeds(t *testing.T) { - top := &Top{ - Level0: 1, - Embed0: Embed0{ - Level1b: 2, - Level1c: 3, - }, - Embed0a: &Embed0a{ - Level1a: 5, - Level1b: 6, - }, - Embed0b: &Embed0b{ - Level1a: 8, - Level1b: 9, - Level1c: 10, - Level1d: 11, - Level1e: 12, - }, - Loop: Loop{ - Loop1: 13, - Loop2: 14, - }, - Embed0p: Embed0p{ - Point: image.Point{X: 15, Y: 
16}, - }, - Embed0q: Embed0q{ - Point: Point{Z: 17}, - }, - embed: embed{ - Q: 18, - }, - } - b, err := Marshal(top) - if err != nil { - t.Fatal(err) - } - want := "{\"Level0\":1,\"Level1b\":2,\"Level1c\":3,\"Level1a\":5,\"LEVEL1B\":6,\"e\":{\"Level1a\":8,\"Level1b\":9,\"Level1c\":10,\"Level1d\":11,\"x\":12},\"Loop1\":13,\"Loop2\":14,\"X\":15,\"Y\":16,\"Z\":17,\"Q\":18}" - if string(b) != want { - t.Errorf("Wrong marshal result.\n got: %q\nwant: %q", b, want) - } -} - -func TestUnmarshal(t *testing.T) { - for i, tt := range unmarshalTests { - var scan scanner - in := []byte(tt.in) - if err := checkValid(in, &scan); err != nil { - if !reflect.DeepEqual(err, tt.err) { - t.Errorf("#%d: checkValid: %#v", i, err) - continue - } - } - if tt.ptr == nil { - continue - } - - // v = new(right-type) - v := reflect.New(reflect.TypeOf(tt.ptr).Elem()) - dec := NewDecoder(bytes.NewReader(in)) - if tt.useNumber { - dec.UseNumber() - } - if err := dec.Decode(v.Interface()); !reflect.DeepEqual(err, tt.err) { - t.Errorf("#%d: %v, want %v", i, err, tt.err) - continue - } else if err != nil { - continue - } - if !reflect.DeepEqual(v.Elem().Interface(), tt.out) { - t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), tt.out) - data, _ := Marshal(v.Elem().Interface()) - println(string(data)) - data, _ = Marshal(tt.out) - println(string(data)) - continue - } - - // Check round trip. - if tt.err == nil { - enc, err := Marshal(v.Interface()) - if err != nil { - t.Errorf("#%d: error re-marshaling: %v", i, err) - continue - } - vv := reflect.New(reflect.TypeOf(tt.ptr).Elem()) - dec = NewDecoder(bytes.NewReader(enc)) - if tt.useNumber { - dec.UseNumber() - } - if err := dec.Decode(vv.Interface()); err != nil { - t.Errorf("#%d: error re-unmarshaling %#q: %v", i, enc, err) - continue - } - if !reflect.DeepEqual(v.Elem().Interface(), vv.Elem().Interface()) { - t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), vv.Elem().Interface()) - t.Errorf(" In: %q", strings.Map(noSpace, string(in))) - t.Errorf("Marshal: %q", strings.Map(noSpace, string(enc))) - continue - } - } - } -} - -func TestUnmarshalMarshal(t *testing.T) { - initBig() - var v interface{} - if err := Unmarshal(jsonBig, &v); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - b, err := Marshal(v) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if !bytes.Equal(jsonBig, b) { - t.Errorf("Marshal jsonBig") - diff(t, b, jsonBig) - return - } -} - -var numberTests = []struct { - in string - i int64 - intErr string - f float64 - floatErr string -}{ - {in: "-1.23e1", intErr: "strconv.ParseInt: parsing \"-1.23e1\": invalid syntax", f: -1.23e1}, - {in: "-12", i: -12, f: -12.0}, - {in: "1e1000", intErr: "strconv.ParseInt: parsing \"1e1000\": invalid syntax", floatErr: "strconv.ParseFloat: parsing \"1e1000\": value out of range"}, -} - -// Independent of Decode, basic coverage of the accessors in Number -func TestNumberAccessors(t *testing.T) { - for _, tt := range numberTests { - n := Number(tt.in) - if s := n.String(); s != tt.in { - t.Errorf("Number(%q).String() is %q", tt.in, s) - } - if i, err := n.Int64(); err == nil && tt.intErr == "" && i != tt.i { - t.Errorf("Number(%q).Int64() is %d", tt.in, i) - } else if (err == nil && tt.intErr != "") || (err != nil && err.Error() != tt.intErr) { - t.Errorf("Number(%q).Int64() wanted error %q but got: %v", tt.in, tt.intErr, err) - } - if f, err := n.Float64(); err == nil && tt.floatErr == "" && f != tt.f { - t.Errorf("Number(%q).Float64() is %g", tt.in, f) - } else if (err == 
nil && tt.floatErr != "") || (err != nil && err.Error() != tt.floatErr) { - t.Errorf("Number(%q).Float64() wanted error %q but got: %v", tt.in, tt.floatErr, err) - } - } -} - -func TestLargeByteSlice(t *testing.T) { - s0 := make([]byte, 2000) - for i := range s0 { - s0[i] = byte(i) - } - b, err := Marshal(s0) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - var s1 []byte - if err := Unmarshal(b, &s1); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if !bytes.Equal(s0, s1) { - t.Errorf("Marshal large byte slice") - diff(t, s0, s1) - } -} - -type Xint struct { - X int -} - -func TestUnmarshalInterface(t *testing.T) { - var xint Xint - var i interface{} = &xint - if err := Unmarshal([]byte(`{"X":1}`), &i); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if xint.X != 1 { - t.Fatalf("Did not write to xint") - } -} - -func TestUnmarshalPtrPtr(t *testing.T) { - var xint Xint - pxint := &xint - if err := Unmarshal([]byte(`{"X":1}`), &pxint); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if xint.X != 1 { - t.Fatalf("Did not write to xint") - } -} - -func TestEscape(t *testing.T) { - const input = `"foobar"<html>` + " [\u2028 \u2029]" - const expected = `"\"foobar\"\u003chtml\u003e [\u2028 \u2029]"` - b, err := Marshal(input) - if err != nil { - t.Fatalf("Marshal error: %v", err) - } - if s := string(b); s != expected { - t.Errorf("Encoding of [%s]:\n got [%s]\nwant [%s]", input, s, expected) - } -} - -// WrongString is a struct that's misusing the ,string modifier. -type WrongString struct { - Message string `json:"result,string"` -} - -type wrongStringTest struct { - in, err string -} - -var wrongStringTests = []wrongStringTest{ - {`{"result":"x"}`, `json: invalid use of ,string struct tag, trying to unmarshal "x" into string`}, - {`{"result":"foo"}`, `json: invalid use of ,string struct tag, trying to unmarshal "foo" into string`}, - {`{"result":"123"}`, `json: invalid use of ,string struct tag, trying to unmarshal "123" into string`}, - {`{"result":123}`, `json: invalid use of ,string struct tag, trying to unmarshal unquoted value into string`}, -} - -// If people misuse the ,string modifier, the error message should be -// helpful, telling the user that they're doing it wrong. -func TestErrorMessageFromMisusedString(t *testing.T) { - for n, tt := range wrongStringTests { - r := strings.NewReader(tt.in) - var s WrongString - err := NewDecoder(r).Decode(&s) - got := fmt.Sprintf("%v", err) - if got != tt.err { - t.Errorf("%d. 
got err = %q, want %q", n, got, tt.err) - } - } -} - -func noSpace(c rune) rune { - if isSpace(byte(c)) { //only used for ascii - return -1 - } - return c -} - -type All struct { - Bool bool - Int int - Int8 int8 - Int16 int16 - Int32 int32 - Int64 int64 - Uint uint - Uint8 uint8 - Uint16 uint16 - Uint32 uint32 - Uint64 uint64 - Uintptr uintptr - Float32 float32 - Float64 float64 - - Foo string `json:"bar"` - Foo2 string `json:"bar2,dummyopt"` - - IntStr int64 `json:",string"` - - PBool *bool - PInt *int - PInt8 *int8 - PInt16 *int16 - PInt32 *int32 - PInt64 *int64 - PUint *uint - PUint8 *uint8 - PUint16 *uint16 - PUint32 *uint32 - PUint64 *uint64 - PUintptr *uintptr - PFloat32 *float32 - PFloat64 *float64 - - String string - PString *string - - Map map[string]Small - MapP map[string]*Small - PMap *map[string]Small - PMapP *map[string]*Small - - EmptyMap map[string]Small - NilMap map[string]Small - - Slice []Small - SliceP []*Small - PSlice *[]Small - PSliceP *[]*Small - - EmptySlice []Small - NilSlice []Small - - StringSlice []string - ByteSlice []byte - - Small Small - PSmall *Small - PPSmall **Small - - Interface interface{} - PInterface *interface{} - - unexported int -} - -type Small struct { - Tag string -} - -var allValue = All{ - Bool: true, - Int: 2, - Int8: 3, - Int16: 4, - Int32: 5, - Int64: 6, - Uint: 7, - Uint8: 8, - Uint16: 9, - Uint32: 10, - Uint64: 11, - Uintptr: 12, - Float32: 14.1, - Float64: 15.1, - Foo: "foo", - Foo2: "foo2", - IntStr: 42, - String: "16", - Map: map[string]Small{ - "17": {Tag: "tag17"}, - "18": {Tag: "tag18"}, - }, - MapP: map[string]*Small{ - "19": {Tag: "tag19"}, - "20": nil, - }, - EmptyMap: map[string]Small{}, - Slice: []Small{{Tag: "tag20"}, {Tag: "tag21"}}, - SliceP: []*Small{{Tag: "tag22"}, nil, {Tag: "tag23"}}, - EmptySlice: []Small{}, - StringSlice: []string{"str24", "str25", "str26"}, - ByteSlice: []byte{27, 28, 29}, - Small: Small{Tag: "tag30"}, - PSmall: &Small{Tag: "tag31"}, - Interface: 5.2, -} - -var pallValue = All{ - PBool: &allValue.Bool, - PInt: &allValue.Int, - PInt8: &allValue.Int8, - PInt16: &allValue.Int16, - PInt32: &allValue.Int32, - PInt64: &allValue.Int64, - PUint: &allValue.Uint, - PUint8: &allValue.Uint8, - PUint16: &allValue.Uint16, - PUint32: &allValue.Uint32, - PUint64: &allValue.Uint64, - PUintptr: &allValue.Uintptr, - PFloat32: &allValue.Float32, - PFloat64: &allValue.Float64, - PString: &allValue.String, - PMap: &allValue.Map, - PMapP: &allValue.MapP, - PSlice: &allValue.Slice, - PSliceP: &allValue.SliceP, - PPSmall: &allValue.PSmall, - PInterface: &allValue.Interface, -} - -var allValueIndent = `{ - "Bool": true, - "Int": 2, - "Int8": 3, - "Int16": 4, - "Int32": 5, - "Int64": 6, - "Uint": 7, - "Uint8": 8, - "Uint16": 9, - "Uint32": 10, - "Uint64": 11, - "Uintptr": 12, - "Float32": 14.1, - "Float64": 15.1, - "bar": "foo", - "bar2": "foo2", - "IntStr": "42", - "PBool": null, - "PInt": null, - "PInt8": null, - "PInt16": null, - "PInt32": null, - "PInt64": null, - "PUint": null, - "PUint8": null, - "PUint16": null, - "PUint32": null, - "PUint64": null, - "PUintptr": null, - "PFloat32": null, - "PFloat64": null, - "String": "16", - "PString": null, - "Map": { - "17": { - "Tag": "tag17" - }, - "18": { - "Tag": "tag18" - } - }, - "MapP": { - "19": { - "Tag": "tag19" - }, - "20": null - }, - "PMap": null, - "PMapP": null, - "EmptyMap": {}, - "NilMap": null, - "Slice": [ - { - "Tag": "tag20" - }, - { - "Tag": "tag21" - } - ], - "SliceP": [ - { - "Tag": "tag22" - }, - null, - { - "Tag": "tag23" - } - ], - "PSlice": null, - 
"PSliceP": null, - "EmptySlice": [], - "NilSlice": null, - "StringSlice": [ - "str24", - "str25", - "str26" - ], - "ByteSlice": "Gxwd", - "Small": { - "Tag": "tag30" - }, - "PSmall": { - "Tag": "tag31" - }, - "PPSmall": null, - "Interface": 5.2, - "PInterface": null -}` - -var allValueCompact = strings.Map(noSpace, allValueIndent) - -var pallValueIndent = `{ - "Bool": false, - "Int": 0, - "Int8": 0, - "Int16": 0, - "Int32": 0, - "Int64": 0, - "Uint": 0, - "Uint8": 0, - "Uint16": 0, - "Uint32": 0, - "Uint64": 0, - "Uintptr": 0, - "Float32": 0, - "Float64": 0, - "bar": "", - "bar2": "", - "IntStr": "0", - "PBool": true, - "PInt": 2, - "PInt8": 3, - "PInt16": 4, - "PInt32": 5, - "PInt64": 6, - "PUint": 7, - "PUint8": 8, - "PUint16": 9, - "PUint32": 10, - "PUint64": 11, - "PUintptr": 12, - "PFloat32": 14.1, - "PFloat64": 15.1, - "String": "", - "PString": "16", - "Map": null, - "MapP": null, - "PMap": { - "17": { - "Tag": "tag17" - }, - "18": { - "Tag": "tag18" - } - }, - "PMapP": { - "19": { - "Tag": "tag19" - }, - "20": null - }, - "EmptyMap": null, - "NilMap": null, - "Slice": null, - "SliceP": null, - "PSlice": [ - { - "Tag": "tag20" - }, - { - "Tag": "tag21" - } - ], - "PSliceP": [ - { - "Tag": "tag22" - }, - null, - { - "Tag": "tag23" - } - ], - "EmptySlice": null, - "NilSlice": null, - "StringSlice": null, - "ByteSlice": null, - "Small": { - "Tag": "" - }, - "PSmall": null, - "PPSmall": { - "Tag": "tag31" - }, - "Interface": null, - "PInterface": 5.2 -}` - -var pallValueCompact = strings.Map(noSpace, pallValueIndent) - -func TestRefUnmarshal(t *testing.T) { - type S struct { - // Ref is defined in encode_test.go. - R0 Ref - R1 *Ref - R2 RefText - R3 *RefText - } - want := S{ - R0: 12, - R1: new(Ref), - R2: 13, - R3: new(RefText), - } - *want.R1 = 12 - *want.R3 = 13 - - var got S - if err := Unmarshal([]byte(`{"R0":"ref","R1":"ref","R2":"ref","R3":"ref"}`), &got); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if !reflect.DeepEqual(got, want) { - t.Errorf("got %+v, want %+v", got, want) - } -} - -// Test that the empty string doesn't panic decoding when ,string is specified -// Issue 3450 -func TestEmptyString(t *testing.T) { - type T2 struct { - Number1 int `json:",string"` - Number2 int `json:",string"` - } - data := `{"Number1":"1", "Number2":""}` - dec := NewDecoder(strings.NewReader(data)) - var t2 T2 - err := dec.Decode(&t2) - if err == nil { - t.Fatal("Decode: did not return error") - } - if t2.Number1 != 1 { - t.Fatal("Decode: did not set Number1") - } -} - -// Test that a null for ,string is not replaced with the previous quoted string (issue 7046). -// It should also not be an error (issue 2540, issue 8587). 
-func TestNullString(t *testing.T) { - type T struct { - A int `json:",string"` - B int `json:",string"` - C *int `json:",string"` - } - data := []byte(`{"A": "1", "B": null, "C": null}`) - var s T - s.B = 1 - s.C = new(int) - *s.C = 2 - err := Unmarshal(data, &s) - if err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if s.B != 1 || s.C != nil { - t.Fatalf("after Unmarshal, s.B=%d, s.C=%p, want 1, nil", s.B, s.C) - } -} - -func intp(x int) *int { - p := new(int) - *p = x - return p -} - -func intpp(x *int) **int { - pp := new(*int) - *pp = x - return pp -} - -var interfaceSetTests = []struct { - pre interface{} - json string - post interface{} -}{ - {"foo", `"bar"`, "bar"}, - {"foo", `2`, 2.0}, - {"foo", `true`, true}, - {"foo", `null`, nil}, - - {nil, `null`, nil}, - {new(int), `null`, nil}, - {(*int)(nil), `null`, nil}, - {new(*int), `null`, new(*int)}, - {(**int)(nil), `null`, nil}, - {intp(1), `null`, nil}, - {intpp(nil), `null`, intpp(nil)}, - {intpp(intp(1)), `null`, intpp(nil)}, -} - -func TestInterfaceSet(t *testing.T) { - for _, tt := range interfaceSetTests { - b := struct{ X interface{} }{tt.pre} - blob := `{"X":` + tt.json + `}` - if err := Unmarshal([]byte(blob), &b); err != nil { - t.Errorf("Unmarshal %#q: %v", blob, err) - continue - } - if !reflect.DeepEqual(b.X, tt.post) { - t.Errorf("Unmarshal %#q into %#v: X=%#v, want %#v", blob, tt.pre, b.X, tt.post) - } - } -} - -// JSON null values should be ignored for primitives and string values instead of resulting in an error. -// Issue 2540 -func TestUnmarshalNulls(t *testing.T) { - jsonData := []byte(`{ - "Bool" : null, - "Int" : null, - "Int8" : null, - "Int16" : null, - "Int32" : null, - "Int64" : null, - "Uint" : null, - "Uint8" : null, - "Uint16" : null, - "Uint32" : null, - "Uint64" : null, - "Float32" : null, - "Float64" : null, - "String" : null}`) - - nulls := All{ - Bool: true, - Int: 2, - Int8: 3, - Int16: 4, - Int32: 5, - Int64: 6, - Uint: 7, - Uint8: 8, - Uint16: 9, - Uint32: 10, - Uint64: 11, - Float32: 12.1, - Float64: 13.1, - String: "14"} - - err := Unmarshal(jsonData, &nulls) - if err != nil { - t.Errorf("Unmarshal of null values failed: %v", err) - } - if !nulls.Bool || nulls.Int != 2 || nulls.Int8 != 3 || nulls.Int16 != 4 || nulls.Int32 != 5 || nulls.Int64 != 6 || - nulls.Uint != 7 || nulls.Uint8 != 8 || nulls.Uint16 != 9 || nulls.Uint32 != 10 || nulls.Uint64 != 11 || - nulls.Float32 != 12.1 || nulls.Float64 != 13.1 || nulls.String != "14" { - - t.Errorf("Unmarshal of null values affected primitives") - } -} - -func TestStringKind(t *testing.T) { - type stringKind string - - var m1, m2 map[stringKind]int - m1 = map[stringKind]int{ - "foo": 42, - } - - data, err := Marshal(m1) - if err != nil { - t.Errorf("Unexpected error marshaling: %v", err) - } - - err = Unmarshal(data, &m2) - if err != nil { - t.Errorf("Unexpected error unmarshaling: %v", err) - } - - if !reflect.DeepEqual(m1, m2) { - t.Error("Items should be equal after encoding and then decoding") - } -} - -// Custom types with []byte as underlying type could not be marshalled -// and then unmarshalled. -// Issue 8962. -func TestByteKind(t *testing.T) { - type byteKind []byte - - a := byteKind("hello") - - data, err := Marshal(a) - if err != nil { - t.Error(err) - } - var b byteKind - err = Unmarshal(data, &b) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(a, b) { - t.Errorf("expected %v == %v", a, b) - } -} - -// The fix for issue 8962 introduced a regression. -// Issue 12921. 
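The issue numbers cited around this point (8962, then the 12921 regression) both concern byte-flavored slice types: []byte marshals to a base64 string, and a named type whose underlying type is []byte, or a slice of a named byte type, must round-trip the same way. A sketch:

package main

import (
	"encoding/json"
	"fmt"
)

type byteKind []byte

func main() {
	b, _ := json.Marshal([]byte("hello"))
	fmt.Println(string(b)) // "aGVsbG8=", []byte encodes as base64

	data, _ := json.Marshal(byteKind("hello"))
	var back byteKind
	if err := json.Unmarshal(data, &back); err != nil {
		panic(err)
	}
	fmt.Println(string(back)) // hello, named []byte types round-trip too
}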
-func TestSliceOfCustomByte(t *testing.T) { - type Uint8 uint8 - - a := []Uint8("hello") - - data, err := Marshal(a) - if err != nil { - t.Fatal(err) - } - var b []Uint8 - err = Unmarshal(data, &b) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(a, b) { - t.Fatal("expected %v == %v", a, b) - } -} - -var decodeTypeErrorTests = []struct { - dest interface{} - src string -}{ - {new(string), `{"user": "name"}`}, // issue 4628. - {new(error), `{}`}, // issue 4222 - {new(error), `[]`}, - {new(error), `""`}, - {new(error), `123`}, - {new(error), `true`}, -} - -func TestUnmarshalTypeError(t *testing.T) { - for _, item := range decodeTypeErrorTests { - err := Unmarshal([]byte(item.src), item.dest) - if _, ok := err.(*UnmarshalTypeError); !ok { - t.Errorf("expected type error for Unmarshal(%q, type %T): got %T", - item.src, item.dest, err) - } - } -} - -var unmarshalSyntaxTests = []string{ - "tru", - "fals", - "nul", - "123e", - `"hello`, - `[1,2,3`, - `{"key":1`, - `{"key":1,`, -} - -func TestUnmarshalSyntax(t *testing.T) { - var x interface{} - for _, src := range unmarshalSyntaxTests { - err := Unmarshal([]byte(src), &x) - if _, ok := err.(*SyntaxError); !ok { - t.Errorf("expected syntax error for Unmarshal(%q): got %T", src, err) - } - } -} - -// Test handling of unexported fields that should be ignored. -// Issue 4660 -type unexportedFields struct { - Name string - m map[string]interface{} `json:"-"` - m2 map[string]interface{} `json:"abcd"` -} - -func TestUnmarshalUnexported(t *testing.T) { - input := `{"Name": "Bob", "m": {"x": 123}, "m2": {"y": 456}, "abcd": {"z": 789}}` - want := &unexportedFields{Name: "Bob"} - - out := &unexportedFields{} - err := Unmarshal([]byte(input), out) - if err != nil { - t.Errorf("got error %v, expected nil", err) - } - if !reflect.DeepEqual(out, want) { - t.Errorf("got %q, want %q", out, want) - } -} - -// Time3339 is a time.Time which encodes to and from JSON -// as an RFC 3339 time in UTC. -type Time3339 time.Time - -func (t *Time3339) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("types: failed to unmarshal non-string value %q as an RFC 3339 time", b) - } - tm, err := time.Parse(time.RFC3339, string(b[1:len(b)-1])) - if err != nil { - return err - } - *t = Time3339(tm) - return nil -} - -func TestUnmarshalJSONLiteralError(t *testing.T) { - var t3 Time3339 - err := Unmarshal([]byte(`"0000-00-00T00:00:00Z"`), &t3) - if err == nil { - t.Fatalf("expected error; got time %v", time.Time(t3)) - } - if !strings.Contains(err.Error(), "range") { - t.Errorf("got err = %v; want out of range error", err) - } -} - -// Test that extra object elements in an array do not result in a -// "data changing underfoot" error. -// Issue 3717 -func TestSkipArrayObjects(t *testing.T) { - json := `[{}]` - var dest [0]interface{} - - err := Unmarshal([]byte(json), &dest) - if err != nil { - t.Errorf("got error %q, want nil", err) - } -} - -// Test semantics of pre-filled struct fields and pre-filled map fields. -// Issue 4900. -func TestPrefilled(t *testing.T) { - ptrToMap := func(m map[string]interface{}) *map[string]interface{} { return &m } - - // Values here change, cannot reuse table across runs. 
-	var prefillTests = []struct {
-		in  string
-		ptr interface{}
-		out interface{}
-	}{
-		{
-			in:  `{"X": 1, "Y": 2}`,
-			ptr: &XYZ{X: float32(3), Y: int16(4), Z: 1.5},
-			out: &XYZ{X: float64(1), Y: float64(2), Z: 1.5},
-		},
-		{
-			in:  `{"X": 1, "Y": 2}`,
-			ptr: ptrToMap(map[string]interface{}{"X": float32(3), "Y": int16(4), "Z": 1.5}),
-			out: ptrToMap(map[string]interface{}{"X": float64(1), "Y": float64(2), "Z": 1.5}),
-		},
-	}
-
-	for _, tt := range prefillTests {
-		ptrstr := fmt.Sprintf("%v", tt.ptr)
-		err := Unmarshal([]byte(tt.in), tt.ptr) // tt.ptr edited here
-		if err != nil {
-			t.Errorf("Unmarshal: %v", err)
-		}
-		if !reflect.DeepEqual(tt.ptr, tt.out) {
-			t.Errorf("Unmarshal(%#q, %s): have %v, want %v", tt.in, ptrstr, tt.ptr, tt.out)
-		}
-	}
-}
-
-var invalidUnmarshalTests = []struct {
-	v    interface{}
-	want string
-}{
-	{nil, "json: Unmarshal(nil)"},
-	{struct{}{}, "json: Unmarshal(non-pointer struct {})"},
-	{(*int)(nil), "json: Unmarshal(nil *int)"},
-}
-
-func TestInvalidUnmarshal(t *testing.T) {
-	buf := []byte(`{"a":"1"}`)
-	for _, tt := range invalidUnmarshalTests {
-		err := Unmarshal(buf, tt.v)
-		if err == nil {
-			t.Errorf("Unmarshal expecting error, got nil")
-			continue
-		}
-		if got := err.Error(); got != tt.want {
-			t.Errorf("Unmarshal = %q; want %q", got, tt.want)
-		}
-	}
-}
-
-var invalidUnmarshalTextTests = []struct {
-	v    interface{}
-	want string
-}{
-	{nil, "json: Unmarshal(nil)"},
-	{struct{}{}, "json: Unmarshal(non-pointer struct {})"},
-	{(*int)(nil), "json: Unmarshal(nil *int)"},
-	{new(net.IP), "json: cannot unmarshal string into Go value of type *net.IP"},
-}
-
-func TestInvalidUnmarshalText(t *testing.T) {
-	buf := []byte(`123`)
-	for _, tt := range invalidUnmarshalTextTests {
-		err := Unmarshal(buf, tt.v)
-		if err == nil {
-			t.Errorf("Unmarshal expecting error, got nil")
-			continue
-		}
-		if got := err.Error(); got != tt.want {
-			t.Errorf("Unmarshal = %q; want %q", got, tt.want)
-		}
-	}
-}
-
-// Test that string option is ignored for invalid types.
-// Issue 9812.
-func TestInvalidStringOption(t *testing.T) {
-	num := 0
-	item := struct {
-		T time.Time         `json:",string"`
-		M map[string]string `json:",string"`
-		S []string          `json:",string"`
-		A [1]string         `json:",string"`
-		I interface{}       `json:",string"`
-		P *int              `json:",string"`
-	}{M: make(map[string]string), S: make([]string, 0), I: num, P: &num}
-
-	data, err := Marshal(item)
-	if err != nil {
-		t.Fatalf("Marshal: %v", err)
-	}
-
-	err = Unmarshal(data, &item)
-	if err != nil {
-		t.Fatalf("Unmarshal: %v", err)
-	}
-}
diff --git a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/encode.go b/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/encode.go
deleted file mode 100644
index 1dae8bb7..00000000
--- a/vendor/github.com/docker/distribution/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/encode.go
+++ /dev/null
@@ -1,1197 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package json implements encoding and decoding of JSON objects as defined in
-// RFC 4627. The mapping between JSON objects and Go values is described
-// in the documentation for the Marshal and Unmarshal functions.
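Alongside the invalid-target tests removed just above, a minimal sketch of the errors Unmarshal reports when handed nil or a non-pointer destination. This assumes the standard library encoding/json, whose error messages the removed tests assert verbatim; the vendored fork deleted here is a copy of that package.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	buf := []byte(`{"a":"1"}`)

	// A nil destination is rejected outright.
	fmt.Println(json.Unmarshal(buf, nil)) // json: Unmarshal(nil)

	// So is any non-pointer value, since Unmarshal could not store into it.
	fmt.Println(json.Unmarshal(buf, struct{}{})) // json: Unmarshal(non-pointer struct {})

	// A typed nil pointer is also invalid.
	fmt.Println(json.Unmarshal(buf, (*int)(nil))) // json: Unmarshal(nil *int)
}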
-//
-// See "JSON and Go" for an introduction to this package:
-// https://golang.org/doc/articles/json_and_go.html
-package json
-
-import (
-	"bytes"
-	"encoding"
-	"encoding/base64"
-	"fmt"
-	"math"
-	"reflect"
-	"runtime"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
-	"unicode"
-	"unicode/utf8"
-)
-
-// Marshal returns the JSON encoding of v.
-//
-// Marshal traverses the value v recursively.
-// If an encountered value implements the Marshaler interface
-// and is not a nil pointer, Marshal calls its MarshalJSON method
-// to produce JSON. If no MarshalJSON method is present but the
-// value implements encoding.TextMarshaler instead, Marshal calls
-// its MarshalText method.
-// The nil pointer exception is not strictly necessary
-// but mimics a similar, necessary exception in the behavior of
-// UnmarshalJSON.
-//
-// Otherwise, Marshal uses the following type-dependent default encodings:
-//
-// Boolean values encode as JSON booleans.
-//
-// Floating point, integer, and Number values encode as JSON numbers.
-//
-// String values encode as JSON strings coerced to valid UTF-8,
-// replacing invalid bytes with the Unicode replacement rune.
-// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
-// to keep some browsers from misinterpreting JSON output as HTML.
-// Ampersand "&" is also escaped to "\u0026" for the same reason.
-//
-// Array and slice values encode as JSON arrays, except that
-// []byte encodes as a base64-encoded string, and a nil slice
-// encodes as the null JSON object.
-//
-// Struct values encode as JSON objects. Each exported struct field
-// becomes a member of the object unless
-//   - the field's tag is "-", or
-//   - the field is empty and its tag specifies the "omitempty" option.
-// The empty values are false, 0, any
-// nil pointer or interface value, and any array, slice, map, or string of
-// length zero. The object's default key string is the struct field name
-// but can be specified in the struct field's tag value. The "json" key in
-// the struct field's tag value is the key name, followed by an optional comma
-// and options. Examples:
-//
-//	// Field is ignored by this package.
-//	Field int `json:"-"`
-//
-//	// Field appears in JSON as key "myName".
-//	Field int `json:"myName"`
-//
-//	// Field appears in JSON as key "myName" and
-//	// the field is omitted from the object if its value is empty,
-//	// as defined above.
-//	Field int `json:"myName,omitempty"`
-//
-//	// Field appears in JSON as key "Field" (the default), but
-//	// the field is skipped if empty.
-//	// Note the leading comma.
-//	Field int `json:",omitempty"`
-//
-// The "string" option signals that a field is stored as JSON inside a
-// JSON-encoded string. It applies only to fields of string, floating point,
-// integer, or boolean types. This extra level of encoding is sometimes used
-// when communicating with JavaScript programs:
-//
-//	Int64String int64 `json:",string"`
-//
-// The key name will be used if it's a non-empty string consisting of
-// only Unicode letters, digits, dollar signs, percent signs, hyphens,
-// underscores and slashes.
-//
-// Anonymous struct fields are usually marshaled as if their inner exported fields
-// were fields in the outer struct, subject to the usual Go visibility rules amended
-// as described in the next paragraph.
-// An anonymous struct field with a name given in its JSON tag is treated as
-// having that name, rather than being anonymous.
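The field-tag options documented in the comment block above are easier to read with a small worked example. This sketch assumes the standard library encoding/json (the package this vendored copy mirrors); the type Item and its field names are illustrative only.

package main

import (
	"encoding/json"
	"fmt"
)

// Item demonstrates the tag options described above: key renaming,
// omitempty, the "-" tag, and the ",string" option.
type Item struct {
	ID     int    `json:"myName"`     // renamed to "myName" in the output
	Note   string `json:",omitempty"` // default key "Note", omitted when empty
	Hidden string `json:"-"`          // never encoded
	Port   int64  `json:",string"`    // encoded inside a JSON string
}

func main() {
	out, err := json.Marshal(Item{ID: 1, Hidden: "secret", Port: 42})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"myName":1,"Port":"42"}
}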
-// An anonymous struct field of interface type is treated the same as having
-// that type as its name, rather than being anonymous.
-//
-// The Go visibility rules for struct fields are amended for JSON when
-// deciding which field to marshal or unmarshal. If there are
-// multiple fields at the same level, and that level is the least
-// nested (and would therefore be the nesting level selected by the
-// usual Go rules), the following extra rules apply:
-//
-// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
-// even if there are multiple untagged fields that would otherwise conflict.
-// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
-// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
-//
-// Handling of anonymous struct fields is new in Go 1.1.
-// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
-// an anonymous struct field in both current and earlier versions, give the field
-// a JSON tag of "-".
-//
-// Map values encode as JSON objects.
-// The map's key type must be string; the map keys are used as JSON object
-// keys, subject to the UTF-8 coercion described for string values above.
-//
-// Pointer values encode as the value pointed to.
-// A nil pointer encodes as the null JSON object.
-//
-// Interface values encode as the value contained in the interface.
-// A nil interface value encodes as the null JSON object.
-//
-// Channel, complex, and function values cannot be encoded in JSON.
-// Attempting to encode such a value causes Marshal to return
-// an UnsupportedTypeError.
-//
-// JSON cannot represent cyclic data structures and Marshal does not
-// handle them. Passing cyclic structures to Marshal will result in
-// an infinite recursion.
-//
-func Marshal(v interface{}) ([]byte, error) {
-	e := &encodeState{}
-	err := e.marshal(v)
-	if err != nil {
-		return nil, err
-	}
-	return e.Bytes(), nil
-}
-
-// MarshalIndent is like Marshal but applies Indent to format the output.
-func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
-	b, err := Marshal(v)
-	if err != nil {
-		return nil, err
-	}
-	var buf bytes.Buffer
-	err = Indent(&buf, b, prefix, indent)
-	if err != nil {
-		return nil, err
-	}
-	return buf.Bytes(), nil
-}
-
-// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
-// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
-// so that the JSON will be safe to embed inside HTML